From: Eric Sandeen <sandeen@redhat.com>
Date: Mon, 4 Aug 2008 14:37:26 -0500
Subject: [fs] add percpu_counter_add & _sub
Message-id: 48975A76.70704@redhat.com
O-Subject: [PATCH RHEL5 4/9] UPDATED percpu_counter_add & _sub
Bugzilla: 443896
RH-Acked-by: Jeff Layton <jlayton@redhat.com>
RH-Acked-by: Alexander Viro <aviro@redhat.com>
RH-Acked-by: Jeff Moyer <jmoyer@redhat.com>

This is for:

Bugzilla Bug 443896: RFE: [Ext4 enabler] backport vfs
helpers to facilitate ext4 backport and testing

Incorporates, roughly:

From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Wed, 17 Oct 2007 06:25:42 +0000 (-0700)
Subject: lib: percpu_counter_add
X-Git-Tag: v2.6.24-rc1~814
X-Git-Url: http://git.engineering.redhat.com/?p=linux-2.6.git;a=commitdiff_plain;h=aa0dff2d09bfa50b7d02714a45920c64568e699d

lib: percpu_counter_add

 s/percpu_counter_mod/percpu_counter_add/

Because it's a better name; _mod implies modulo.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
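
For quick reference, a minimal usage sketch of the interface this patch
exports; "counter" here is hypothetical and assumed to be initialized
elsewhere (e.g. via percpu_counter_init() at mount time), and the
amounts are arbitrary:

	#include <linux/percpu_counter.h>

	static struct percpu_counter counter;

	static void example_update(void)
	{
		/* both helpers take an s64 and funnel through percpu_counter_mod64() */
		percpu_counter_add(&counter, 128);	/* upstream's new name for _mod */
		percpu_counter_sub(&counter, 64);	/* == percpu_counter_add(&counter, -64) */
	}

Note the backport adds the new names as wrappers rather than performing
the upstream rename, presumably so existing RHEL5 callers of
percpu_counter_mod() keep building unchanged.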

diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index f5aa593..d884622 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -39,6 +39,7 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
 }
 
 void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
+void percpu_counter_mod64(struct percpu_counter *fbc, s64 amount);
 s64 percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
@@ -84,6 +85,14 @@ percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
 	preempt_enable();
 }
 
+static inline void
+percpu_counter_mod64(struct percpu_counter *fbc, s64 amount)
+{
+	preempt_disable();
+	fbc->count += amount;
+	preempt_enable();
+}
+
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
 {
 	return fbc->count;
@@ -111,4 +120,14 @@ static inline void percpu_counter_dec(struct percpu_counter *fbc)
 	percpu_counter_mod(fbc, -1);
 }
 
+static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+	percpu_counter_mod64(fbc, amount);
+}
+
+static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
+{
+	percpu_counter_mod64(fbc, -amount);
+}
+
 #endif /* _LINUX_PERCPU_COUNTER_H */
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 8504490..3aa133b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -25,6 +25,26 @@ void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
 }
 EXPORT_SYMBOL(percpu_counter_mod);
 
+void percpu_counter_mod64(struct percpu_counter *fbc, s64 amount)
+{
+	s64 count;
+	s32 *pcount;
+	int cpu = get_cpu();
+
+	pcount = per_cpu_ptr(fbc->counters, cpu);
+	count = *pcount + amount;
+	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+		spin_lock(&fbc->lock);
+		fbc->count += count;
+		*pcount = 0;
+		spin_unlock(&fbc->lock);
+	} else {
+		*pcount = count;
+	}
+	put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod64);
+
 /*
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
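
The batching in percpu_counter_mod64() above bounds both lock traffic
and drift: each CPU accumulates a private delta and folds it into
fbc->count (under the spinlock) only once it reaches FBC_BATCH, so the
global count can lag by at most FBC_BATCH-1 per CPU. A user-space model
of that logic, for illustration only (a single CPU, with FBC_BATCH
pinned to 32 rather than derived from NR_CPUS as in the kernel header):

	#include <stdio.h>

	#define FBC_BATCH 32	/* stand-in; the kernel derives this from NR_CPUS */

	struct model_counter {
		long long count;	/* "global" count, spinlock-protected in the kernel */
		int pcount;		/* one per-cpu delta; a single CPU is modeled here */
	};

	/* mirrors the fold-when-large logic of percpu_counter_mod64() */
	static void model_add(struct model_counter *c, long long amount)
	{
		long long delta = c->pcount + amount;

		if (delta >= FBC_BATCH || delta <= -FBC_BATCH) {
			c->count += delta;	/* fold into the global count */
			c->pcount = 0;
		} else {
			c->pcount = delta;	/* stays CPU-local, no lock taken */
		}
	}

	int main(void)
	{
		struct model_counter c = { 0, 0 };
		int i;

		/* 40 increments of 1: the first 32 fold once, 8 stay local */
		for (i = 0; i < 40; i++)
			model_add(&c, 1);
		printf("global=%lld local=%d\n", c.count, c.pcount);
		return 0;
	}

This prints "global=32 local=8"; that read-side approximation is why
the trailing context above describes percpu_counter_sum() as the more
accurate but much slower alternative to percpu_counter_read_positive().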