author    Dipankar Sarma <dipankar@in.ibm.com>       2004-08-22 22:57:53 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2004-08-22 22:57:53 -0700
commit    e86e2311ae3c844a1efe4f6d569dfb548f7f58c7 (patch)
tree      19b9b0cf77ffa8c1785c7a5b969306d5f2705caa /include
parent    daf86b08a178f950c0e0ec073c25cc392dbbc789 (diff)
[PATCH] rcu: clean up code
Avoid per_cpu() calculations and also prepare for call_rcu_bh(). At OLS, Rusty had suggested getting rid of many per_cpu() calculations in the RCU code and making the code simpler. I had already done that for the rcu-softirq patch earlier, so I am splitting that work into two patches. This first patch cleans up the macros and uses pointers to the RCU per-cpu data directly to manipulate the callback queues. This is useful for the call-rcu-bh patch (to follow), which introduces a new RCU mechanism - call_rcu_bh(). Both generic and softirq RCU can then use the same code; they just work on different global and per-cpu data.

Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
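The point of the refactoring - one shared helper that can be handed different global/per-cpu state pairs - can be illustrated with a minimal, self-contained userspace sketch. This is not the kernel code: the struct layouts and the __rcu_pending() checks mirror the patch, but the simplified field types, the two-element "per-cpu" arrays, the rcu_bh_* state (which the follow-on call_rcu_bh() patch would add) and main() are invented for illustration, and rcu_batch_before() is approximated with a plain comparison instead of the wraparound-safe one.

	/* Simplified userspace sketch of the pattern in this patch; names that
	 * mirror the kernel are kept, everything else is illustrative only. */
	#include <stdio.h>

	struct rcu_ctrlblk { long cur; long completed; };
	struct rcu_head { struct rcu_head *next; };
	struct rcu_data {
		long quiescbatch;
		long batch;
		int  qs_pending;
		struct rcu_head *nxtlist, *curlist, *donelist;
	};

	/* One shared helper works on whichever (ctrlblk, data) pair it is given. */
	static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
	{
		if (rdp->curlist && !(rcp->completed < rdp->batch))
			return 1;	/* grace period completed for queued entries */
		if (!rdp->curlist && rdp->nxtlist)
			return 1;	/* new entries waiting to be batched */
		if (rdp->donelist)
			return 1;	/* finished callbacks to invoke */
		if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
			return 1;	/* core still waits for a quiescent state */
		return 0;
	}

	/* Two mechanisms ("generic" and "bh") reuse the same code by passing
	 * different global and per-cpu data, as the commit message describes.
	 * The bh variants are assumptions here; they arrive in the next patch. */
	static struct rcu_ctrlblk rcu_ctrlblk, rcu_bh_ctrlblk;
	static struct rcu_data rcu_data[2], rcu_bh_data[2];	/* pretend: 2 CPUs */

	int main(void)
	{
		rcu_data[0].qs_pending = 1;	/* fake a pending quiescent state */
		printf("cpu0 generic pending: %d\n",
		       __rcu_pending(&rcu_ctrlblk, &rcu_data[0]));
		printf("cpu0 bh pending:      %d\n",
		       __rcu_pending(&rcu_bh_ctrlblk, &rcu_bh_data[0]));
		return 0;
	}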
Diffstat (limited to 'include')
-rw-r--r--  include/linux/rcupdate.h  40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b689ab6108bdb9..45ca384109e152 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,47 +101,51 @@ struct rcu_data {
struct rcu_head **curtail;
struct rcu_head *donelist;
struct rcu_head **donetail;
+ int cpu;
};
DECLARE_PER_CPU(struct rcu_data, rcu_data);
extern struct rcu_ctrlblk rcu_ctrlblk;
-#define RCU_quiescbatch(cpu) (per_cpu(rcu_data, (cpu)).quiescbatch)
-#define RCU_qsctr(cpu) (per_cpu(rcu_data, (cpu)).qsctr)
-#define RCU_last_qsctr(cpu) (per_cpu(rcu_data, (cpu)).last_qsctr)
-#define RCU_qs_pending(cpu) (per_cpu(rcu_data, (cpu)).qs_pending)
-#define RCU_batch(cpu) (per_cpu(rcu_data, (cpu)).batch)
-#define RCU_nxtlist(cpu) (per_cpu(rcu_data, (cpu)).nxtlist)
-#define RCU_curlist(cpu) (per_cpu(rcu_data, (cpu)).curlist)
-#define RCU_nxttail(cpu) (per_cpu(rcu_data, (cpu)).nxttail)
-#define RCU_curtail(cpu) (per_cpu(rcu_data, (cpu)).curtail)
-#define RCU_donelist(cpu) (per_cpu(rcu_data, (cpu)).donelist)
-#define RCU_donetail(cpu) (per_cpu(rcu_data, (cpu)).donetail)
-
-static inline int rcu_pending(int cpu)
+/*
+ * Increment the quiescent state counter.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ rdp->qsctr++;
+}
+
+static inline int __rcu_pending(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
{
/* This cpu has pending rcu entries and the grace period
* for them has completed.
*/
- if (RCU_curlist(cpu) &&
- !rcu_batch_before(rcu_ctrlblk.completed,RCU_batch(cpu)))
+ if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
return 1;
/* This cpu has no pending entries, but there are new entries */
- if (!RCU_curlist(cpu) && RCU_nxtlist(cpu))
+ if (!rdp->curlist && rdp->nxtlist)
return 1;
- if (RCU_donelist(cpu))
+ /* This cpu has finished callbacks to invoke */
+ if (rdp->donelist)
return 1;
/* The rcu core waits for a quiescent state from the cpu */
- if (RCU_quiescbatch(cpu) != rcu_ctrlblk.cur || RCU_qs_pending(cpu))
+ if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
return 1;
/* nothing to do */
return 0;
}
+static inline int rcu_pending(int cpu)
+{
+ return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu));
+}
+
#define rcu_read_lock() preempt_disable()
#define rcu_read_unlock() preempt_enable()
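
The rcu_read_lock()/rcu_read_unlock() macros kept at the end of the hunk simply map onto preemption control in this kernel. Below is a hypothetical read-side usage sketch, with userspace stubs and an invented list type so it compiles on its own; only the lock/unlock pairing reflects the API shown above.

	#include <stdio.h>

	/* Userspace stubs standing in for the kernel's preemption control
	 * (assumption for illustration only). */
	#define preempt_disable()	((void)0)
	#define preempt_enable()	((void)0)
	#define rcu_read_lock()		preempt_disable()
	#define rcu_read_unlock()	preempt_enable()

	struct item { int val; struct item *next; };	/* hypothetical RCU-protected list */
	static struct item *head;

	static void reader(void)
	{
		struct item *p;

		rcu_read_lock();		/* delimit the read-side critical section */
		for (p = head; p; p = p->next)
			printf("%d\n", p->val);
		rcu_read_unlock();
	}

	int main(void)
	{
		static struct item a = { 42, NULL };
		head = &a;
		reader();
		return 0;
	}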