author    Dipankar Sarma <dipankar@in.ibm.com>    2004-08-22 22:57:42 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2004-08-22 22:57:42 -0700
commit    daf86b08a178f950c0e0ec073c25cc392dbbc789 (patch)
tree      8b76bd66d2b2c426d42282633c6525716ea9bad6 /kernel
parent    f0f4d6e41008746f51db2c795469e1707e516672 (diff)
download  history-daf86b08a178f950c0e0ec073c25cc392dbbc789.tar.gz
[PATCH] RCU: low latency rcu
This patch makes RCU callbacks friendly to the scheduler. It improves latency by limiting the number of callbacks invoked per tasklet handler. Since we cannot schedule inside a single softirq handler, this significantly reduces the size of the non-preemptible section, especially under heavy RCU update loads. The limit is controlled through the kernel parameter rcupdate.maxbatch, the maximum number of RCU callbacks to invoke during a single tasklet handler.

Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
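The core of the change is the bounded drain in rcu_do_batch(): invoke at most maxbatch callbacks per tasklet run and, if the per-CPU done-list is not yet empty, reschedule the tasklet so the remainder is handled in a later softirq. A minimal user-space sketch of that pattern, assuming a simple singly-linked callback list (the cb_node, drain_batch and free_cb names are illustrative, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct rcu_head: a node carrying its callback. */
struct cb_node {
        struct cb_node *next;
        void (*func)(struct cb_node *);
};

static void free_cb(struct cb_node *node)
{
        printf("invoking callback for %p\n", (void *)node);
        free(node);
}

/*
 * Drain at most 'maxbatch' callbacks from *list, mirroring the bounded
 * loop in rcu_do_batch().  Returns nonzero if work is left over, i.e.
 * the caller should schedule another pass -- the tasklet_schedule()
 * step in the patch.
 */
static int drain_batch(struct cb_node **list, int maxbatch)
{
        int count = 0;

        while (*list) {
                struct cb_node *node = *list;

                *list = node->next;     /* advance the list head */
                node->func(node);       /* invoke the callback */
                if (++count >= maxbatch)
                        break;          /* bound the non-preemptible work */
        }
        return *list != NULL;
}

int main(void)
{
        struct cb_node *list = NULL;
        int passes = 0;

        /* Queue 25 callbacks. */
        for (int i = 0; i < 25; i++) {
                struct cb_node *node = malloc(sizeof(*node));

                node->func = free_cb;
                node->next = list;
                list = node;
        }

        /* Each iteration stands in for one tasklet invocation. */
        do {
                passes++;
        } while (drain_batch(&list, 10));

        printf("drained in %d passes of at most 10 callbacks\n", passes);
        return 0;
}

Bounding each pass keeps any single non-preemptible stretch short; leftover work is simply deferred to the next pass, just as tasklet_schedule() defers it in the patch.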
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcupdate.c | 27
1 file changed, 19 insertions, 8 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 5a8d9856610b48..c944504fc8d04e 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -40,6 +40,7 @@
#include <asm/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
@@ -63,6 +64,7 @@ DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
#define RCU_tasklet(cpu) (per_cpu(rcu_tasklet, cpu))
+static int maxbatch = 10;
/**
* call_rcu - Queue an RCU update request.
@@ -93,15 +95,23 @@ void fastcall call_rcu(struct rcu_head *head,
* Invoke the completed RCU callbacks. They are expected to be in
* a per-cpu list.
*/
-static void rcu_do_batch(struct rcu_head *list)
+static void rcu_do_batch(int cpu)
{
- struct rcu_head *next;
+ struct rcu_head *next, *list;
+ int count = 0;
+ list = RCU_donelist(cpu);
while (list) {
- next = list->next;
+ next = RCU_donelist(cpu) = list->next;
list->func(list);
list = next;
+ if (++count >= maxbatch)
+ break;
}
+ if (!RCU_donelist(cpu))
+ RCU_donetail(cpu) = &RCU_donelist(cpu);
+ else
+ tasklet_schedule(&RCU_tasklet(cpu));
}
/*
@@ -261,11 +271,11 @@ void rcu_restart_cpu(int cpu)
static void rcu_process_callbacks(unsigned long unused)
{
int cpu = smp_processor_id();
- struct rcu_head *rcu_list = NULL;
if (RCU_curlist(cpu) &&
!rcu_batch_before(rcu_ctrlblk.completed, RCU_batch(cpu))) {
- rcu_list = RCU_curlist(cpu);
+ *RCU_donetail(cpu) = RCU_curlist(cpu);
+ RCU_donetail(cpu) = RCU_curtail(cpu);
RCU_curlist(cpu) = NULL;
RCU_curtail(cpu) = &RCU_curlist(cpu);
}
@@ -300,8 +310,8 @@ static void rcu_process_callbacks(unsigned long unused)
local_irq_enable();
}
rcu_check_quiescent_state();
- if (rcu_list)
- rcu_do_batch(rcu_list);
+ if (RCU_donelist(cpu))
+ rcu_do_batch(cpu);
}
void rcu_check_callbacks(int cpu, int user)
@@ -319,6 +329,7 @@ static void __devinit rcu_online_cpu(int cpu)
tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
RCU_curtail(cpu) = &RCU_curlist(cpu);
RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
+ RCU_donetail(cpu) = &RCU_donelist(cpu);
RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
RCU_qs_pending(cpu) = 0;
}
@@ -388,6 +399,6 @@ void synchronize_kernel(void)
wait_for_completion(&rcu.completion);
}
-
+module_param(maxbatch, int, 0);
EXPORT_SYMBOL(call_rcu);
EXPORT_SYMBOL(synchronize_kernel);
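The patch also adds a per-CPU done-list with a tail pointer (RCU_donelist()/RCU_donetail()), so callbacks whose grace period has completed but which have not yet been invoked survive across tasklet runs; rcu_process_callbacks() splices the finished batch onto that list in O(1) through the tail pointer instead of handing rcu_do_batch() a detached list. A stand-alone sketch of that head/tail-pointer splice, with illustrative names (cb_list, cb_list_splice) rather than kernel APIs:

#include <stdio.h>

struct node {
        struct node *next;
        int val;
};

/*
 * A singly-linked list that can be appended to in O(1): 'head' points
 * at the first node, 'tail' points at the 'next' field a new node (or
 * a whole list) should be stored into -- the same shape as the
 * RCU_donelist()/RCU_donetail() pair in the patch.
 */
struct cb_list {
        struct node *head;
        struct node **tail;
};

static void cb_list_init(struct cb_list *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void cb_list_add(struct cb_list *l, struct node *n)
{
        n->next = NULL;
        *l->tail = n;
        l->tail = &n->next;
}

/* Splice all of 'src' onto the end of 'dst' and leave 'src' empty. */
static void cb_list_splice(struct cb_list *dst, struct cb_list *src)
{
        if (!src->head)
                return;
        *dst->tail = src->head;  /* like: *RCU_donetail(cpu) = RCU_curlist(cpu) */
        dst->tail = src->tail;   /* like: RCU_donetail(cpu) = RCU_curtail(cpu) */
        cb_list_init(src);       /* like: RCU_curlist(cpu) = NULL; RCU_curtail(cpu) = &RCU_curlist(cpu) */
}

int main(void)
{
        struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
        struct cb_list done, cur;
        struct node *n;

        cb_list_init(&done);
        cb_list_init(&cur);
        cb_list_add(&done, &a);
        cb_list_add(&cur, &b);
        cb_list_add(&cur, &c);

        cb_list_splice(&done, &cur);    /* done now holds 1, 2, 3; cur is empty */
        for (n = done.head; n; n = n->next)
                printf("%d\n", n->val);
        return 0;
}

Since rcupdate.c is built into the kernel, maxbatch is set on the kernel command line as rcupdate.maxbatch=<n>; the permission argument of 0 in module_param() means the value is not exposed through sysfs.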