diff -urN zz/include/linux/rcupdate.h z/include/linux/rcupdate.h
--- zz/include/linux/rcupdate.h	Thu Jan  1 01:00:00 1970
+++ z/include/linux/rcupdate.h	Mon Sep 10 07:01:50 2001
@@ -0,0 +1,48 @@
+/*
+ * Read-Copy Update mechanism for Linux
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * http://lse.sourceforge.net/locking/rcupdate.html
+ *
+ */
+
+#ifndef _LINUX_RCUPDATE_H
+#define _LINUX_RCUPDATE_H
+
+#include <linux/list.h>		/* struct list_head -- header names were lost in transcription; verify */
+#include <linux/cache.h>	/* ____cacheline_aligned_in_smp */
+#include <linux/sched.h>	/* struct task_struct */
+#include <asm/semaphore.h>	/* struct semaphore */
+
+struct rcu_data {			/* per-CPU krcud daemon state, one slot per CPU */
+	struct task_struct *krcud_task;	/* daemon task; set non-NULL once krcud is running */
+	struct semaphore krcud_sema;	/* upped to force that CPU through a quiescent state */
+} ____cacheline_aligned_in_smp;
+
+#define krcud_task(cpu) rcu_data[(cpu)].krcud_task
+#define krcud_sema(cpu) rcu_data[(cpu)].krcud_sema
+
+struct rcu_head				/* callback queued by call_rcu(); func(arg) runs after a grace period */
+{
+	struct list_head list;
+	void (*func)(void * arg);
+	void * arg;
+};
+
+extern void call_rcu(struct rcu_head * head, void (*func)(void * arg), void * arg);
+
+#endif
diff -urN zz/kernel/Makefile z/kernel/Makefile
--- zz/kernel/Makefile	Tue Jan  2 17:41:22 2001
+++ z/kernel/Makefile	Mon Sep 10 06:42:20 2001
@@ -9,12 +9,12 @@
 
 O_TARGET := kernel.o
 
-export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o
+export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o rcupdate.o
 
 obj-y     = sched.o dma.o fork.o exec_domain.o panic.o printk.o \
 	    module.o
exit.o itimer.o info.o time.o softirq.o resource.o \ sysctl.o acct.o capability.o ptrace.o timer.o user.o \ - signal.o sys.o kmod.o context.o + signal.o sys.o kmod.o context.o rcupdate.o obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_MODULES) += ksyms.o diff -urN zz/kernel/rcupdate.c z/kernel/rcupdate.c --- zz/kernel/rcupdate.c Thu Jan 1 01:00:00 1970 +++ z/kernel/rcupdate.c Mon Sep 10 06:58:57 2001 @@ -0,0 +1,165 @@ +/* + * Read-Copy Update mechanism for Linux + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * For detailed explanation of Read-Copy Update mechanism see - + * http://lse.sourceforge.net/locking/rcupdate.html + * + */ + +#include +#include +#include +#include +#include +#include +#include + +asmlinkage long sys_sched_get_priority_max(int policy); + +static spinlock_t rcu_lock = SPIN_LOCK_UNLOCKED; +static struct list_head rcu_wait_list; +static struct tq_struct rcu_task; +static struct semaphore rcu_sema; +static struct rcu_data rcu_data[NR_CPUS]; + +/* + * Wait for all the CPUs to go through a quiescent state. It assumes + * that current CPU doesn't have any reference to RCU protected + * data and thus has already undergone a quiescent state since update. 
+ */
+static void wait_for_rcu(void)
+{
+	int cpu;
+	int count;
+
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+		if (cpu == smp_processor_id())
+			continue;
+		up(&krcud_sema(cpu));	/* wake krcud on every other CPU */
+	}
+	count = 0;
+	while (count++ < smp_num_cpus - 1)
+		down(&rcu_sema);	/* one ack per remote CPU; no-op loop on UP */
+}
+
+/*
+ * Process a batch of RCU callbacks (the batch can be empty).
+ * There can be only one batch processed at any point of time.
+ */
+static void process_pending_rcus(void *arg)
+{
+	LIST_HEAD(rcu_current_list);
+	struct list_head * entry;
+
+	spin_lock_irq(&rcu_lock);
+	list_splice(&rcu_wait_list, rcu_current_list.prev);	/* grab the whole pending batch */
+	INIT_LIST_HEAD(&rcu_wait_list);
+	spin_unlock_irq(&rcu_lock);
+
+	wait_for_rcu();
+
+	while ((entry = rcu_current_list.prev) != &rcu_current_list) {	/* pop from tail: oldest first (FIFO) */
+		struct rcu_head * head;
+
+		list_del(entry);
+		head = list_entry(entry, struct rcu_head, list);
+		head->func(head->arg);
+	}
+}
+
+/*
+ * Register a RCU callback to be invoked after all CPUs have
+ * gone through a quiescent state.
+ */
+void call_rcu(struct rcu_head * head, void (*func)(void * arg), void * arg)
+{
+	unsigned long flags;
+	int start = 0;
+
+	head->func = func;
+	head->arg = arg;
+
+	spin_lock_irqsave(&rcu_lock, flags);
+	if (list_empty(&rcu_wait_list))
+		start = 1;	/* first entry in an empty list: kick off a batch */
+	list_add(&head->list, &rcu_wait_list);	/* adds at head; consumer drains from tail */
+	spin_unlock_irqrestore(&rcu_lock, flags);
+
+	if (start)
+		schedule_task(&rcu_task);
+}
+
+/*
+ * Per-CPU RCU daemon. It runs at an absurdly high priority so
+ * that it is not starved out by the scheduler thereby holding
+ * up RC updates.
+ */
+static int krcud(void * __bind_cpu)
+{
+	int bind_cpu = *(int *) __bind_cpu;
+	int cpu = cpu_logical_map(bind_cpu);
+
+	daemonize();
+	current->policy = SCHED_FIFO;
+	current->rt_priority = 1001 + sys_sched_get_priority_max(SCHED_FIFO);
+
+	sigfillset(&current->blocked);	/* fix: "&current" was mangled to the '¤' mojibake ("&curren;") */
+
+	/* Migrate to the right CPU */
+	current->cpus_allowed = 1UL << cpu;
+	while (smp_processor_id() != cpu)
+		schedule();
+
+	sprintf(current->comm, "krcud_CPU%d", bind_cpu);
+	sema_init(&krcud_sema(cpu), 0);
+
+	krcud_task(cpu) = current;	/* publish last: spawner waits on this before reusing the cpu arg */
+
+	for (;;) {
+		down(&krcud_sema(cpu));	/* reaching here is this CPU's quiescent state... */
+		up(&rcu_sema);		/* ...so acknowledge to wait_for_rcu() */
+	}
+}
+
+static void spawn_krcud(void)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+		if (kernel_thread(krcud, (void *) &cpu,
+				  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
+			printk("spawn_krcud() failed for cpu %d\n", cpu);
+		else {
+			/* Wait until the child has read &cpu and registered itself
+			 * before the loop advances and reuses the variable. */
+			while (!krcud_task(cpu_logical_map(cpu))) {
+				current->policy |= SCHED_YIELD;
+				schedule();
+			}
+		}
+	}
+}
+
+static __init int rcu_init(void)
+{
+	sema_init(&rcu_sema, 0);
+	rcu_task.routine = process_pending_rcus;
+	spawn_krcud();
+	return 0;
+}
+
+__initcall(rcu_init);
+
+EXPORT_SYMBOL(call_rcu);