diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-alpha/system.h x/include/asm-alpha/system.h --- x-ref/include/asm-alpha/system.h 2003-09-01 19:19:20.000000000 +0200 +++ x/include/asm-alpha/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -148,6 +148,9 @@ __asm__ __volatile__("mb": : :"memory") #define rmb() \ __asm__ __volatile__("mb": : :"memory") +#define read_barrier_depends() \ +__asm__ __volatile__("mb": : :"memory") + #define wmb() \ __asm__ __volatile__("wmb": : :"memory") @@ -155,10 +158,12 @@ __asm__ __volatile__("wmb": : :"memory") #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() barrier() #endif #define set_mb(var, value) \ diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-arm/system.h x/include/asm-arm/system.h --- x-ref/include/asm-arm/system.h 2003-08-26 00:13:05.000000000 +0200 +++ x/include/asm-arm/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -45,6 +45,7 @@ extern asmlinkage void __backtrace(void) #define mb() __asm__ __volatile__ ("" : : : "memory") #define rmb() mb() +#define read_barrier_depends() do { } while(0) #define wmb() mb() #define set_mb(var, value) do { var = value; mb(); } while (0) #define set_wmb(var, value) do { var = value; wmb(); } while (0) @@ -78,12 +79,14 @@ extern struct task_struct *__switch_to(s #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) #define cli() local_irq_disable() #define sti() local_irq_enable() diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-cris/system.h x/include/asm-cris/system.h --- x-ref/include/asm-cris/system.h 2003-08-26 00:13:05.000000000 
+0200 +++ x/include/asm-cris/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -218,15 +218,18 @@ extern inline unsigned long __xchg(unsig #define wmb() mb() #define set_mb(var, value) do { var = value; mb(); } while (0) #define set_wmb(var, value) do { var = value; wmb(); } while (0) +#define read_barrier_depends() do { } while(0) #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) #endif #define iret() diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-i386/system.h x/include/asm-i386/system.h --- x-ref/include/asm-i386/system.h 2003-09-01 19:19:28.000000000 +0200 +++ x/include/asm-i386/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -347,6 +347,18 @@ static inline unsigned long __cmpxchg(vo #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory") #define rmb() mb() +/* + * If for whatever reason any version of the cpu would not + * obey the dependency and read straight into the cache + * without implicit barriers, the noop would race. So I prefer + * to go 100% safe and define it as rmb(), instead of nothing. + * At least for 2.4. 
+ */ +#if 0 +#define read_barrier_depends() do { } while(0) +#else +#define read_barrier_depends() rmb() +#endif #ifdef CONFIG_X86_OOSTORE #define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory") @@ -358,12 +370,14 @@ static inline unsigned long __cmpxchg(vo #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { xchg(&var, value); } while (0) #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define set_mb(var, value) do { var = value; barrier(); } while (0) +#define smp_read_barrier_depends() do { } while(0) #endif #define set_wmb(var, value) do { var = value; wmb(); } while (0) diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-ia64/system.h x/include/asm-ia64/system.h --- x-ref/include/asm-ia64/system.h 2003-08-26 00:13:06.000000000 +0200 +++ x/include/asm-ia64/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -84,16 +84,19 @@ ia64_insn_group_barrier (void) */ #define mb() __asm__ __volatile__ ("mf" ::: "memory") #define rmb() mb() +#define read_barrier_depends() do { } while(0) #define wmb() mb() #ifdef CONFIG_SMP # define smp_mb() mb() # define smp_rmb() rmb() # define smp_wmb() wmb() +# define smp_read_barrier_depends() read_barrier_depends() #else # define smp_mb() barrier() # define smp_rmb() barrier() # define smp_wmb() barrier() +# define smp_read_barrier_depends() do { } while(0) #endif /* diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-m68k/system.h x/include/asm-m68k/system.h --- x-ref/include/asm-m68k/system.h 2003-06-13 22:07:41.000000000 +0200 +++ x/include/asm-m68k/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -82,6 +82,7 @@ asmlinkage void resume(void); #define nop() do { asm volatile ("nop"); barrier(); } while (0) #define mb() barrier() #define rmb() barrier() +#define read_barrier_depends() do { } while(0) #define wmb() barrier() #define set_mb(var, value) 
do { var = value; mb(); } while (0) #define set_wmb(var, value) do { var = value; wmb(); } while (0) @@ -89,6 +90,7 @@ asmlinkage void resume(void); #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-mips/system.h x/include/asm-mips/system.h --- x-ref/include/asm-mips/system.h 2003-08-26 00:13:06.000000000 +0200 +++ x/include/asm-mips/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -243,15 +243,18 @@ extern void __global_restore_flags(unsig #define iob() fast_iob() #endif /* !CONFIG_CPU_HAS_WB */ +#define read_barrier_depends() do { } while(0) #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) #endif #define set_mb(var, value) \ diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-mips64/system.h x/include/asm-mips64/system.h --- x-ref/include/asm-mips64/system.h 2003-08-26 00:13:07.000000000 +0200 +++ x/include/asm-mips64/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -217,15 +217,18 @@ extern void __global_restore_flags(unsig #define rmb() fast_rmb() #define mb() fast_mb() #define iob() fast_iob() +#define read_barrier_depends() do { } while(0) #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) #endif #define set_mb(var, value) \ diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-parisc/system.h x/include/asm-parisc/system.h --- 
x-ref/include/asm-parisc/system.h 2003-06-13 22:07:41.000000000 +0200 +++ x/include/asm-parisc/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -141,8 +141,10 @@ static inline void set_eiem(unsigned lon #define mb() __asm__ __volatile__("":::"memory"); /* barrier() */ #define rmb() mb() #define wmb() mb() +#define read_barrier_depends() do { } while(0) #define smp_mb() mb() #define smp_wmb() mb() +#define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { var = value; mb(); } while (0) diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-ppc/system.h x/include/asm-ppc/system.h --- x-ref/include/asm-ppc/system.h 2003-06-13 22:07:41.000000000 +0200 +++ x/include/asm-ppc/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -29,6 +29,7 @@ */ #define mb() __asm__ __volatile__ ("sync" : : : "memory") #define rmb() __asm__ __volatile__ ("sync" : : : "memory") +#define read_barrier_depends() do { } while(0) #define wmb() __asm__ __volatile__ ("eieio" : : : "memory") #define set_mb(var, value) do { var = value; mb(); } while (0) @@ -38,10 +39,12 @@ #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() __asm__ __volatile__("": : :"memory") #define smp_rmb() __asm__ __volatile__("": : :"memory") #define smp_wmb() __asm__ __volatile__("": : :"memory") +#define smp_read_barrier_depends() do { } while(0) #endif /* CONFIG_SMP */ #ifdef __KERNEL__ diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-ppc64/system.h x/include/asm-ppc64/system.h --- x-ref/include/asm-ppc64/system.h 2003-06-27 06:46:24.000000000 +0200 +++ x/include/asm-ppc64/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -33,6 +33,7 @@ */ #define mb() __asm__ __volatile__ ("sync" : : : "memory") #define rmb() __asm__ __volatile__ ("lwsync" : : : "memory") +#define read_barrier_depends() do { } while(0) #define wmb() __asm__ __volatile__ ("eieio" : : : "memory") #define 
set_mb(var, value) do { var = value; mb(); } while (0) @@ -42,10 +43,12 @@ #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() __asm__ __volatile__("": : :"memory") #define smp_rmb() __asm__ __volatile__("": : :"memory") #define smp_wmb() __asm__ __volatile__("": : :"memory") +#define smp_read_barrier_depends() do { } while(0) #endif /* CONFIG_SMP */ #ifdef CONFIG_XMON diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-s390/system.h x/include/asm-s390/system.h --- x-ref/include/asm-s390/system.h 2003-09-01 19:19:26.000000000 +0200 +++ x/include/asm-s390/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -125,10 +125,12 @@ static inline unsigned long __xchg(unsig # define SYNC_OTHER_CORES(x) eieio() #define mb() eieio() #define rmb() eieio() +#define read_barrier_depends() do { } while(0) #define wmb() eieio() #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #define smp_mb__before_clear_bit() smp_mb() #define smp_mb__after_clear_bit() smp_mb() diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-s390x/system.h x/include/asm-s390x/system.h --- x-ref/include/asm-s390x/system.h 2003-09-01 19:19:26.000000000 +0200 +++ x/include/asm-s390x/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -138,10 +138,12 @@ static inline unsigned long __xchg(unsig # define SYNC_OTHER_CORES(x) eieio() #define mb() eieio() #define rmb() eieio() +#define read_barrier_depends() do { } while(0) #define wmb() eieio() #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #define smp_mb__before_clear_bit() smp_mb() #define smp_mb__after_clear_bit() smp_mb() diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-sh/system.h x/include/asm-sh/system.h --- x-ref/include/asm-sh/system.h 2003-08-26 00:13:07.000000000 +0200 +++ 
x/include/asm-sh/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -88,16 +88,19 @@ extern void __xchg_called_with_bad_point #define mb() __asm__ __volatile__ ("": : :"memory") #define rmb() mb() +#define read_barrier_depends() do { } while(0) #define wmb() __asm__ __volatile__ ("": : :"memory") #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) #endif #define set_mb(var, value) do { var = value; mb(); } while (0) diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-sparc/system.h x/include/asm-sparc/system.h --- x-ref/include/asm-sparc/system.h 2003-06-13 22:07:42.000000000 +0200 +++ x/include/asm-sparc/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -297,11 +297,13 @@ extern void __global_restore_flags(unsig #define mb() __asm__ __volatile__ ("" : : : "memory") #define rmb() mb() #define wmb() mb() +#define read_barrier_depends() do { } while(0) #define set_mb(__var, __value) do { __var = __value; mb(); } while(0) #define set_wmb(__var, __value) set_mb(__var, __value) #define smp_mb() __asm__ __volatile__("":::"memory"); #define smp_rmb() __asm__ __volatile__("":::"memory"); #define smp_wmb() __asm__ __volatile__("":::"memory"); +#define smp_read_barrier_depends() read_barrier_depends() #define nop() __asm__ __volatile__ ("nop"); diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-sparc64/system.h x/include/asm-sparc64/system.h --- x-ref/include/asm-sparc64/system.h 2003-09-01 19:19:20.000000000 +0200 +++ x/include/asm-sparc64/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -110,6 +110,7 @@ extern void __global_restore_flags(unsig #define mb() \ membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad"); #define rmb() membar("#LoadLoad") +#define read_barrier_depends() do { } while(0) #define wmb() 
membar("#StoreStore") #define set_mb(__var, __value) \ do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0) @@ -120,10 +121,12 @@ extern void __global_restore_flags(unsig #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() __asm__ __volatile__("":::"memory"); #define smp_rmb() __asm__ __volatile__("":::"memory"); #define smp_wmb() __asm__ __volatile__("":::"memory"); +#define smp_read_barrier_depends() do { } while(0) #endif #define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory") diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/asm-x86_64/system.h x/include/asm-x86_64/system.h --- x-ref/include/asm-x86_64/system.h 2003-06-13 22:07:42.000000000 +0200 +++ x/include/asm-x86_64/system.h 2003-09-01 19:20:07.000000000 +0200 @@ -250,6 +250,18 @@ static inline unsigned long __cmpxchg(vo */ #define mb() asm volatile("mfence":::"memory") #define rmb() asm volatile("lfence":::"memory") +/* + * If for whatever reason any version of the cpu would not + * obey the dependency and read straight into the cache + * without implicit barriers, the noop would race. So I prefer + * to go 100% safe and define it as rmb(), instead of nothing. + * At least for 2.4. 
+ */ +#if 0 +#define read_barrier_depends() do { } while(0) +#else +#define read_barrier_depends() rmb() +#endif #define wmb() asm volatile("sfence":::"memory") #define set_mb(var, value) do { xchg(&var, value); } while (0) #define set_wmb(var, value) do { var = value; wmb(); } while (0) diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/linux/list.h x/include/linux/list.h --- x-ref/include/linux/list.h 2003-09-01 19:19:27.000000000 +0200 +++ x/include/linux/list.h 2003-09-01 19:20:07.000000000 +0200 @@ -1,5 +1,6 @@ #ifndef _LINUX_LIST_H #define _LINUX_LIST_H +#include #if defined(__KERNEL__) || defined(_LVM_H_INCLUDE) @@ -69,6 +70,49 @@ static inline void list_add_tail(list_t } /* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static __inline__ void __list_add_rcu(struct list_head * new, + struct list_head * prev, + struct list_head * next) +{ + new->next = next; + new->prev = prev; + smp_wmb(); + next->prev = new; + prev->next = new; +} + +/** + * list_add_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +static __inline__ void list_add_rcu(struct list_head *new, struct list_head *head) +{ + __list_add_rcu(new, head, head->next); +} + +/** + * list_add_tail_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static __inline__ void list_add_tail_rcu(struct list_head *new, struct list_head *head) +{ + __list_add_rcu(new, head->prev, head); +} + +/* * Delete a list entry by making the prev/next entries * point to each other. 
* @@ -126,6 +170,18 @@ static inline void list_move_tail(list_t } /** + * list_del_rcu - deletes entry from list without re-initialization + * @entry: the element to delete from the list. + * Note: list_empty on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + */ +static inline void list_del_rcu(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); +} + +/** * list_empty - tests whether a list is empty * @head: the list to test. */ @@ -183,6 +239,7 @@ static inline void list_splice_init(list ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member))) #include +#include /** * list_for_each - iterate over a list @@ -226,6 +283,30 @@ static inline void list_splice_init(list #define list_first(head) (((head)->next != (head)) ? (head)->next: (struct list_head *) 0) +/** + * list_for_each_rcu - iterate over an rcu-protected list + * @pos: the &struct list_head to use as a loop counter. + * @head: the head for your list. + */ +#define list_for_each_rcu(pos, head) \ + for (pos = (head)->next, prefetch(pos->next); pos != (head); \ + pos = pos->next, ({ smp_read_barrier_depends(); 0;}), prefetch(pos->next)) + +#define __list_for_each_rcu(pos, head) \ + for (pos = (head)->next; pos != (head); \ + pos = pos->next, ({ smp_read_barrier_depends(); 0;})) + +/** + * list_for_each_safe_rcu - iterate over an rcu-protected list safe + * against removal of list entry + * @pos: the &struct list_head to use as a loop counter. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. 
+ */ +#define list_for_each_safe_rcu(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, ({ smp_read_barrier_depends(); 0;}), n = pos->next) + #endif /* __KERNEL__ || _LVM_H_INCLUDE */ #endif diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/linux/rcupdate.h x/include/linux/rcupdate.h --- x-ref/include/linux/rcupdate.h 2003-09-01 19:19:23.000000000 +0200 +++ x/include/linux/rcupdate.h 2003-09-01 19:20:07.000000000 +0200 @@ -50,7 +50,14 @@ struct rcu_head { INIT_LIST_HEAD(&(ptr)->list); (ptr)->func = NULL; (ptr)->arg = NULL; \ } while (0) - +#ifdef CONFIG_PREEMPT +#define rcu_read_lock() preempt_disable() +#define rcu_read_unlock() preempt_enable() +#else +#define rcu_read_lock() do {} while(0) +#define rcu_read_unlock() do {} while(0) +#endif + extern void FASTCALL(call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)); extern void synchronize_kernel(void);