From: William Lee Irwin III

The following patch series consolidates the various instances of waitqueue
hashing to use a uniform structure and to share the per-zone hashtable among
all waitqueue hashers.  This is expected to increase the number of hashtable
buckets available for waiting on bh's and inodes, and to eliminate statically
allocated kernel data structures for greater node locality and reduced kernel
image size.

Some attempt was made to look similar to Oleg Nesterov's suggested API in
order to provide some kind of credit for independent invention of something
very similar (the original versions of these patches predated my public
postings on the subject of filtered waitqueues).

These patches have the further benefit and intention of enabling aio to use
filtered wakeups by standardizing the data structure passed to wake
functions, so that waitqueue elements embedded in aio structures may be
successfully passed to the filtered-wakeup wake functions, though this patch
series doesn't implement that particular functionality.

Successfully stress-tested on x86-64, and on ia64 in recent prior versions.

This patch:

Move the waitqueue-related functions that do not need static functions in
sched.c out of kernel/fork.c and into kernel/wait.c.

Signed-off-by: Andrew Morton
---

 25-akpm/kernel/Makefile |    2 
 25-akpm/kernel/fork.c   |  125 --------------------------------------------
 25-akpm/kernel/wait.c   |  129 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 130 insertions(+), 126 deletions(-)

diff -puN kernel/fork.c~move-waitqueue-functions-to-kernel-waitc kernel/fork.c
--- 25/kernel/fork.c~move-waitqueue-functions-to-kernel-waitc  2004-09-04 18:19:32.102877712 -0700
+++ 25-akpm/kernel/fork.c  2004-09-04 18:19:32.108876800 -0700
@@ -102,131 +102,6 @@ void __put_task_struct(struct task_struc
         free_task(tsk);
 }
 
-void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
-{
-        unsigned long flags;
-
-        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
-        spin_lock_irqsave(&q->lock, flags);
-        __add_wait_queue(q, wait);
-        spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(add_wait_queue);
-
-void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
-{
-        unsigned long flags;
-
-        wait->flags |= WQ_FLAG_EXCLUSIVE;
-        spin_lock_irqsave(&q->lock, flags);
-        __add_wait_queue_tail(q, wait);
-        spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(add_wait_queue_exclusive);
-
-void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&q->lock, flags);
-        __remove_wait_queue(q, wait);
-        spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(remove_wait_queue);
-
-
-/*
- * Note: we use "set_current_state()" _after_ the wait-queue add,
- * because we need a memory barrier there on SMP, so that any
- * wake-function that tests for the wait-queue being active
- * will be guaranteed to see waitqueue addition _or_ subsequent
- * tests in this thread will see the wakeup having taken place.
- *
- * The spin_unlock() itself is semi-permeable and only protects
- * one way (it only protects stuff inside the critical region and
- * stops them from bleeding out - it would still allow subsequent
- * loads to move into the the critical region).
- */
-void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
-        unsigned long flags;
-
-        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
-        spin_lock_irqsave(&q->lock, flags);
-        if (list_empty(&wait->task_list))
-                __add_wait_queue(q, wait);
-        /*
-         * don't alter the task state if this is just going to
-         * queue an async wait queue callback
-         */
-        if (is_sync_wait(wait))
-                set_current_state(state);
-        spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(prepare_to_wait);
-
-void fastcall
-prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
-        unsigned long flags;
-
-        wait->flags |= WQ_FLAG_EXCLUSIVE;
-        spin_lock_irqsave(&q->lock, flags);
-        if (list_empty(&wait->task_list))
-                __add_wait_queue_tail(q, wait);
-        /*
-         * don't alter the task state if this is just going to
-         * queue an async wait queue callback
-         */
-        if (is_sync_wait(wait))
-                set_current_state(state);
-        spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(prepare_to_wait_exclusive);
-
-void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
-{
-        unsigned long flags;
-
-        __set_current_state(TASK_RUNNING);
-        /*
-         * We can check for list emptiness outside the lock
-         * IFF:
-         * - we use the "careful" check that verifies both
-         *   the next and prev pointers, so that there cannot
-         *   be any half-pending updates in progress on other
-         *   CPU's that we haven't seen yet (and that might
-         *   still change the stack area.
-         * and
-         * - all other users take the lock (ie we can only
-         *   have _one_ other CPU that looks at or modifies
-         *   the list).
-         */
-        if (!list_empty_careful(&wait->task_list)) {
-                spin_lock_irqsave(&q->lock, flags);
-                list_del_init(&wait->task_list);
-                spin_unlock_irqrestore(&q->lock, flags);
-        }
-}
-
-EXPORT_SYMBOL(finish_wait);
-
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
-{
-        int ret = default_wake_function(wait, mode, sync, key);
-
-        if (ret)
-                list_del_init(&wait->task_list);
-        return ret;
-}
-
-EXPORT_SYMBOL(autoremove_wake_function);
-
 void __init fork_init(unsigned long mempages)
 {
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
diff -puN kernel/Makefile~move-waitqueue-functions-to-kernel-waitc kernel/Makefile
--- 25/kernel/Makefile~move-waitqueue-functions-to-kernel-waitc  2004-09-04 18:19:32.103877560 -0700
+++ 25-akpm/kernel/Makefile  2004-09-04 18:19:32.108876800 -0700
@@ -7,7 +7,7 @@ obj-y     = sched.o fork.o exec_domain.o
             sysctl.o capability.o ptrace.o timer.o user.o \
             signal.o sys.o kmod.o workqueue.o pid.o \
             rcupdate.o intermodule.o extable.o params.o posix-timers.o \
-            kthread.o
+            kthread.o wait.o
 
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
diff -puN /dev/null kernel/wait.c
--- /dev/null  2003-09-15 06:40:47.000000000 -0700
+++ 25-akpm/kernel/wait.c  2004-09-04 18:19:32.109876648 -0700
@@ -0,0 +1,129 @@
+/*
+ * Generic waiting primitives.
+ *
+ * (C) 2004 William Irwin, Oracle
+ */
+#include
+#include
+#include
+#include
+#include
+
+void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+        unsigned long flags;
+
+        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+        spin_lock_irqsave(&q->lock, flags);
+        __add_wait_queue(q, wait);
+        spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(add_wait_queue);
+
+void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+{
+        unsigned long flags;
+
+        wait->flags |= WQ_FLAG_EXCLUSIVE;
+        spin_lock_irqsave(&q->lock, flags);
+        __add_wait_queue_tail(q, wait);
+        spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(add_wait_queue_exclusive);
+
+void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&q->lock, flags);
+        __remove_wait_queue(q, wait);
+        spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(remove_wait_queue);
+
+
+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the the critical region).
+ */
+void fastcall
+prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+        unsigned long flags;
+
+        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+        spin_lock_irqsave(&q->lock, flags);
+        if (list_empty(&wait->task_list))
+                __add_wait_queue(q, wait);
+        /*
+         * don't alter the task state if this is just going to
+         * queue an async wait queue callback
+         */
+        if (is_sync_wait(wait))
+                set_current_state(state);
+        spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(prepare_to_wait);
+
+void fastcall
+prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+        unsigned long flags;
+
+        wait->flags |= WQ_FLAG_EXCLUSIVE;
+        spin_lock_irqsave(&q->lock, flags);
+        if (list_empty(&wait->task_list))
+                __add_wait_queue_tail(q, wait);
+        /*
+         * don't alter the task state if this is just going to
+         * queue an async wait queue callback
+         */
+        if (is_sync_wait(wait))
+                set_current_state(state);
+        spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(prepare_to_wait_exclusive);
+
+void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+{
+        unsigned long flags;
+
+        __set_current_state(TASK_RUNNING);
+        /*
+         * We can check for list emptiness outside the lock
+         * IFF:
+         * - we use the "careful" check that verifies both
+         *   the next and prev pointers, so that there cannot
+         *   be any half-pending updates in progress on other
+         *   CPU's that we haven't seen yet (and that might
+         *   still change the stack area.
+         * and
+         * - all other users take the lock (ie we can only
+         *   have _one_ other CPU that looks at or modifies
+         *   the list).
+         */
+        if (!list_empty_careful(&wait->task_list)) {
+                spin_lock_irqsave(&q->lock, flags);
+                list_del_init(&wait->task_list);
+                spin_unlock_irqrestore(&q->lock, flags);
+        }
+}
+EXPORT_SYMBOL(finish_wait);
+
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+        int ret = default_wake_function(wait, mode, sync, key);
+
+        if (ret)
+                list_del_init(&wait->task_list);
+        return ret;
+}
+EXPORT_SYMBOL(autoremove_wake_function);
_
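For readers unfamiliar with these helpers, the sketch below shows the canonical
wait loop that the "set_current_state() after the wait-queue add" comment above
is describing: queue the waiter, set the task state, re-check the condition,
and only then sleep.  It is illustrative only and not part of the patch; the
waitqueue head my_event_wq and the event_pending flag are made-up names, and
DEFINE_WAIT() supplies autoremove_wake_function (moved by this patch) as the
wake callback.

/*
 * Illustrative sketch only (not part of this patch): typical use of
 * prepare_to_wait()/finish_wait().  "my_event_wq", "event_pending",
 * "wait_for_event" and "post_event" are hypothetical names.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_event_wq);
static int event_pending;

static void wait_for_event(void)
{
        DEFINE_WAIT(wait);      /* wake function is autoremove_wake_function */

        for (;;) {
                /*
                 * Add ourselves to the queue first, then set the task state
                 * (see the memory-barrier note), then re-check the condition.
                 */
                prepare_to_wait(&my_event_wq, &wait, TASK_INTERRUPTIBLE);
                if (event_pending)
                        break;
                schedule();     /* sleep until woken */
        }
        /* back to TASK_RUNNING; dequeue if the wake function did not */
        finish_wait(&my_event_wq, &wait);
}

static void post_event(void)
{
        event_pending = 1;
        wake_up(&my_event_wq);  /* wakes the waiter; autoremove dequeues it */
}

Because the waiter is on the queue with its state set before it tests the
condition, a wakeup issued by post_event() at any point after that test cannot
be lost, which is exactly the ordering the moved comment block documents.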