From: William Lee Irwin III This patch series is solving the "thundering herd" problem that occurs in the mainline implementation of hashed waitqueues. There are two sources of spurious wakeups in such arrangements: (a) Hash collisions that place waiters on different objects on the same waitqueue, which wakes threads falsely when any of the objects hashed to the same queue receives a wakeup. i.e. loss of information about which object a wakeup event is related to. (b) Loss of information about which object a given waiter is waiting on. This precludes wake-one semantics for mutual exclusion scenarios. For instance, a lock bit may be slept on. If there are any waiters on the object, a lock bit release event must wake at least one of them so as to prevent deadlock. But without information as to which waiter is waiting on which object, we must resort to waking all waiters who could possibly be waiting on it. Now, as the lock bit provides mutual exclusion, only one of the waiters woken can proceed, and the remainder will go back to sleep and wait for another event, creating unnecessary system load. Once wake-one semantics are established, only one of the waiters waiting to acquire a lock bit needs to be woken, which measurably reduces system load and improves efficiency (i.e. it's the subject of the benchmarking I've been sending to you). Even beyond the measurable efficiency gains, there are reasons of robustness and responsiveness to motivate addressing the issue of thundering herds. In a real-life scenario I've been personally involved in resolving, the thundering herd issue caused powerful modern SMP machines with fast IO systems to be unresponsive to user input for a minute at a time or more. Analogues of these patches for the distro kernels involved fully resolved the issue to the customer's satisfaction and obviated workarounds to limit the pagecache's size. 
The latest spin of these patches basically shoves more pieces of the logic into the wakeup functions, with some efficiency gains from sharing the hot codepath with the rest of the kernel, and a slightly larger diff than the patches with the newly-introduced entrypoint. Writing these was motivated by the push to insulate sched.c from more of the details of wakeup semantics by putting more of the logic into the wakeup functions. In order to accomplish this while still solving (b), the wakeup functions grew a new argument for communication about what object a wakeup event is related to to be passed by the waker. ========= This patch provides an additional argument to wakeup functions so that information may be passed from the waker to the waiter. This is provided as a separate patch so that the overhead of the additional argument can be measured in isolation. No change in performance was observable here. --- 25-akpm/fs/eventpoll.c | 4 ++-- 25-akpm/include/linux/wait.h | 6 +++--- 25-akpm/kernel/fork.c | 4 ++-- 25-akpm/kernel/sched.c | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff -puN fs/eventpoll.c~wakefunc fs/eventpoll.c --- 25/fs/eventpoll.c~wakefunc 2004-05-07 00:48:33.820749968 -0700 +++ 25-akpm/fs/eventpoll.c 2004-05-07 00:48:33.830748448 -0700 @@ -309,7 +309,7 @@ static int ep_modify(struct eventpoll *e static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi); static int ep_unlink(struct eventpoll *ep, struct epitem *epi); static int ep_remove(struct eventpoll *ep, struct epitem *epi); -static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync); +static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key); static int ep_eventpoll_close(struct inode *inode, struct file *file); static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait); static int ep_collect_ready_items(struct eventpoll *ep, @@ -1296,7 +1296,7 @@ eexit_1: * machanism. 
It is called by the stored file descriptors when they * have events to report. */ -static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync) +static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) { int pwake = 0; unsigned long flags; diff -puN include/linux/wait.h~wakefunc include/linux/wait.h --- 25/include/linux/wait.h~wakefunc 2004-05-07 00:48:33.822749664 -0700 +++ 25-akpm/include/linux/wait.h 2004-05-07 00:57:22.338403072 -0700 @@ -17,8 +17,8 @@ #include typedef struct __wait_queue wait_queue_t; -typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync); -extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync); +typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key); +int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); struct __wait_queue { unsigned int flags; @@ -240,7 +240,7 @@ void FASTCALL(prepare_to_wait(wait_queue void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)); void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait)); -int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync); +int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); #define DEFINE_WAIT(name) \ wait_queue_t name = { \ diff -puN kernel/fork.c~wakefunc kernel/fork.c --- 25/kernel/fork.c~wakefunc 2004-05-07 00:48:33.823749512 -0700 +++ 25-akpm/kernel/fork.c 2004-05-07 00:48:33.832748144 -0700 @@ -199,9 +199,9 @@ void fastcall finish_wait(wait_queue_hea EXPORT_SYMBOL(finish_wait); -int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync) +int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) { - int ret = default_wake_function(wait, mode, sync); + int ret = default_wake_function(wait, mode, sync, key); if (ret) list_del_init(&wait->task_list); diff -puN kernel/sched.c~wakefunc kernel/sched.c 
--- 25/kernel/sched.c~wakefunc 2004-05-07 00:48:33.825749208 -0700 +++ 25-akpm/kernel/sched.c 2004-05-07 00:57:22.341402616 -0700 @@ -2484,7 +2484,7 @@ need_resched: EXPORT_SYMBOL(preempt_schedule); #endif /* CONFIG_PREEMPT */ -int default_wake_function(wait_queue_t *curr, unsigned mode, int sync) +int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key) { task_t *p = curr->task; return try_to_wake_up(p, mode, sync); @@ -2511,7 +2511,7 @@ static void __wake_up_common(wait_queue_ unsigned flags; curr = list_entry(tmp, wait_queue_t, task_list); flags = curr->flags; - if (curr->func(curr, mode, sync) && + if (curr->func(curr, mode, sync, NULL) && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) break; _