From: "Anil" In flush_workqueue() for a single_threaded_worqueue case the code flushes the same cpu_workqueue_struct for each online_cpu. Change things so that we only perform the flush once in this case. Signed-off-by: Andrew Morton --- 25-akpm/kernel/workqueue.c | 66 +++++++++++++++++++++++---------------------- 1 files changed, 35 insertions(+), 31 deletions(-) diff -puN kernel/workqueue.c~speedup-flush_workqueue-for-singlethread_workqueue kernel/workqueue.c --- 25/kernel/workqueue.c~speedup-flush_workqueue-for-singlethread_workqueue Fri Jun 4 15:22:50 2004 +++ 25-akpm/kernel/workqueue.c Fri Jun 4 15:27:24 2004 @@ -218,6 +218,33 @@ static int worker_thread(void *__cwq) return 0; } +static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) +{ + if (cwq->thread == current) { + /* + * Probably keventd trying to flush its own queue. So simply run + * it by hand rather than deadlocking. + */ + run_workqueue(cwq); + } else { + DEFINE_WAIT(wait); + long sequence_needed; + + spin_lock_irq(&cwq->lock); + sequence_needed = cwq->insert_sequence; + + while (sequence_needed - cwq->remove_sequence > 0) { + prepare_to_wait(&cwq->work_done, &wait, + TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&cwq->lock); + schedule(); + spin_lock_irq(&cwq->lock); + } + finish_wait(&cwq->work_done, &wait); + spin_unlock_irq(&cwq->lock); + } +} + /* * flush_workqueue - ensure that any scheduled work has run to completion. * @@ -234,42 +261,19 @@ static int worker_thread(void *__cwq) */ void fastcall flush_workqueue(struct workqueue_struct *wq) { - struct cpu_workqueue_struct *cwq; - int cpu; - might_sleep(); - lock_cpu_hotplug(); - for_each_online_cpu(cpu) { - DEFINE_WAIT(wait); - long sequence_needed; - if (is_single_threaded(wq)) - cwq = wq->cpu_wq + 0; /* Always use cpu 0's area. */ - else - cwq = wq->cpu_wq + cpu; - - if (cwq->thread == current) { - /* - * Probably keventd trying to flush its own queue. - * So simply run it by hand rather than deadlocking. - */ - run_workqueue(cwq); - continue; - } - spin_lock_irq(&cwq->lock); - sequence_needed = cwq->insert_sequence; + if (is_single_threaded(wq)) { + /* Always use cpu 0's area. */ + flush_cpu_workqueue(wq->cpu_wq + 0); + } else { + int cpu; - while (sequence_needed - cwq->remove_sequence > 0) { - prepare_to_wait(&cwq->work_done, &wait, - TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&cwq->lock); - schedule(); - spin_lock_irq(&cwq->lock); - } - finish_wait(&cwq->work_done, &wait); - spin_unlock_irq(&cwq->lock); + for_each_online_cpu(cpu) + flush_cpu_workqueue(wq->cpu_wq + cpu); } + unlock_cpu_hotplug(); } _