From: David Woodhouse

Debugging infrastructure to drop a stack trace if someone calls one of the
sleep_on() functions without lock_kernel() held.

---

 25-akpm/include/linux/smp_lock.h |    4 +++-
 25-akpm/kernel/sched.c           |   17 +++++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff -puN include/linux/smp_lock.h~sleep_on-needs_lock_kernel include/linux/smp_lock.h
--- 25/include/linux/smp_lock.h~sleep_on-needs_lock_kernel	Thu Jan 22 13:25:31 2004
+++ 25-akpm/include/linux/smp_lock.h	Thu Jan 22 13:25:31 2004
@@ -5,7 +5,9 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+#define BKL_DEBUG /* For testing for sleep_on() abuse */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) || defined(BKL_DEBUG)
 
 extern spinlock_t kernel_flag;
 
diff -puN kernel/sched.c~sleep_on-needs_lock_kernel kernel/sched.c
--- 25/kernel/sched.c~sleep_on-needs_lock_kernel	Thu Jan 22 13:25:31 2004
+++ 25-akpm/kernel/sched.c	Thu Jan 22 13:25:31 2004
@@ -2209,10 +2209,21 @@ EXPORT_SYMBOL(wait_for_completion);
 	__remove_wait_queue(q, &wait);			\
 	spin_unlock_irqrestore(&q->lock, flags);
 
+#define SLEEP_ON_BKLCHECK				\
+	if (unlikely(!kernel_locked()) &&		\
+	    sleep_on_bkl_warnings < 10) {		\
+		sleep_on_bkl_warnings++;		\
+		WARN_ON(1);				\
+	}
+
+static int sleep_on_bkl_warnings;
+
 void interruptible_sleep_on(wait_queue_head_t *q)
 {
 	SLEEP_ON_VAR
 
+	SLEEP_ON_BKLCHECK
+
 	current->state = TASK_INTERRUPTIBLE;
 
 	SLEEP_ON_HEAD
@@ -2226,6 +2237,8 @@ long interruptible_sleep_on_timeout(wait
 {
 	SLEEP_ON_VAR
 
+	SLEEP_ON_BKLCHECK
+
 	current->state = TASK_INTERRUPTIBLE;
 
 	SLEEP_ON_HEAD
@@ -2241,6 +2254,8 @@ void sleep_on(wait_queue_head_t *q)
 {
 	SLEEP_ON_VAR
 
+	SLEEP_ON_BKLCHECK
+
 	current->state = TASK_UNINTERRUPTIBLE;
 
 	SLEEP_ON_HEAD
@@ -2254,6 +2269,8 @@ long sleep_on_timeout(wait_queue_head_t
 {
 	SLEEP_ON_VAR
 
+	SLEEP_ON_BKLCHECK
+
 	current->state = TASK_UNINTERRUPTIBLE;
 
 	SLEEP_ON_HEAD
_
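
For reference, a minimal sketch of the calling convention the check is meant
to enforce: the sleep_on() family is only safe when the caller already holds
the BKL, so a warning-free caller looks roughly like this (the wait queue
name is illustrative, not taken from the patch):

	static DECLARE_WAIT_QUEUE_HEAD(my_waitqueue);	/* example wait queue */

	lock_kernel();			/* take the BKL; kernel_locked() is now true */
	sleep_on(&my_waitqueue);	/* SLEEP_ON_BKLCHECK stays silent */
	unlock_kernel();		/* drop the BKL again */

	sleep_on(&my_waitqueue);	/* BKL not held: with this patch applied, the
					   first ten such calls dump a stack trace
					   via WARN_ON(1) */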