author	Linus Torvalds <torvalds@ppc970.osdl.org>	2004-10-25 01:21:01 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2004-10-25 01:21:01 -0700
commit	59a7718e80cd84ffc338cda7e5531d530e3d0749 (patch)
tree	fdb8297060313d509359f72f8593adc21b661358 /kernel
parent	6f60f5cfdd2a51b82479d8285ee2344e74ccb098 (diff)
Allow BKL re-acquire to fail, causing us to re-schedule.
This allows for low-latency handling of BKL contention even with preemption. Previously, because preemption is disabled across a context switch, re-acquiring the kernel lock when resuming a process was non-preemptible.
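For background, here is a minimal sketch of what a failable re-acquire could look like, assuming the BKL is backed by a spinlock named kernel_flag and the per-task lock_depth counter; the trylock loop and the -EAGAIN return convention are illustrative assumptions, not necessarily the exact in-tree implementation:

#include <linux/sched.h>	/* struct task_struct, need_resched() */
#include <linux/spinlock.h>	/* spinlock_t, spin_trylock() */

extern spinlock_t kernel_flag;	/* the BKL spinlock (assumed name) */

/*
 * Illustrative sketch: try to re-take the BKL without blocking.
 * Returns 0 once the lock is held (or if the task never held the
 * BKL), and -EAGAIN when the lock is contended and a reschedule is
 * pending, so schedule() can retry via its need_resched_nonpreemptible
 * path instead of spinning with preemption disabled.
 */
static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (likely(task->lock_depth < 0))
		return 0;		/* task was not holding the BKL */

	while (!spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;	/* caller re-runs the scheduler */
		cpu_relax();
	}
	return 0;
}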
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 54b7402b941031..c926623e5f5cfc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2502,6 +2502,8 @@ asmlinkage void __sched schedule(void)
 need_resched:
 	preempt_disable();
 	prev = current;
+	release_kernel_lock(prev);
+need_resched_nonpreemptible:
 	rq = this_rq();
 
 	/*
@@ -2513,7 +2515,6 @@ need_resched:
 		dump_stack();
 	}
 
-	release_kernel_lock(prev);
 	schedstat_inc(rq, sched_cnt);
 	now = sched_clock();
 	if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG))
@@ -2636,7 +2637,9 @@ switch_tasks:
 	} else
 		spin_unlock_irq(&rq->lock);
 
-	reacquire_kernel_lock(current);
+	prev = current;
+	if (unlikely(reacquire_kernel_lock(prev) < 0))
+		goto need_resched_nonpreemptible;
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
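Note the resulting control flow: release_kernel_lock() is hoisted above the new need_resched_nonpreemptible label, so when reacquire_kernel_lock() reports failure the goto re-enters the scheduler core with preemption still disabled and the BKL already dropped, rather than jumping back through preempt_disable() and unbalancing the preempt count.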