author    Ingo Molnar <mingo@elte.hu>  2005-01-14 17:40:45 -0800
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-01-14 17:40:45 -0800
commit    8b0a7bd8add3c87583556ce8770bd4bc8f83cf41 (patch)
tree      6d58bc2bfc33ba6ff7654b9dd73d9bd26dad6ab9 /kernel
parent    7619fe4e2f2ce2be7b63c9d50bd396ca2d2bcba1 (diff)
download  history-8b0a7bd8add3c87583556ce8770bd4bc8f83cf41.tar.gz
[PATCH] Don't busy-lock-loop in preemptable spinlocks
Paul Mackerras points out that doing the _raw_spin_trylock each time through the loop will generate tons of unnecessary bus traffic. Instead, after we fail to get the lock we should poll it with simple loads until we see that it is clear and then retry the atomic op. Assuming a reasonable cache design, the loads won't generate any bus traffic until another cpu writes to the cacheline containing the lock.

Agreed.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
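For illustration, here is a minimal, self-contained C11 sketch of the "test and test-and-set" pattern the commit message describes. The names (demo_lock_t, try_acquire, is_locked, acquire) are hypothetical stand-ins for this sketch, not the kernel's API, and a real kernel lock also needs architecture-specific barriers and relaxation hints:

#include <stdatomic.h>

typedef struct {
	atomic_int locked;	/* 0 = free, 1 = held */
} demo_lock_t;

/* One atomic read-modify-write; this is what generates bus traffic. */
static int try_acquire(demo_lock_t *l)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&l->locked, &expected, 1);
}

/* A plain load; it can be satisfied from the local cache until the
 * owner writes to the cacheline holding the lock. */
static int is_locked(demo_lock_t *l)
{
	return atomic_load_explicit(&l->locked, memory_order_relaxed);
}

static void acquire(demo_lock_t *l)
{
	while (!try_acquire(l)) {
		/* Spin on cheap loads instead of hammering the atomic
		 * op; retry the trylock only once the lock looks free. */
		while (is_locked(l))
			;	/* a cpu_relax() equivalent goes here */
	}
}

The design choice is exactly the one Paul Mackerras argues for: the expensive atomic operation runs once per observed release rather than once per loop iteration.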
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/spinlock.c | 14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index b485593430ec1b..beacf8b7cee7e2 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(_write_lock);
* (We do this in a function because inlining it would be excessive.)
*/
-#define BUILD_LOCK_OPS(op, locktype) \
+#define BUILD_LOCK_OPS(op, locktype, is_locked_fn) \
void __lockfunc _##op##_lock(locktype *lock) \
{ \
preempt_disable(); \
@@ -183,7 +183,8 @@ void __lockfunc _##op##_lock(locktype *lock) \
preempt_enable(); \
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- cpu_relax(); \
+ while (is_locked_fn(lock) && (lock)->break_lock) \
+ cpu_relax(); \
preempt_disable(); \
} \
} \
@@ -204,7 +205,8 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype *lock) \
preempt_enable(); \
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- cpu_relax(); \
+ while (is_locked_fn(lock) && (lock)->break_lock) \
+ cpu_relax(); \
preempt_disable(); \
} \
return flags; \
@@ -244,9 +246,9 @@ EXPORT_SYMBOL(_##op##_lock_bh)
* _[spin|read|write]_lock_irqsave()
* _[spin|read|write]_lock_bh()
*/
-BUILD_LOCK_OPS(spin, spinlock_t);
-BUILD_LOCK_OPS(read, rwlock_t);
-BUILD_LOCK_OPS(write, rwlock_t);
+BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
+BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
+BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
#endif /* CONFIG_PREEMPT */
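To make the macro's effect concrete, here is roughly what BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked) now expands to for _spin_lock. The function opening and loop body are taken from the hunks above; the for (;;) skeleton and the trylock line fall outside the diff context and are reconstructed, so details may differ from the actual tree:

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	for (;;) {
		if (likely(_raw_spin_trylock(lock)))
			break;
		/* Contended: re-enable preemption so we can be
		 * scheduled away, and flag the lock as contended. */
		preempt_enable();
		if (!(lock)->break_lock)
			(lock)->break_lock = 1;
		/* The new polling loop: spin on plain reads until the
		 * holder releases the lock, then retry the atomic
		 * trylock above. */
		while (spin_is_locked(lock) && (lock)->break_lock)
			cpu_relax();
		preempt_disable();
	}
}

Note that the read and write variants pass spin_is_locked or rwlock_is_locked as is_locked_fn, since a writer must wait for the lock to be completely free while a reader only needs the writer bit clear.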