author    John Stultz <jstultz@google.com>    2023-02-21 19:02:38 +0000
committer Paul E. McKenney <paulmck@kernel.org>    2023-03-07 10:13:57 -0800
commit    45bcf0bd8cbe163c5aec18570b6befd2193f1a57 (patch)
tree      48c09ba325750b9d2e359059874249f3e9dc9829 /kernel/locking
parent    ae4823e427954d30ab393888a334f9d1fd8cd597 (diff)
download  linux-45bcf0bd8cbe163c5aec18570b6befd2193f1a57.tar.gz
locktorture: With nested locks, occasionally skip main lock
If we're using nested locking to stress things, occasionally skip taking
the main lock, so that we can get some different contention patterns
between the writers (to hopefully get two disjoint blocked trees).

Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: kernel-team@android.com
Co-developed-by: Connor O'Brien <connoro@google.com>
Signed-off-by: Connor O'Brien <connoro@google.com>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
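The skip itself is a cheap probabilistic check: when nested locking is enabled, roughly 1 iteration in 100 leaves the central lock alone. The following is a minimal userspace sketch (not the kernel code) of just that decision, using rand() in place of torture_random() and a plain variable standing in for the nested_locks module parameter:

/*
 * Userspace illustration only: models the ~1% chance of skipping the
 * main lock when nested locking is in use. rand() stands in for
 * torture_random(); nested_locks stands in for the module parameter.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int nested_locks = 4;	/* assumed stand-in for the module parameter */

static bool should_skip_main_lock(void)
{
	/* Mirrors: nested_locks && !(torture_random(&rand) % 100) */
	return nested_locks && !(rand() % 100);
}

int main(void)
{
	int skipped = 0;

	srand((unsigned int)time(NULL));
	for (int i = 0; i < 100000; i++)
		if (should_skip_main_lock())
			skipped++;

	/* Expect roughly 1000 skips out of 100000 iterations. */
	printf("skipped the main lock %d times out of 100000\n", skipped);
	return 0;
}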
Diffstat (limited to 'kernel/locking')
-rw-r--r--    kernel/locking/locktorture.c    37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index c7f264aed5b7a5..9425aff0893657 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -760,6 +760,7 @@ static int lock_torture_writer(void *arg)
 	int tid = lwsp - cxt.lwsa;
 	DEFINE_TORTURE_RANDOM(rand);
 	u32 lockset_mask;
+	bool skip_main_lock;
 
 	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
 	set_user_nice(current, MAX_NICE);
@@ -769,21 +770,37 @@ static int lock_torture_writer(void *arg)
 			schedule_timeout_uninterruptible(1);
 		lockset_mask = torture_random(&rand);
+		/*
+		 * When using nested_locks, we want to occasionally
+		 * skip the main lock so we can avoid always serializing
+		 * the lock chains on that central lock. By skipping the
+		 * main lock occasionally, we can create different
+		 * contention patterns (allowing for multiple disjoint
+		 * blocked trees)
+		 */
+		skip_main_lock = (nested_locks &&
+				  !(torture_random(&rand) % 100));
+
 		cxt.cur_ops->task_boost(&rand);
 		if (cxt.cur_ops->nested_lock)
 			cxt.cur_ops->nested_lock(tid, lockset_mask);
-		cxt.cur_ops->writelock(tid);
-		if (WARN_ON_ONCE(lock_is_write_held))
-			lwsp->n_lock_fail++;
-		lock_is_write_held = true;
-		if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
-			lwsp->n_lock_fail++; /* rare, but... */
-		lwsp->n_lock_acquired++;
+		if (!skip_main_lock) {
+			cxt.cur_ops->writelock(tid);
+			if (WARN_ON_ONCE(lock_is_write_held))
+				lwsp->n_lock_fail++;
+			lock_is_write_held = true;
+			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
+				lwsp->n_lock_fail++; /* rare, but... */
+
+			lwsp->n_lock_acquired++;
+		}
 		cxt.cur_ops->write_delay(&rand);
-		lock_is_write_held = false;
-		WRITE_ONCE(last_lock_release, jiffies);
-		cxt.cur_ops->writeunlock(tid);
+		if (!skip_main_lock) {
+			lock_is_write_held = false;
+			WRITE_ONCE(last_lock_release, jiffies);
+			cxt.cur_ops->writeunlock(tid);
+		}
 		if (cxt.cur_ops->nested_unlock)
 			cxt.cur_ops->nested_unlock(tid, lockset_mask);
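For context, the shape of the resulting writer loop can be mimicked in userspace. The sketch below is purely illustrative and not part of the commit: it assumes pthread mutexes in place of the torture ops, rand_r() in place of torture_random(), and a local nested_locks variable in place of the module parameter. Each writer always takes its randomly selected nested locks in ascending index order, but occasionally leaves the main lock alone, so blocked writers need not all queue on the central lock.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MAX_NESTED_LOCKS 8
#define NWRITERS 4

static pthread_mutex_t main_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t nested[MAX_NESTED_LOCKS];
static int nested_locks = MAX_NESTED_LOCKS;	/* stand-in for the module parameter */

static void *writer(void *arg)
{
	unsigned int seed = (unsigned int)(long)arg;

	for (int iter = 0; iter < 1000; iter++) {
		unsigned int lockset_mask = (unsigned int)rand_r(&seed);
		bool skip_main_lock = nested_locks && !(rand_r(&seed) % 100);

		/* Take the selected nested locks, always in ascending order. */
		for (int i = 0; i < nested_locks; i++)
			if (lockset_mask & (1u << i))
				pthread_mutex_lock(&nested[i]);

		if (!skip_main_lock)
			pthread_mutex_lock(&main_lock);

		usleep(10);	/* hold the locks briefly, like write_delay() */

		if (!skip_main_lock)
			pthread_mutex_unlock(&main_lock);

		/* Release the nested locks in reverse order. */
		for (int i = nested_locks - 1; i >= 0; i--)
			if (lockset_mask & (1u << i))
				pthread_mutex_unlock(&nested[i]);
	}
	return NULL;
}

int main(void)
{
	pthread_t tids[NWRITERS];

	for (int i = 0; i < MAX_NESTED_LOCKS; i++)
		pthread_mutex_init(&nested[i], NULL);
	for (long i = 0; i < NWRITERS; i++)
		pthread_create(&tids[i], NULL, writer, (void *)(i + 1));
	for (int i = 0; i < NWRITERS; i++)
		pthread_join(tids[i], NULL);

	puts("writers finished");
	return 0;
}

Note how, as in the patch, the same skip_main_lock decision guards both the acquire and the release so the bookkeeping stays balanced, and the delay still runs while only the nested locks are held, which is what produces the disjoint contention patterns the commit message describes.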