From 88d5d415836b9a009b3ba84af2d0a9e9837e1c2e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 6 Apr 2010 16:51:31 +0200
Subject: [PATCH] raid5: Make raid5_percpu handling RT aware

commit d980cb7b0412055e7a8b561bf111284536444101 in tip.

__raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes scheduling while atomic
spews on RT.

Serialize the access to the percpu data with a lock and keep the code
preemptible.

Reported-by: Udo van den Heuvel
Signed-off-by: Thomas Gleixner
Tested-by: Udo van den Heuvel
Signed-off-by: Paul Gortmaker
---
 drivers/md/raid5.c | 5 +++--
 drivers/md/raid5.h | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15348c3..9990bae 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1150,8 +1150,9 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	struct raid5_percpu *percpu;
 	unsigned long cpu;
-	cpu = get_cpu();
+	cpu = raw_smp_processor_id();
 	percpu = per_cpu_ptr(conf->percpu, cpu);
+	spin_lock(&percpu->lock);
 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
 		ops_run_biofill(sh);
 		overlap_clear++;
 	}
@@ -1203,7 +1204,7 @@
 		if (test_and_clear_bit(R5_Overlap, &dev->flags))
 			wake_up(&sh->raid_conf->wait_for_overlap);
 	}
-	put_cpu();
+	spin_unlock(&percpu->lock);
 }
 
 #ifdef CONFIG_MULTICORE_RAID456
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 0f86f5e..f3d1515 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -400,6 +400,7 @@ struct raid5_private_data {
 					    */
 	/* per cpu variables */
 	struct raid5_percpu {
+		spinlock_t	lock;	     /* Protection for -RT */
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
 		void		*scribble;   /* space for constructing buffer
 					      * lists and performing address
-- 
1.7.0.4