From 8afd108cf083ced985c540baa8e8c0e5ac790c2c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 6 Apr 2010 16:51:31 +0200
Subject: [PATCH 170/271] md: raid5: Make raid5_percpu handling RT aware

__raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes "scheduling while atomic"
spews on RT.

Serialize the access to the percpu data with a lock and keep the code
preemptible.
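
In short, the critical section after this change looks roughly like the
sketch below (condensed from the __raid_run_ops() hunk further down; an
illustrative sketch, not a verbatim copy of the code):

	cpu = get_cpu_light();        /* pins the task to this CPU but
	                                 keeps it preemptible on -RT */
	percpu = per_cpu_ptr(conf->percpu, cpu);
	spin_lock(&percpu->lock);     /* serializes the per-CPU
	                                 scribble/spare_page data */
	/* ... run the requested stripe operations ... */
	spin_unlock(&percpu->lock);
	put_cpu_light();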

Reported-by: Udo van den Heuvel <udovdh@xs4all.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
---
 drivers/md/raid5.c |    7 +++++--
 drivers/md/raid5.h |    1 +
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0240576..b6ea794 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1321,8 +1321,9 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	percpu = per_cpu_ptr(conf->percpu, cpu);
+	spin_lock(&percpu->lock);
 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
 		ops_run_biofill(sh);
 		overlap_clear++;
@@ -1374,7 +1375,8 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
 				wake_up(&sh->raid_conf->wait_for_overlap);
 		}
-	put_cpu();
+	spin_unlock(&percpu->lock);
+	put_cpu_light();
 }
 
 #ifdef CONFIG_MULTICORE_RAID456
@@ -4772,6 +4774,7 @@ static int raid5_alloc_percpu(struct r5conf *conf)
 			break;
 		}
 		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
+		spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
 	}
 #ifdef CONFIG_HOTPLUG_CPU
 	conf->cpu_notify.notifier_call = raid456_cpu_notify;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 8d8e139..a784311 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -417,6 +417,7 @@ struct r5conf {
 	int			recovery_disabled;
 	/* per cpu variables */
 	struct raid5_percpu {
+		spinlock_t	lock;	     /* Protection for -RT */
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
 		void		*scribble;   /* space for constructing buffer
 					      * lists and performing address
-- 
1.7.10.4