From 619d3ebee4886d39a0e8eb3a442a137861266dc8 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 24 Jul 2009 10:22:02 +0200
Subject: [PATCH] sched: Debug missed preemption checks

commit 514e0e295511c6a4a54eb0228ccbb519162cc088 in tip.

Developers use preempt_enable_no_resched() in places where the code
calls schedule() immediately afterwards, which is correct. But there
are places where preempt_enable_no_resched() is not followed by a
call to schedule().

Add debug infrastructure to find the offending code. The identified
correct users are converted to use __preempt_enable_no_resched().
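
For illustration, a hypothetical caller of the kind the debug check is
meant to catch looks roughly like this (do_something() is a stand-in,
not real kernel code):

	preempt_disable();
	do_something();
	preempt_enable_no_resched();	/* count drops to zero */
	return;				/* no schedule(), check is lost */

With this patch the debug variant emits a one-time warning here,
because preemption becomes fully effective again without passing
through preempt_check_resched(). Legitimate users stay silent, since
they are converted to __preempt_enable_no_resched().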

For the ever-repeating "preempt_enable_no_resched(); schedule();"
sequence a convenience macro, preempt_enable_and_schedule(), is
introduced.
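
A typical conversion with the new macro then looks like this (sketch
only; the locking around it is stand-in code, not a real call site):

	spin_unlock(&some_lock);
	/* was: preempt_enable_no_resched(); schedule(); */
	preempt_enable_and_schedule();
	preempt_disable();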

Based on a previous patch from Ingo Molnar <mingo@elte.hu>

[PG: kernel/spinlock.c doesn't have the patched lines in 34+]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 include/linux/preempt.h         |   18 ++++++++++++++++--
 include/linux/spinlock_api_up.h |    2 +-
 init/main.c                     |    3 +--
 kernel/mutex.c                  |    3 +--
 kernel/sched.c                  |   18 +++++++++++++++---
 kernel/signal.c                 |    3 +--
 kernel/softirq.c                |    7 +++----
 lib/kernel_lock.c               |    2 +-
 8 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 2e681d9..5c7dba8 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -33,12 +33,24 @@ do { \
 	barrier(); \
 } while (0)
 
-#define preempt_enable_no_resched() \
+#define __preempt_enable_no_resched() \
 do { \
 	barrier(); \
 	dec_preempt_count(); \
 } while (0)
 
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void notrace preempt_enable_no_resched(void);
+#else
+# define preempt_enable_no_resched() __preempt_enable_no_resched()
+#endif
+
+#define preempt_enable_and_schedule() \
+do { \
+	__preempt_enable_no_resched(); \
+	schedule(); \
+} while (0)
+
 #define preempt_check_resched() \
 do { \
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
@@ -47,7 +59,7 @@ do { \
 
 #define preempt_enable() \
 do { \
-	preempt_enable_no_resched(); \
+	__preempt_enable_no_resched(); \
 	barrier(); \
 	preempt_check_resched(); \
 } while (0)
@@ -84,6 +96,8 @@ do { \
 
 #define preempt_disable()		do { } while (0)
 #define preempt_enable_no_resched()	do { } while (0)
+#define __preempt_enable_no_resched()	do { } while (0)
+#define preempt_enable_and_schedule()	schedule()
 #define preempt_enable()		do { } while (0)
 #define preempt_check_resched()		do { } while (0)
 
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index af1f472..d05112d 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -40,7 +40,7 @@
   do { preempt_enable(); __release(lock); (void)(lock); } while (0)
 
 #define __UNLOCK_BH(lock) \
-  do { preempt_enable_no_resched(); local_bh_enable(); \
+  do { __preempt_enable_no_resched(); local_bh_enable(); \
 	  __release(lock); (void)(lock); } while (0)
 
 #define __UNLOCK_IRQ(lock) \
diff --git a/init/main.c b/init/main.c
index 5c85402..136733a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -440,8 +440,7 @@ static noinline void __init_refok rest_init(void)
 	 * at least once to get things moving:
 	 */
 	init_idle_bootup_task(current);
-	preempt_enable_no_resched();
-	schedule();
+	preempt_enable_and_schedule();
 	preempt_disable();
 
 	/* Call into cpu_idle with preempt disabled */
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 632f04c..90ed15f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -249,8 +249,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		preempt_enable_no_resched();
-		schedule();
+		preempt_enable_and_schedule();
 		preempt_disable();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
diff --git a/kernel/sched.c b/kernel/sched.c
index 07ab8fd..a2c158e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3544,6 +3544,19 @@ notrace unsigned long get_parent_ip(unsigned long addr)
 	return addr;
 }
 
+#ifdef CONFIG_DEBUG_PREEMPT
+void notrace preempt_enable_no_resched(void)
+{
+	barrier();
+	dec_preempt_count();
+
+	WARN_ONCE(!preempt_count(),
+	     KERN_ERR "BUG: %s:%d task might have lost a preemption check!\n",
+	     current->comm, current->pid);
+}
+EXPORT_SYMBOL(preempt_enable_no_resched);
+#endif
+
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_PREEMPT_TRACER))
 
@@ -3764,7 +3777,7 @@ need_resched_nonpreemptible:
 		goto need_resched_nonpreemptible;
 	}
 
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 	if (need_resched())
 		goto need_resched;
 }
@@ -4953,9 +4966,8 @@ SYSCALL_DEFINE0(sched_yield)
 	__release(rq->lock);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 	do_raw_spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
 
-	schedule();
+	preempt_enable_and_schedule();
 
 	return 0;
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index 5967e7c..1f90b68 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1658,8 +1658,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 		 */
 		preempt_disable();
 		read_unlock(&tasklist_lock);
-		preempt_enable_no_resched();
-		schedule();
+		preempt_enable_and_schedule();
 	} else {
 		/*
 		 * By the time we got the lock, our tracer went away.
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 55cf435..19ef218 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -308,7 +308,7 @@ void irq_exit(void)
 	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
 		tick_nohz_stop_sched_tick(0);
 #endif
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 }
 
 /*
@@ -699,8 +699,7 @@ static int run_ksoftirqd(void * __bind_cpu)
 	while (!kthread_should_stop()) {
 		preempt_disable();
 		if (!local_softirq_pending()) {
-			preempt_enable_no_resched();
-			schedule();
+			preempt_enable_and_schedule();
 			preempt_disable();
 		}
 
@@ -713,7 +712,7 @@ static int run_ksoftirqd(void * __bind_cpu)
 			if (cpu_is_offline((long)__bind_cpu))
 				goto wait_to_die;
 			do_softirq();
-			preempt_enable_no_resched();
+			__preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
 			rcu_sched_qs((long)__bind_cpu);
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index b135d04..5354922 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -53,7 +53,7 @@ int __lockfunc __reacquire_kernel_lock(void)
 void __lockfunc __release_kernel_lock(void)
 {
 	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 }
 
 /*
-- 
1.7.0.4