From af301f446d671a152f4cab4327affba2526bbe7f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 16 Jul 2009 12:23:12 +0200
Subject: [PATCH] genirq: support forced threading of interrupts

commit 8baf330d664a262b4e6d42728b40e6161ef02183 in tip.

Based on the mainline threaded-interrupt infrastructure, we force
thread all interrupts with per-device threads.
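
For illustration, a minimal driver-side sketch of what forced
threading means (hedged: "my_dev_handler", "my_dev_probe" and
"my_dev" are illustrative names, not part of this patch):

	#include <linux/interrupt.h>

	static irqreturn_t my_dev_handler(int irq, void *dev_id)
	{
		/* With this patch the handler runs from the per-device
		 * irq thread unless the request passes IRQF_NODELAY. */
		return IRQ_HANDLED;
	}

	static int my_dev_probe(int irq, void *dev)
	{
		/* Plain request: the core moves the handler to
		 * thread_fn, installs a stub primary handler and marks
		 * the line IRQF_ONESHOT -- see preempt_hardirq_setup()
		 * below.  Passing IRQF_NODELAY instead keeps the
		 * handler in hard interrupt context. */
		return request_irq(irq, my_dev_handler, IRQF_SHARED,
				   "my_dev", dev);
	}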

[PG: put HARDIRQ directly into PFE extra_flags; flatten some other
 irq changes buried in merge commits back into this commit.]
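
In outline, the oneshot flow implemented below (a simplified sketch
with the locking omitted, not literal new code): the stub primary
handler leaves the line masked and wakes the thread; the line is
unmasked again once the last forced thread for it has finished.

	/* hard irq context, in handle_irq_action(): */
	if (desc->status & IRQ_ONESHOT) {
		desc->forced_threads_active |= action->thread_mask;
		return IRQ_WAKE_THREAD;
	}

	/* irq thread, in preempt_hardirq_thread_done(), after
	 * action->thread_fn() has run: */
	desc->forced_threads_active &= ~action->thread_mask;
	if (!desc->forced_threads_active &&
	    !(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(action->irq);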

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 include/linux/interrupt.h |   13 +++++-
 include/linux/irq.h       |    1 +
 include/linux/sched.h     |    1 +
 kernel/irq/chip.c         |    1 +
 kernel/irq/handle.c       |   26 ++++++++++++-
 kernel/irq/manage.c       |   97 +++++++++++++++++++++++++++++++++++++++++---
 kernel/irq/migration.c    |    3 +-
 kernel/sched.c            |    3 +-
 8 files changed, 133 insertions(+), 12 deletions(-)
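
As a worked example of the thread_mask allocation done in
__setup_irq() (a standalone sketch; in the kernel ffz() comes from
the bitops headers):

	unsigned long thread_mask = 0;	/* bits taken by earlier actions */

	/* the first action on the line gets bit 0 */
	unsigned long mask_a = 1UL << ffz(thread_mask);	/* 0x1 */
	thread_mask |= mask_a;

	/* a second shared action gets the next free bit */
	unsigned long mask_b = 1UL << ffz(thread_mask);	/* 0x2 */
	thread_mask |= mask_b;

	/* With BITS_PER_LONG actions already sharing the line there is
	 * no zero bit left -- the case the comment in the patch
	 * deliberately leaves open. */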

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f89e357..563f51e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -95,6 +95,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
 * @thread_fn:	interrupt handler function for threaded interrupts
  * @thread:	thread pointer for threaded interrupts
  * @thread_flags:	flags related to @thread
+ * @thread_mask:	bit mask to account for forced threads
  */
 struct irqaction {
 	irq_handler_t handler;
@@ -107,6 +108,7 @@ struct irqaction {
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
+	unsigned long thread_mask;
 };
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
@@ -323,6 +325,7 @@ static inline int disable_irq_wake(unsigned int irq)
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
+// FIXME: PREEMPT_RT: set_bit()?
 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
 #endif
 
@@ -371,9 +374,15 @@ struct softirq_action
 	void	(*action)(struct softirq_action *);
 };
 
-#define __raise_softirq_irqoff(nr) \
+#ifdef CONFIG_PREEMPT_HARDIRQS
+# define __raise_softirq_irqoff(nr) raise_softirq_irqoff(nr)
+# define __do_raise_softirq_irqoff(nr) \
 	do { or_softirq_pending(1UL << (nr)); } while (0)
-#define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr)
+#else
+# define __raise_softirq_irqoff(nr) \
+	do { or_softirq_pending(1UL << (nr)); } while (0)
+# define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr)
+#endif
 
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 707ab12..7b0a0d4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -201,6 +201,7 @@ struct irq_desc {
 #endif
 #endif
 	atomic_t		threads_active;
+	unsigned long		forced_threads_active;
 	wait_queue_head_t       wait_for_threads;
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6a9d432..cb421c8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1736,6 +1736,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 
 /* Flags in the extra_flags field */
 #define PFE_SOFTIRQ	0x00000001	/* softirq context */
+#define PFE_HARDIRQ	0x00000002	/* hardirq thread */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b7091d5..f308d6c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -309,6 +309,7 @@ static unsigned int default_startup(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
+	desc->status &= ~IRQ_MASKED;
 	desc->chip->enable(irq);
 	return 0;
 }
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 76d5a67..e93b055 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -358,6 +358,25 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
 	       "but no thread function available.", irq, action->name);
 }
 
+/*
+ * Momentary workaround until I have a brighter idea how to handle the
+ * accounting of forced threaded (shared) handlers.
+ */
+irqreturn_t handle_irq_action(unsigned int irq, struct irqaction *action)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc->status & IRQ_ONESHOT) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		desc->forced_threads_active |= action->thread_mask;
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+		return IRQ_WAKE_THREAD;
+	}
+	return action->handler(irq, action->dev_id);
+}
+
 /**
  * handle_IRQ_event - irq action chain handler
  * @irq:	the interrupt number
@@ -375,7 +394,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 
 	do {
 		trace_irq_handler_entry(irq, action);
-		ret = action->handler(irq, action->dev_id);
+		ret = handle_irq_action(irq, action);
 		trace_irq_handler_exit(irq, action, ret);
 
 		switch (ret) {
@@ -452,6 +471,11 @@ unsigned int __do_IRQ(unsigned int irq)
 	struct irqaction *action;
 	unsigned int status;
 
+#ifdef CONFIG_PREEMPT_RT
+	printk(KERN_WARNING "__do_IRQ called for irq %d. "
+	       "PREEMPT_RT will crash your system soon\n", irq);
+	printk(KERN_WARNING "I hope you have a fire-extinguisher handy!\n");
+#endif
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 704e488..8fb33fe 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -280,7 +280,8 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 			goto err_out;
 		/* Prevent probing on this irq: */
 		desc->status = status | IRQ_NOPROBE;
-		check_irq_resend(desc, irq);
+		if (!desc->forced_threads_active)
+			check_irq_resend(desc, irq);
 		/* fall-through */
 	}
 	default:
@@ -465,7 +466,81 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static int irq_wait_for_interrupt(struct irqaction *action)
+#ifdef CONFIG_PREEMPT_HARDIRQS
+/*
+ * If the caller does not request irq threading then the handler
+ * becomes the thread function and we use the above handler as the
+ * primary hardirq context handler.
+ */
+static void preempt_hardirq_setup(struct irqaction *new)
+{
+	if (new->thread_fn || (new->flags & IRQF_NODELAY))
+		return;
+
+	new->flags |= IRQF_ONESHOT;
+	new->thread_fn = new->handler;
+	new->handler = irq_default_primary_handler;
+}
+
+#else
+static inline void preempt_hardirq_setup(struct irqaction *new) { }
+#endif
+
+/*
+ * forced threaded interrupts need to unmask the interrupt line
+ */
+static int preempt_hardirq_thread_done(struct irq_desc *desc,
+					struct irqaction *action)
+{
+	unsigned long masked;
+
+	if (!(desc->status & IRQ_ONESHOT))
+		return 0;
+again:
+	raw_spin_lock_irq(&desc->lock);
+	/*
+	 * Be careful. The hardirq handler might be running on the
+	 * other CPU.
+	 */
+	if (desc->status & IRQ_INPROGRESS) {
+		raw_spin_unlock_irq(&desc->lock);
+		cpu_relax();
+		goto again;
+	}
+
+	/*
+	 * Now check again, whether the thread should run. Otherwise
+	 * we would clear the forced_threads_active bit which was just
+	 * set.
+	 */
+	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) {
+		raw_spin_unlock_irq(&desc->lock);
+		return 1;
+	}
+
+	masked = desc->forced_threads_active;
+	desc->forced_threads_active &= ~action->thread_mask;
+
+	/*
+	 * Unmask the interrupt line when this is the last active
+	 * thread and the interrupt is not disabled.
+	 */
+	if (masked && !desc->forced_threads_active &&
+	    !(desc->status & IRQ_DISABLED)) {
+		if (desc->chip->unmask)
+			desc->chip->unmask(action->irq);
+		/*
+		 * Do we need to call check_irq_resend() here ?
+		 * No. check_irq_resend needs only to be checked when
+		 * we go from IRQ_DISABLED to IRQ_ENABLED state.
+		 */
+	}
+	raw_spin_unlock_irq(&desc->lock);
+	return 0;
+}
+
+static int
+irq_wait_for_interrupt(struct irq_desc *desc, struct irqaction *action)
 {
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -559,9 +634,10 @@ static int irq_thread(void *data)
 	int wake, oneshot = desc->status & IRQ_ONESHOT;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
+	current->extra_flags |= PFE_HARDIRQ;
 	current->irqaction = action;
 
-	while (!irq_wait_for_interrupt(action)) {
+	while (!irq_wait_for_interrupt(desc, action)) {
 
 		irq_thread_check_affinity(desc, action);
 
@@ -631,7 +707,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
-	unsigned long flags;
+	unsigned long flags, thread_mask = 0;
 	int nested, shared = 0;
 	int ret;
 
@@ -657,9 +733,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		rand_initialize_irq(irq);
 	}
 
-	/* Oneshot interrupts are not allowed with shared */
-	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
-		return -EINVAL;
+	/* Preempt-RT setup for forced threading */
+	preempt_hardirq_setup(new);
 
 	/*
 	 * Check whether the interrupt nests into another interrupt
@@ -726,12 +801,20 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		/* add new interrupt at end of irq queue */
 		do {
+			thread_mask |= old->thread_mask;
 			old_ptr = &old->next;
 			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}
 
+	/*
+	 * Setup the thread mask for this irqaction. No risk that ffz
+	 * will fail. If we have 32 resp. 64 devices sharing one irq
+	 * then .....
+	 */
+	new->thread_mask = 1 << ffz(thread_mask);
+
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
 
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 4817563..ea0b492 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -66,7 +66,8 @@ void move_native_irq(int irq)
 	 * If the irq is already in progress, it should be masked.
 	 * If we unmask it, we might cause an interrupt storm on RT.
 	 */
-	if (unlikely(desc->status & IRQ_INPROGRESS))
+	if (unlikely((desc->status & IRQ_INPROGRESS) ||
+		     desc->forced_threads_active))
 		mask = 0;
 
 	if (mask)
diff --git a/kernel/sched.c b/kernel/sched.c
index 86aeeb6..b611e03 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3340,7 +3340,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 
 	/* Add system time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
-	if (hardirq_count() - hardirq_offset)
+	if ((hardirq_count() - hardirq_offset) ||
+	    (p->extra_flags & PFE_HARDIRQ))
 		cpustat->irq = cputime64_add(cpustat->irq, tmp);
 	else if (softirq_count() || (p->extra_flags & PFE_SOFTIRQ))
 		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
-- 
1.7.0.4