From 23b388c973772e1e5fa418e211ebfb2d4c018dbf Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:02 -0500
Subject: [PATCH] x86: preempt-rt preparatory patches for x86 (32bit)

commit 2469057e21ee375fc84d93a6c109f17682fc8320 in tip.

[PG: drop _raw --> __raw remapping, we now have arch_spin instead.
 See upstream 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 and
 445c89514be242b1b0080056d50bdc1b72adeb5c ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
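A note on the arch_spin remark in the [PG: ...] comment above: upstream
0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 and
445c89514be242b1b0080056d50bdc1b72adeb5c renamed the low-level lock ops
from __raw_spin_* / raw_spinlock_t to arch_spin_* / arch_spinlock_t,
freeing the raw_ namespace for the never-sleeping RT spinlocks. A
minimal sketch of the renamed API, assuming a tree with those commits
applied (demo_lock and demo() are illustrative names, not part of this
patch):

	/* arch-level spinlock: no lockdep, caller handles preemption */
	static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

	static void demo(void)
	{
		preempt_disable();
		arch_spin_lock(&demo_lock);	/* was __raw_spin_lock() */
		/* critical section */
		arch_spin_unlock(&demo_lock);	/* was __raw_spin_unlock() */
		preempt_enable();
	}
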
 arch/x86/include/asm/tlbflush.h |    2 ++
 arch/x86/kernel/apic/nmi.c      |    2 ++
 arch/x86/kernel/early_printk.c  |    2 +-
 arch/x86/kernel/head64.c        |    6 +++++-
 arch/x86/kernel/process_64.c    |    4 +++-
 arch/x86/kernel/signal.c        |    7 +++++++
 arch/x86/kernel/smp.c           |   10 ++++++++++
 7 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 7f3eba0..2601600 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -17,7 +17,9 @@
 
 static inline void __native_flush_tlb(void)
 {
+	preempt_disable();
 	native_write_cr3(native_read_cr3());
+	preempt_enable();
 }
 
 static inline void __native_flush_tlb_global(void)
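
Annotation: the hunk above closes a migration hole. On a fully
preemptible kernel a task can be moved to another CPU between reading
and writing CR3, so the read-modify-write must be pinned to one CPU.
The pattern, sketched with a hypothetical helper name:

	/* pin the task to one CPU across a per-CPU register update */
	static inline void flush_tlb_this_cpu(void)	/* illustrative name */
	{
		preempt_disable();
		/* a CR3 reload flushes this CPU's non-global TLB entries */
		native_write_cr3(native_read_cr3());
		preempt_enable();	/* may reschedule if preemption is pending */
	}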
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 1edaf15..4e41ae6 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -92,7 +92,9 @@ static inline unsigned int get_timer_irqs(int cpu)
  */
 static __init void nmi_cpu_busy(void *data)
 {
+#ifndef CONFIG_PREEMPT_RT
 	local_irq_enable_in_hardirq();
+#endif
 	/*
 	 * Intentionally don't use cpu_relax here. This is
 	 * to make sure that the performance counter really ticks,
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index b9c830c..3c5a9e0 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -166,7 +166,7 @@ static int __initdata early_console_initialized;
 
 asmlinkage void early_printk(const char *fmt, ...)
 {
-	char buf[512];
+	static char buf[512];
 	int n;
 	va_list ap;
 
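Annotation: making buf static trades 512 bytes of stack, which matters
on the shallow early-boot and interrupt stacks this path can run on,
for a single shared buffer, so correctness now relies on early_printk
callers being serialized. For context, the surrounding function,
paraphrased from kernels of this era:

	asmlinkage void early_printk(const char *fmt, ...)
	{
		static char buf[512];	/* shared: callers must be serialized */
		int n;
		va_list ap;

		va_start(ap, fmt);
		n = vscnprintf(buf, sizeof(buf), fmt, ap);
		early_console->write(early_console, buf, n);
		va_end(ap);
	}
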
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 7147143..cd0ef70 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -30,7 +30,11 @@ static void __init zap_identity_mappings(void)
 {
 	pgd_t *pgd = pgd_offset_k(0UL);
 	pgd_clear(pgd);
-	__flush_tlb_all();
+	/*
+	 * preempt_disable/enable does not work this early in the
+	 * bootup yet:
+	 */
+	write_cr3(read_cr3());
 }
 
 /* Don't add a printk in there. printk relies on the PDA which is not initialized 
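
Annotation: the flush is open-coded here because, after the tlbflush.h
hunk above, __flush_tlb_all() ends up inside a preempt_disable() /
preempt_enable() pair, and as the new comment notes, preempt accounting
is not functional this early in boot. The raw CR3 reload performs the
same non-global TLB flush without touching the preempt count:

	__flush_tlb_all();	/* now implies preempt_disable(): too early here */
	write_cr3(read_cr3());	/* same flush, no preempt machinery needed */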
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index bf82f58..3f90c51 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -146,9 +146,11 @@ void cpu_idle(void)
 		}
 
 		tick_nohz_restart_sched_tick();
+		local_irq_disable();
 		__preempt_enable_no_resched();
-		schedule();
+		__schedule();
 		preempt_disable();
+		local_irq_enable();
 	}
 }
 
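Annotation: the idle-loop reordering presumably closes a lost-wakeup
window: interrupts go off before the scheduler is entered, so a
reschedule IPI cannot slip in between the nohz tick restart and the
context switch, and the bare __schedule() entry is used so the loop
keeps control of the irq and preempt-count state itself. The resulting
loop tail, with descriptive comments added:

	tick_nohz_restart_sched_tick();
	local_irq_disable();		/* close the window for a lost wakeup */
	__preempt_enable_no_resched();	/* drop the count without a resched pass */
	__schedule();			/* bare scheduler entry; caller owns irq state */
	preempt_disable();		/* idle runs with preemption disabled */
	local_irq_enable();
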
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 4fd173c..fccd2c8 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -773,6 +773,13 @@ static void do_signal(struct pt_regs *regs)
 	int signr;
 	sigset_t *oldset;
 
+#ifdef CONFIG_PREEMPT_RT
+	/*
+	 * Fully-preemptible kernel does not need interrupts disabled:
+	 */
+	local_irq_enable();
+	preempt_check_resched();
+#endif
 	/*
 	 * We want the common case to go fast, which is why we may in certain
 	 * cases get here from kernel mode. Just return without doing anything
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210..0b04e39 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -121,6 +121,16 @@ static void native_smp_send_reschedule(int cpu)
 	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
+/*
+ * this function sends a 'reschedule' IPI to all other CPUs.
+ * This is used when RT tasks are starving and other CPUs
+ * might be able to run them:
+ */
+void smp_send_reschedule_allbutself(void)
+{
+	apic->send_IPI_allbutself(RESCHEDULE_VECTOR);
+}
+
 void native_send_call_func_single_ipi(int cpu)
 {
 	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
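
Annotation: smp_send_reschedule_allbutself() is only the mechanism; the
policy side that detects starving RT tasks and kicks the other CPUs
arrives with the later RT scheduler patches. A hypothetical call site,
to show the intended shape (the caller name is illustrative, not from
this series):

	/* somewhere in the RT push/pull balancing path */
	static void kick_all_cpus_for_rt_overload(void)
	{
		/*
		 * A runnable RT task cannot get this CPU; make every
		 * other CPU pass through schedule() so one of them
		 * can pick the task up.
		 */
		smp_send_reschedule_allbutself();
	}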
-- 
1.7.0.4