From 861fa3d321111b888dd898156b65da536d3e31ff Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 13 Dec 2010 16:33:39 +0100
Subject: [PATCH] x86: Convert mce timer to hrtimer

commit 76bdf0c3151369f5b15af4baac2708e59347d664 in tip.

mce_timer is started in atomic contexts of cpu bringup. This results
in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
avoid this.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 arch/x86/kernel/cpu/mcheck/mce.c |   49 +++++++++++++++++--------------------
 1 files changed, 23 insertions(+), 26 deletions(-)
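
[ Note: for readers unfamiliar with the pattern, the sketch below shows
  the timer_list to hrtimer conversion in isolation. It is illustrative
  only: the names example_timer/example_fn/example_start and the 100ms
  period are made up for this note, not taken from mce.c; the real
  callback is mce_start_timer() in the diff below, which also adapts the
  polling interval. ]

/* Illustrative sketch, not from mce.c: names and the 100ms period are made up. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

/* hrtimer callbacks run in hard irq context; no sleeping allowed. */
static enum hrtimer_restart example_fn(struct hrtimer *t)
{
	/* ... the periodic work the old timer_list callback did ... */

	/*
	 * Re-arm relative to the timer base's current time and keep the
	 * timer running; this replaces the old pattern of
	 * "t->expires = jiffies + n; add_timer_on(t, cpu);".
	 */
	hrtimer_forward(t, t->base->get_time(),
			ns_to_ktime(100 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}

static void example_start(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_fn;
	/* _PINNED keeps the timer on this CPU, as add_timer_on() did. */
	hrtimer_start(&example_timer, ns_to_ktime(100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL_PINNED);
}

[ Stopping the timer correspondingly becomes hrtimer_cancel() instead of
  del_timer_sync(), as the hunks in mce_cpu_restart() and mce_disable_ce()
  below show. ]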

diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8a6f0af..99a87e2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -36,6 +36,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/debugfs.h>
+#include <linux/jiffies.h>
 
 #include <asm/processor.h>
 #include <asm/hw_irq.h>
@@ -1139,17 +1140,14 @@ void mce_log_therm_throt_event(__u64 status)
  * poller finds an MCE, poll 2x faster.  When the poller finds no more
  * errors, poll 2x slower (up to check_interval seconds).
  */
-static int check_interval = 5 * 60; /* 5 minutes */
+static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
-static DEFINE_PER_CPU(struct timer_list, mce_timer);
+static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
 
-static void mce_start_timer(unsigned long data)
+static enum hrtimer_restart mce_start_timer(struct hrtimer *timer)
 {
-	struct timer_list *t = &per_cpu(mce_timer, data);
-	int *n;
-
-	WARN_ON(smp_processor_id() != data);
+	unsigned long *n;
 
 	if (mce_available(&current_cpu_data)) {
 		machine_check_poll(MCP_TIMESTAMP,
@@ -1162,12 +1160,13 @@ static void mce_start_timer(unsigned long data)
 	 */
 	n = &__get_cpu_var(mce_next_interval);
 	if (mce_notify_irq())
-		*n = max(*n/2, HZ/100);
+		*n = max(*n/2, HZ/100UL);
 	else
-		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
+		*n = min(*n*2, round_jiffies_relative(check_interval*HZ));
 
-	t->expires = jiffies + *n;
-	add_timer_on(t, smp_processor_id());
+	hrtimer_forward(timer, timer->base->get_time(),
+			ns_to_ktime(jiffies_to_usecs(*n) * 1000));
+	return HRTIMER_RESTART;
 }
 
 static void mce_do_trigger(struct work_struct *work)
@@ -1393,10 +1392,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 
 static void __mcheck_cpu_init_timer(void)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(mce_next_interval);
+	struct hrtimer *t = &__get_cpu_var(mce_timer);
+	unsigned long *n = &__get_cpu_var(mce_next_interval);
 
-	setup_timer(t, mce_start_timer, smp_processor_id());
+	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	t->function = mce_start_timer;
 
 	if (mce_ignore_ce)
 		return;
@@ -1404,8 +1404,9 @@ static void __mcheck_cpu_init_timer(void)
 	*n = check_interval * HZ;
 	if (!*n)
 		return;
-	t->expires = round_jiffies(jiffies + *n);
-	add_timer_on(t, smp_processor_id());
+
+	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(*n) * 1000),
+			       0, HRTIMER_MODE_REL_PINNED);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -1717,7 +1718,7 @@ static int mce_resume(struct sys_device *dev)
 
 static void mce_cpu_restart(void *data)
 {
-	del_timer_sync(&__get_cpu_var(mce_timer));
+	hrtimer_cancel(&__get_cpu_var(mce_timer));
 	if (!mce_available(&current_cpu_data))
 		return;
 	__mcheck_cpu_init_generic();
@@ -1736,7 +1737,7 @@ static void mce_disable_ce(void *all)
 	if (!mce_available(&current_cpu_data))
 		return;
 	if (all)
-		del_timer_sync(&__get_cpu_var(mce_timer));
+		hrtimer_cancel(&__get_cpu_var(mce_timer));
 	cmci_clear();
 }
 
@@ -1968,6 +1969,8 @@ static void __cpuinit mce_disable_cpu(void *h)
 	if (!mce_available(&current_cpu_data))
 		return;
 
+	hrtimer_cancel(&__get_cpu_var(mce_timer));
+
 	if (!(action & CPU_TASKS_FROZEN))
 		cmci_clear();
 	for (i = 0; i < banks; i++) {
@@ -1994,6 +1997,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
 	}
+	__mcheck_cpu_init_timer();
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -2001,7 +2005,6 @@ static int __cpuinit
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
 
 	switch (action) {
 	case CPU_ONLINE:
@@ -2018,16 +2021,10 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
-		del_timer_sync(t);
 		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		if (!mce_ignore_ce && check_interval) {
-			t->expires = round_jiffies(jiffies +
-					   __get_cpu_var(mce_next_interval));
-			add_timer_on(t, cpu);
-		}
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
 	case CPU_POST_DEAD:
-- 
1.7.0.4