Diffstat (limited to 'patches/0201-x86-Convert-mce-timer-to-hrtimer.patch')
-rw-r--r--  patches/0201-x86-Convert-mce-timer-to-hrtimer.patch  150
1 file changed, 150 insertions, 0 deletions
diff --git a/patches/0201-x86-Convert-mce-timer-to-hrtimer.patch b/patches/0201-x86-Convert-mce-timer-to-hrtimer.patch
new file mode 100644
index 0000000..f90d485
--- /dev/null
+++ b/patches/0201-x86-Convert-mce-timer-to-hrtimer.patch
@@ -0,0 +1,150 @@
+From 367f65888c6a9acc8ce518f561c706576967828f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 13 Dec 2010 16:33:39 +0100
+Subject: [PATCH 201/254] x86: Convert mce timer to hrtimer
+
+mce_timer is started in atomic contexts of CPU bringup. This results
+in might_sleep() warnings on RT. Convert mce_timer to an hrtimer to
+avoid this.
+
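+The core of the conversion, as an illustrative sketch of the pattern
+rather than the exact hunks below (poll_cb, poll_once, poll_timer and
+interval are hypothetical names): a self-rearming timer_list callback
+becomes an hrtimer callback that forwards its own expiry and returns
+HRTIMER_RESTART, and the open-coded arming moves to
+hrtimer_start_range_ns():
+
+	/* Before: struct timer_list poll_timer, re-armed from its callback */
+	static void poll_cb(unsigned long data)
+	{
+		poll_once();
+		mod_timer(&poll_timer, jiffies + interval);
+	}
+
+	/* After: struct hrtimer poll_timer, made periodic via hrtimer_forward() */
+	static enum hrtimer_restart poll_cb(struct hrtimer *t)
+	{
+		poll_once();
+		/* jiffies interval -> ktime; overrun count is ignored here */
+		hrtimer_forward(t, t->base->get_time(),
+				ns_to_ktime(jiffies_to_usecs(interval) * 1000));
+		return HRTIMER_RESTART;
+	}
+
+	/* Arming: relative expiry, pinned so the poll stays on the CPU
+	 * whose MCE banks it checks, like add_timer_on() did before. */
+	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	poll_timer.function = poll_cb;
+	hrtimer_start_range_ns(&poll_timer,
+			       ns_to_ktime(jiffies_to_usecs(interval) * 1000),
+			       0, HRTIMER_MODE_REL_PINNED);
+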
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kernel/cpu/mcheck/mce.c | 49 ++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 26 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 61604ae..473e40d 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -38,6 +38,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
+
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+@@ -1245,17 +1246,14 @@ void mce_log_therm_throt_event(__u64 status)
+ * poller finds an MCE, poll 2x faster. When the poller finds no more
+ * errors, poll 2x slower (up to check_interval seconds).
+ */
+-static int check_interval = 5 * 60; /* 5 minutes */
++static unsigned long check_interval = 5 * 60; /* 5 minutes */
+
+-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+
+-static void mce_start_timer(unsigned long data)
++static enum hrtimer_restart mce_start_timer(struct hrtimer *timer)
+ {
+- struct timer_list *t = &per_cpu(mce_timer, data);
+- int *n;
+-
+- WARN_ON(smp_processor_id() != data);
++ unsigned long *n;
+
+ if (mce_available(__this_cpu_ptr(&cpu_info))) {
+ machine_check_poll(MCP_TIMESTAMP,
+@@ -1268,21 +1266,22 @@ static void mce_start_timer(unsigned long data)
+ */
+ n = &__get_cpu_var(mce_next_interval);
+ if (mce_notify_irq())
+- *n = max(*n/2, HZ/100);
++ *n = max(*n/2, HZ/100UL);
+ else
+- *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
++ *n = min(*n*2, round_jiffies_relative(check_interval*HZ));
+
+- t->expires = jiffies + *n;
+- add_timer_on(t, smp_processor_id());
++ hrtimer_forward(timer, timer->base->get_time(),
++ ns_to_ktime(jiffies_to_usecs(*n) * 1000));
++ return HRTIMER_RESTART;
+ }
+
+-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
+ static void mce_timer_delete_all(void)
+ {
+ int cpu;
+
+ for_each_online_cpu(cpu)
+- del_timer_sync(&per_cpu(mce_timer, cpu));
++ hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
+
+ static void mce_do_trigger(struct work_struct *work)
+@@ -1512,10 +1511,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
+
+ static void __mcheck_cpu_init_timer(void)
+ {
+- struct timer_list *t = &__get_cpu_var(mce_timer);
+- int *n = &__get_cpu_var(mce_next_interval);
++ struct hrtimer *t = &__get_cpu_var(mce_timer);
++ unsigned long *n = &__get_cpu_var(mce_next_interval);
+
+- setup_timer(t, mce_start_timer, smp_processor_id());
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_start_timer;
+
+ if (mce_ignore_ce)
+ return;
+@@ -1523,8 +1523,9 @@ static void __mcheck_cpu_init_timer(void)
+ *n = check_interval * HZ;
+ if (!*n)
+ return;
+- t->expires = round_jiffies(jiffies + *n);
+- add_timer_on(t, smp_processor_id());
++
++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(*n) * 1000),
++ 0, HRTIMER_MODE_REL_PINNED);
+ }
+
+ /* Handle unconfigured int18 (should never happen) */
+@@ -2176,6 +2177,8 @@ static void __cpuinit mce_disable_cpu(void *h)
+ if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ return;
+
++ hrtimer_cancel(&__get_cpu_var(mce_timer));
++
+ if (!(action & CPU_TASKS_FROZEN))
+ cmci_clear();
+ for (i = 0; i < banks; i++) {
+@@ -2202,6 +2205,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
+ if (b->init)
+ wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ }
++ __mcheck_cpu_init_timer();
+ }
+
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+@@ -2209,7 +2213,6 @@ static int __cpuinit
+ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+ unsigned int cpu = (unsigned long)hcpu;
+- struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ switch (action) {
+ case CPU_ONLINE:
+@@ -2226,16 +2229,10 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+- del_timer_sync(t);
+ smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+ break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+- if (!mce_ignore_ce && check_interval) {
+- t->expires = round_jiffies(jiffies +
+- __get_cpu_var(mce_next_interval));
+- add_timer_on(t, cpu);
+- }
+ smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+ break;
+ case CPU_POST_DEAD:
+--
+1.7.10.4
+