From 23b388c973772e1e5fa418e211ebfb2d4c018dbf Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 3 Jul 2009 08:30:02 -0500
Subject: [PATCH] x86: preempt-rt preparatory patches for x86 (32bit)

commit 2469057e21ee375fc84d93a6c109f17682fc8320 in tip.

[PG: drop _raw --> __raw remapping, we now have arch_spin instead.
 See upstream 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 and
 445c89514be242b1b0080056d50bdc1b72adeb5c ]

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
 arch/x86/include/asm/tlbflush.h |    2 ++
 arch/x86/kernel/apic/nmi.c      |    2 ++
 arch/x86/kernel/early_printk.c  |    2 +-
 arch/x86/kernel/head64.c        |    6 +++++-
 arch/x86/kernel/process_64.c    |    4 +++-
 arch/x86/kernel/signal.c        |    7 +++++++
 arch/x86/kernel/smp.c           |   10 ++++++++++
 7 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 7f3eba0..2601600 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -17,7 +17,9 @@
 
 static inline void __native_flush_tlb(void)
 {
+	preempt_disable();
 	native_write_cr3(native_read_cr3());
+	preempt_enable();
 }
 
 static inline void __native_flush_tlb_global(void)
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 1edaf15..4e41ae6 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -92,7 +92,9 @@ static inline unsigned int get_timer_irqs(int cpu)
  */
 static __init void nmi_cpu_busy(void *data)
 {
+#ifndef CONFIG_PREEMPT_RT
 	local_irq_enable_in_hardirq();
+#endif
 	/*
 	 * Intentionally don't use cpu_relax here. This is
 	 * to make sure that the performance counter really ticks,
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index b9c830c..3c5a9e0 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -166,7 +166,7 @@ static int __initdata early_console_initialized;
 
 asmlinkage void early_printk(const char *fmt, ...)
 {
-	char buf[512];
+	static char buf[512];
 	int n;
 	va_list ap;
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 7147143..cd0ef70 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -30,7 +30,11 @@ static void __init zap_identity_mappings(void)
 {
 	pgd_t *pgd = pgd_offset_k(0UL);
 	pgd_clear(pgd);
-	__flush_tlb_all();
+	/*
+	 * preempt_disable/enable does not work this early in the
+	 * bootup yet:
+	 */
+	write_cr3(read_cr3());
 }
 
 /* Don't add a printk in there. printk relies on the PDA which is not initialized
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index bf82f58..3f90c51 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -146,9 +146,11 @@ void cpu_idle(void)
 		}
 
 		tick_nohz_restart_sched_tick();
+		local_irq_disable();
 		__preempt_enable_no_resched();
-		schedule();
+		__schedule();
 		preempt_disable();
+		local_irq_enable();
 	}
 }
 
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 4fd173c..fccd2c8 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -773,6 +773,13 @@ static void do_signal(struct pt_regs *regs)
 	int signr;
 	sigset_t *oldset;
 
+#ifdef CONFIG_PREEMPT_RT
+	/*
+	 * Fully-preemptible kernel does not need interrupts disabled:
+	 */
+	local_irq_enable();
+	preempt_check_resched();
+#endif
 	/*
 	 * We want the common case to go fast, which is why we may in certain
 	 * cases get here from kernel mode. Just return without doing anything
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210..0b04e39 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -121,6 +121,16 @@ static void native_smp_send_reschedule(int cpu)
 	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
+/*
+ * this function sends a 'reschedule' IPI to all other CPUs.
+ * This is used when RT tasks are starving and other CPUs
+ * might be able to run them:
+ */
+void smp_send_reschedule_allbutself(void)
+{
+	apic->send_IPI_allbutself(RESCHEDULE_VECTOR);
+}
+
 void native_send_call_func_single_ipi(int cpu)
 {
 	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
-- 
1.7.0.4
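
For reference, a sketch of how the new smp_send_reschedule_allbutself() helper
might be used from an RT load-balancing path, matching the intent described in
its comment (RT tasks starving while other CPUs could run them). The caller
below is hypothetical: rt_task() is a real kernel predicate, but
starving_for_too_long() is invented purely for illustration; the patch itself
only adds the IPI helper.

#include <linux/sched.h>
#include <linux/smp.h>

/*
 * Hypothetical caller, for illustration only: if a runnable RT task
 * has been waiting for a CPU too long, kick every other CPU with a
 * reschedule IPI so one of them can pull and run it.
 * starving_for_too_long() is a made-up predicate, not a kernel API.
 */
static void rt_kick_other_cpus(struct task_struct *p)
{
	if (rt_task(p) && starving_for_too_long(p))
		smp_send_reschedule_allbutself();
}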