Signed-off-by: Andi Kleen

Index: linux/include/asm-x86_64/pda.h
===================================================================
--- linux.orig/include/asm-x86_64/pda.h
+++ linux/include/asm-x86_64/pda.h
@@ -10,10 +10,8 @@
 struct x8664_pda {
 	struct task_struct *pcurrent;	/* Current process */
 	unsigned long data_offset;	/* Per cpu data offset from linker address */
-	struct x8664_pda *me;		/* Pointer to itself */
 	unsigned long kernelstack;	/* top of kernel stack for current */
 	unsigned long oldrsp;		/* user rsp for system call */
-	unsigned long irqrsp;		/* Old rsp for interrupts. */
 	int irqcount;			/* Irq nesting counter. Starts with -1 */
 	int cpunumber;			/* Logical CPU number */
 	char *irqstackptr;		/* top of irqstack */
@@ -42,13 +40,14 @@ extern void __bad_pda_field(void);
 #define pda_offset(field) offsetof(struct x8664_pda, field)
 
 #define pda_to_op(op,field,val) do { \
+	typedef typeof_field(struct x8664_pda, field) T__; \
 	switch (sizeof_field(struct x8664_pda, field)) { \
 case 2: \
-asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 case 4: \
-asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 case 8: \
-asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
 default: __bad_pda_field(); \
 	} \
 	} while (0)
@@ -58,7 +57,7 @@ asm volatile(op "q %0,%%gs:%P1"::"r" (va
  * Unfortunately removing them causes all hell to break lose currently. */
 #define pda_from_op(op,field) ({ \
-	typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
+	typeof_field(struct x8664_pda, field) ret__; \
 	switch (sizeof_field(struct x8664_pda, field)) { \
 case 2: \
 asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
@@ -75,6 +74,7 @@ asm volatile(op "q %%gs:%P1,%0":"=r" (re
 #define write_pda(field,val) pda_to_op("mov",field,val)
 #define add_pda(field,val) pda_to_op("add",field,val)
 #define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
 
 #endif

Signed-off-by: Andi Kleen

Index: linux/include/asm-x86_64/hardirq.h
===================================================================
--- linux.orig/include/asm-x86_64/hardirq.h
+++ linux/include/asm-x86_64/hardirq.h
@@ -9,11 +9,12 @@
 
 #define __ARCH_IRQ_STAT 1
 
-/* Generate a lvalue for a pda member. Should fix softirq.c instead to use
-   special access macros. This would generate better code. */
-#define __IRQ_STAT(cpu,member) (read_pda(me)->member)
+#define local_softirq_pending() read_pda(__softirq_pending)
 
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#define __ARCH_SET_SOFTIRQ_PENDING 1
+
+#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
+#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
 
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
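As a rough illustration of what the pda_to_op() change buys, here is a small userspace sketch. It is not the kernel code: it writes to an ordinary global struct instead of the %gs-relative PDA, drops the "memory" clobber, and every name in it (demo_pda, demo_to_op, demo_write, demo_or) is made up for the example. What it does show is the size-dispatched inline asm, and how the "ri" constraint together with the (T__) cast lets a constant argument be encoded as an immediate of the field's width instead of being forced through a register, which is also what makes a cheap or_pda() worthwhile.

/* Userspace sketch only; see the caveats above. */
#include <stdio.h>

struct demo_pda {
	int irqcount;
	unsigned int __softirq_pending;
	unsigned long kernelstack;
};

static struct demo_pda pda;

#define typeof_field(type, field)	typeof(((type *)0)->field)
#define sizeof_field(type, field)	sizeof(((type *)0)->field)

/* Size-dispatched write, same shape as pda_to_op(): "ri" lets gcc pick a
 * register or an immediate for val, and the (T__) cast keeps that operand
 * the same width as the target field. */
#define demo_to_op(op, field, val) do { \
	typedef typeof_field(struct demo_pda, field) T__; \
	switch (sizeof_field(struct demo_pda, field)) { \
	case 4: \
		asm volatile(op "l %1,%0" \
			: "+m" (pda.field) : "ri" ((T__)(val))); \
		break; \
	case 8: \
		asm volatile(op "q %1,%0" \
			: "+m" (pda.field) : "ri" ((T__)(val))); \
		break; \
	} \
} while (0)

#define demo_write(field, val)	demo_to_op("mov", field, val)
#define demo_or(field, val)	demo_to_op("or", field, val)

int main(void)
{
	demo_write(__softirq_pending, 0);
	demo_or(__softirq_pending, 1U << 3);	/* set one pending bit */
	demo_write(kernelstack, 0x1000UL);	/* small constant, fits an immediate */
	printf("pending=%#x kernelstack=%#lx\n",
	       pda.__softirq_pending, pda.kernelstack);
	return 0;
}

Like the real macro, the unused switch arm relies on the compiler dropping dead code, so build with optimization (as the kernel always is) if you feed it non-constant values; with the constants above it assembles as-is, and the OR should come out as a single orl with an immediate against the field.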
Signed-off-by: Andi Kleen

Index: linux/arch/x86_64/kernel/setup64.c
===================================================================
--- linux.orig/arch/x86_64/kernel/setup64.c
+++ linux/arch/x86_64/kernel/setup64.c
@@ -119,7 +119,6 @@ void pda_init(int cpu)
 	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
 	wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
 
-	pda->me = pda;
 	pda->cpunumber = cpu;
 	pda->irqcount = -1;
 	pda->kernelstack =

Signed-off-by: Andi Kleen

Index: linux/kernel/softirq.c
===================================================================
--- linux.orig/kernel/softirq.c
+++ linux/kernel/softirq.c
@@ -84,7 +84,7 @@ asmlinkage void __do_softirq(void)
 	cpu = smp_processor_id();
 restart:
 	/* Reset the pending bitmask before enabling irqs */
-	local_softirq_pending() = 0;
+	set_softirq_pending(0);
 
 	local_irq_enable();

Signed-off-by: Andi Kleen

Index: linux/arch/x86_64/kernel/asm-offsets.c
===================================================================
--- linux.orig/arch/x86_64/kernel/asm-offsets.c
+++ linux/arch/x86_64/kernel/asm-offsets.c
@@ -39,7 +39,6 @@ int main(void)
 	ENTRY(kernelstack);
 	ENTRY(oldrsp);
 	ENTRY(pcurrent);
-	ENTRY(irqrsp);
 	ENTRY(irqcount);
 	ENTRY(cpunumber);
 	ENTRY(irqstackptr);

Signed-off-by: Andi Kleen

Index: linux/include/linux/interrupt.h
===================================================================
--- linux.orig/include/linux/interrupt.h
+++ linux/include/linux/interrupt.h
@@ -57,6 +57,11 @@ extern void disable_irq(unsigned int irq
 extern void enable_irq(unsigned int irq);
 #endif
 
+#ifndef __ARCH_SET_SOFTIRQ_PENDING
+#define set_softirq_pending(x) (local_softirq_pending() = (x))
+#define or_softirq_pending(x) (local_softirq_pending() |= (x))
+#endif
+
 /*
  * Temporary defines for UP kernels, until all code gets fixed.
  */
@@ -123,7 +128,7 @@ struct softirq_action
 asmlinkage void do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
 extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
+#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
 extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
 extern void FASTCALL(raise_softirq(unsigned int nr));
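For completeness, the split between the x86-64 path and the generic fallback can be seen in a few lines of userspace C. This is illustrative only: fake_softirq_pending is a plain global standing in for the per-CPU irq_stat state, and that name is invented here. The set_softirq_pending()/or_softirq_pending() definitions are the same fallback the patch adds to interrupt.h, so __raise_softirq_irqoff() keeps working unchanged on architectures that do not define __ARCH_SET_SOFTIRQ_PENDING; on x86-64 the same two names resolve to write_pda()/or_pda() instead.

#include <stdio.h>

static unsigned int fake_softirq_pending;	/* stand-in for the per-CPU pending word */

/* The real macro expands to a per-CPU lvalue; a plain global is enough here. */
#define local_softirq_pending()	(fake_softirq_pending)

/* Same shape as the generic fallback added to interrupt.h. */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x)	(local_softirq_pending() = (x))
#define or_softirq_pending(x)	(local_softirq_pending() |= (x))
#endif

/* __raise_softirq_irqoff() after the patch: one read-modify-write of the bitmask. */
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)

int main(void)
{
	set_softirq_pending(0);		/* what __do_softirq() now does at restart: */
	__raise_softirq_irqoff(1);	/* raise two softirq bits for the demo */
	__raise_softirq_irqoff(3);
	printf("pending bitmask: %#x\n", local_softirq_pending());
	return 0;
}

Built with plain gcc, it should print "pending bitmask: 0xa" for the two bits raised above.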