From: Zachary Amsden

This change encapsulates the TLB flush accessors into the sub-architecture
layer.

Signed-off-by: Zachary Amsden
Signed-off-by: Andrew Morton
---

 include/asm-i386/mach-default/mach_tlbflush.h |   47 ++++++++++++++++++++++++++
 include/asm-i386/tlbflush.h                   |   36 -------------------
 2 files changed, 48 insertions(+), 35 deletions(-)

diff -puN include/asm-i386/mach-default/mach_tlbflush.h~i386-transparent-paravirtualization-sub-arch-move-tlb-flush-definitions-to-the-sub-architecture-level include/asm-i386/mach-default/mach_tlbflush.h
--- devel/include/asm-i386/mach-default/mach_tlbflush.h~i386-transparent-paravirtualization-sub-arch-move-tlb-flush-definitions-to-the-sub-architecture-level	2005-08-06 15:01:31.000000000 -0700
+++ devel-akpm/include/asm-i386/mach-default/mach_tlbflush.h	2005-08-06 15:01:31.000000000 -0700
@@ -0,0 +1,47 @@
+/*
+ * include/asm-i386/mach-default/mach_tlbflush.h
+ *
+ * Standard TLB accessors for running on real hardware
+ * Moved from include/asm-i386/tlbflush.h 07/05
+ *
+ */
+
+#ifndef _MACH_TLBFLUSH_H
+#define _MACH_TLBFLUSH_H
+
+#define __flush_tlb()						\
+	do {							\
+		unsigned int tmpreg;				\
+								\
+		__asm__ __volatile__(				\
+			"movl %%cr3, %0;	\n"		\
+			"movl %0, %%cr3;  # flush TLB	\n"	\
+			: "=r" (tmpreg)				\
+			:: "memory");				\
+	} while (0)
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global()					\
+	do {							\
+		unsigned int tmpreg, cr4, cr4_orig;		\
+								\
+		__asm__ __volatile__(				\
+			"movl %%cr4, %2;  # turn off PGE	\n"	\
+			"movl %2, %1;				\n"	\
+			"andl %3, %1;				\n"	\
+			"movl %1, %%cr4;			\n"	\
+			"movl %%cr3, %0;			\n"	\
+			"movl %0, %%cr3;  # flush TLB		\n"	\
+			"movl %2, %%cr4;  # turn PGE back on	\n"	\
+			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
+			: "i" (~X86_CR4_PGE)			\
+			: "memory");				\
+	} while (0)
+
+#define __flush_tlb_single(addr) \
+	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+
+#endif /* _MACH_TLBFLUSH_H */
diff -puN include/asm-i386/tlbflush.h~i386-transparent-paravirtualization-sub-arch-move-tlb-flush-definitions-to-the-sub-architecture-level include/asm-i386/tlbflush.h
--- devel/include/asm-i386/tlbflush.h~i386-transparent-paravirtualization-sub-arch-move-tlb-flush-definitions-to-the-sub-architecture-level	2005-08-06 15:01:31.000000000 -0700
+++ devel-akpm/include/asm-i386/tlbflush.h	2005-08-06 15:01:31.000000000 -0700
@@ -4,38 +4,7 @@
 #include <linux/config.h>
 #include <linux/mm.h>
 #include <asm/processor.h>
-
-#define __flush_tlb()						\
-	do {							\
-		unsigned int tmpreg;				\
-								\
-		__asm__ __volatile__(				\
-			"movl %%cr3, %0;	\n"		\
-			"movl %0, %%cr3;  # flush TLB	\n"	\
-			: "=r" (tmpreg)				\
-			:: "memory");				\
-	} while (0)
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global()					\
-	do {							\
-		unsigned int tmpreg, cr4, cr4_orig;		\
-								\
-		__asm__ __volatile__(				\
-			"movl %%cr4, %2;  # turn off PGE	\n"	\
-			"movl %2, %1;				\n"	\
-			"andl %3, %1;				\n"	\
-			"movl %1, %%cr4;			\n"	\
-			"movl %%cr3, %0;			\n"	\
-			"movl %0, %%cr3;  # flush TLB		\n"	\
-			"movl %2, %%cr4;  # turn PGE back on	\n"	\
-			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
-			: "i" (~X86_CR4_PGE)			\
-			: "memory");				\
-	} while (0)
+#include <mach_tlbflush.h>
 
 extern unsigned long pgkern_mask;
 
@@ -49,9 +18,6 @@ extern unsigned long pgkern_mask;
 
 #define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
 
-#define __flush_tlb_single(addr) \
-	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
-
 #ifdef CONFIG_X86_INVLPG
 # define __flush_tlb_one(addr) __flush_tlb_single(addr)
 #else
_
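
[Editor's note, not part of the patch: with this split, a non-default
sub-architecture can override the TLB flush primitives simply by providing
its own mach_tlbflush.h on its sub-arch include path. A minimal sketch of
what a hypothetical paravirtualized sub-arch header might look like; the
"mach-hyper" name and the hv_* hypercall wrappers below are invented for
illustration, not a real interface:

	/* hypothetical include/asm-i386/mach-hyper/mach_tlbflush.h */
	#ifndef _MACH_TLBFLUSH_H
	#define _MACH_TLBFLUSH_H

	/* Assumed hypervisor entry points -- names are made up. */
	extern void hv_flush_tlb(void);		/* flush non-global entries */
	extern void hv_flush_tlb_all(void);	/* flush global entries too */
	extern void hv_invlpg(unsigned long va);	/* flush a single page */

	#define __flush_tlb()			hv_flush_tlb()
	#define __flush_tlb_global()		hv_flush_tlb_all()
	#define __flush_tlb_single(addr)	hv_invlpg((unsigned long)(addr))

	#endif /* _MACH_TLBFLUSH_H */

Such a guest avoids trapping on the %cr3/%cr4 writes that the
mach-default versions perform directly on real hardware.]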