From: Zachary Amsden

This change encapsulates privileged control register and flags accessors
into the sub-architecture layer.  The goal is a clean, uniform interface
that may be redefined on new sub-architectures of i386.  (An illustrative
sketch of such a redefinition follows the patch.)

Signed-off-by: Zachary Amsden
Signed-off-by: Andrew Morton
---

 include/asm-i386/mach-default/mach_system.h |   76 ++++++++++++++++++++++++++++
 include/asm-i386/system.h                   |   61 ----------------------
 2 files changed, 77 insertions(+), 60 deletions(-)

diff -puN include/asm-i386/mach-default/mach_system.h~i386-transparent-paravirtualization-sub-arch-move-sensitive-system-definitions-into-the-sub-arch-layer include/asm-i386/mach-default/mach_system.h
--- devel/include/asm-i386/mach-default/mach_system.h~i386-transparent-paravirtualization-sub-arch-move-sensitive-system-definitions-into-the-sub-arch-layer	2005-08-06 15:01:26.000000000 -0700
+++ devel-akpm/include/asm-i386/mach-default/mach_system.h	2005-08-06 15:01:26.000000000 -0700
@@ -0,0 +1,76 @@
+/*
+ * include/asm-i386/mach-default/mach_system.h
+ *
+ * Copyright (C) 2005, VMware, Inc & other authors
+ * Moved from include/asm-i386/system.h 07/05
+ *
+ */
+
+#ifndef _MACH_SYSTEM_H
+#define _MACH_SYSTEM_H
+
+#define read_cr0() ({ \
+	unsigned int __dummy; \
+	__asm__ __volatile__( \
+		"movl %%cr0,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr0(x) \
+	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+
+#define read_cr2() ({ \
+	unsigned int __dummy; \
+	__asm__ __volatile__( \
+		"movl %%cr2,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr2(x) \
+	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+
+#define read_cr3() ({ \
+	unsigned int __dummy; \
+	__asm__ ( \
+		"movl %%cr3,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr3(x) \
+	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));
+
+#define read_cr4() ({ \
+	unsigned int __dummy; \
+	__asm__( \
+		"movl %%cr4,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr4(x) \
+	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define stts() write_cr0(8 | read_cr0())
+
+#define wbinvd() \
+	__asm__ __volatile__ ("wbinvd": : :"memory");
+
+/* interrupt control.. */
+#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
+#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
+#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
+#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
+
+/* For spinlocks etc */
+#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+
+/* used in the idle loop; sti holds off interrupts for 1 instruction */
+#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
+
+/* halt until interrupted */
+#define halt()			__asm__ __volatile__("hlt")
+
+#endif
diff -puN include/asm-i386/system.h~i386-transparent-paravirtualization-sub-arch-move-sensitive-system-definitions-into-the-sub-arch-layer include/asm-i386/system.h
--- devel/include/asm-i386/system.h~i386-transparent-paravirtualization-sub-arch-move-sensitive-system-definitions-into-the-sub-arch-layer	2005-08-06 15:01:26.000000000 -0700
+++ devel-akpm/include/asm-i386/system.h	2005-08-06 15:01:26.000000000 -0700
@@ -99,56 +99,8 @@ static inline unsigned long _get_base(ch
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
-#define read_cr0() ({ \
-	unsigned int __dummy; \
-	__asm__ __volatile__( \
-		"movl %%cr0,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr0(x) \
-	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
-
-#define read_cr2() ({ \
-	unsigned int __dummy; \
-	__asm__ __volatile__( \
-		"movl %%cr2,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr2(x) \
-	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
-
-#define read_cr3() ({ \
-	unsigned int __dummy; \
-	__asm__ ( \
-		"movl %%cr3,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr3(x) \
-	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));
-
-#define read_cr4() ({ \
-	unsigned int __dummy; \
-	__asm__( \
-		"movl %%cr4,%0\n\t" \
-		:"=r" (__dummy)); \
-	__dummy; \
-})
-#define write_cr4(x) \
-	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
-#define stts() write_cr0(8 | read_cr0())
-
 #endif	/* __KERNEL__ */
 
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory");
-
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
@@ -458,15 +410,7 @@ struct alt_instr {
 
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
-/* interrupt control.. */
-#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
-#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
-#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()			__asm__ __volatile__("hlt": : :"memory")
+#include <mach_system.h>
 
 #define irqs_disabled()			\
 ({					\
@@ -475,9 +419,6 @@ struct alt_instr {
 	!(flags & (1<<9));		\
 })
 
-/* For spinlocks etc */
-#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
-
 /*
  * disable hlt during certain critical i/o operations
  */
_
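
As an illustration of the interface this patch sets up, a hypothetical
paravirtualized sub-architecture could provide its own mach_system.h that
implements the same macros through hypervisor calls rather than privileged
instructions.  The sketch below is not part of the patch: the mach-hypo
directory and the hv_*() wrappers are invented for the example, and only a
subset of the macros is shown.  The macro names and calling conventions are
the ones defined by mach-default above.

/*
 * include/asm-i386/mach-hypo/mach_system.h -- illustrative sketch only
 */
#ifndef _MACH_SYSTEM_H
#define _MACH_SYSTEM_H

/* hypothetical hypercall wrappers exported by this (made-up) sub-arch */
extern unsigned int hv_read_cr(int reg);
extern void hv_write_cr(int reg, unsigned int val);
extern unsigned long hv_save_flags(void);
extern void hv_restore_flags(unsigned long flags);
extern void hv_irq_disable(void);
extern void hv_irq_enable(void);

/* control register accessors: same names as mach-default, but no direct
 * mov to/from %crN -- the hypervisor performs the privileged access */
#define read_cr0()	(hv_read_cr(0))
#define write_cr0(x)	hv_write_cr(0, (x))
#define read_cr2()	(hv_read_cr(2))
#define write_cr2(x)	hv_write_cr(2, (x))
#define read_cr3()	(hv_read_cr(3))
#define write_cr3(x)	hv_write_cr(3, (x))
#define read_cr4()	(hv_read_cr(4))
#define write_cr4(x)	hv_write_cr(4, (x))

/* TS is bit 3 of CR0, as in the stts() definition above */
#define clts()		hv_write_cr(0, hv_read_cr(0) & ~8)
#define stts()		write_cr0(8 | read_cr0())

/* flags accessors keep the typecheck() contract of the default versions */
#define local_save_flags(x)	do { typecheck(unsigned long,x); (x) = hv_save_flags(); } while (0)
#define local_irq_restore(x)	do { typecheck(unsigned long,x); hv_restore_flags(x); } while (0)
#define local_irq_disable()	hv_irq_disable()
#define local_irq_enable()	hv_irq_enable()
#define local_irq_save(x)	do { local_save_flags(x); local_irq_disable(); } while (0)

#endif

Since system.h now picks these definitions up through #include <mach_system.h>,
generic code that calls read_cr3(), local_irq_save() and friends would build
against such a sub-arch without modification.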