From: Brian Gerst

The current code disables sysenter when first entering vm86 mode, but does
not disable it again when coming back to a vm86 task after a task switch.
Fix this by caching the desired SYSENTER_CS value in a per-thread
sysenter_cs field and reloading the MSR from load_esp0() whenever the
cached value changes.

 25-akpm/arch/i386/kernel/cpu/common.c |    2 +-
 25-akpm/arch/i386/kernel/process.c    |    2 +-
 25-akpm/arch/i386/kernel/vm86.c       |    9 ++++++---
 25-akpm/include/asm-i386/processor.h  |   20 +++++++-------------
 4 files changed, 15 insertions(+), 18 deletions(-)

diff -puN arch/i386/kernel/cpu/common.c~vm86-sysenter-fix arch/i386/kernel/cpu/common.c
--- 25/arch/i386/kernel/cpu/common.c~vm86-sysenter-fix	Wed Oct 29 12:16:36 2003
+++ 25-akpm/arch/i386/kernel/cpu/common.c	Wed Oct 29 12:16:36 2003
@@ -510,7 +510,7 @@ void __init cpu_init (void)
 		BUG();
 	enter_lazy_tlb(&init_mm, current);
 
-	load_esp0(t, thread->esp0);
+	load_esp0(t, thread);
 	set_tss_desc(cpu,t);
 	cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
 	load_TR_desc();
diff -puN arch/i386/kernel/process.c~vm86-sysenter-fix arch/i386/kernel/process.c
--- 25/arch/i386/kernel/process.c~vm86-sysenter-fix	Wed Oct 29 12:16:36 2003
+++ 25-akpm/arch/i386/kernel/process.c	Wed Oct 29 12:16:36 2003
@@ -507,7 +507,7 @@ struct task_struct * __switch_to(struct 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
-	load_esp0(tss, next->esp0);
+	load_esp0(tss, next);
 
 	/*
 	 * Load the per-thread Thread-Local Storage descriptor.
diff -puN arch/i386/kernel/vm86.c~vm86-sysenter-fix arch/i386/kernel/vm86.c
--- 25/arch/i386/kernel/vm86.c~vm86-sysenter-fix	Wed Oct 29 12:16:36 2003
+++ 25-akpm/arch/i386/kernel/vm86.c	Wed Oct 29 12:16:36 2003
@@ -119,7 +119,8 @@ struct pt_regs * save_v86_state(struct k
 
 	tss = init_tss + get_cpu();
 	current->thread.esp0 = current->thread.saved_esp0;
-	load_esp0(tss, current->thread.esp0);
+	current->thread.sysenter_cs = __KERNEL_CS;
+	load_esp0(tss, &current->thread);
 	current->thread.saved_esp0 = 0;
 	put_cpu();
 
@@ -296,8 +297,10 @@ static void do_sys_vm86(struct kernel_vm
 	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = init_tss + get_cpu();
-	tss->esp0 = tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
-	disable_sysenter(tss);
+	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+	if (cpu_has_sep)
+		tsk->thread.sysenter_cs = 0;
+	load_esp0(tss, &tsk->thread);
 	put_cpu();
 
 	tsk->thread.screen_bitmap = info->screen_bitmap;
diff -puN include/asm-i386/processor.h~vm86-sysenter-fix include/asm-i386/processor.h
--- 25/include/asm-i386/processor.h~vm86-sysenter-fix	Wed Oct 29 12:16:36 2003
+++ 25-akpm/include/asm-i386/processor.h	Wed Oct 29 12:16:36 2003
@@ -407,6 +407,7 @@ struct thread_struct {
 /* cached TLS descriptors. */
 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
 	unsigned long	esp0;
+	unsigned long	sysenter_cs;
 	unsigned long	eip;
 	unsigned long	esp;
 	unsigned long	fs;
@@ -428,6 +429,7 @@ struct thread_struct {
 
 #define INIT_THREAD  {							\
 	.vm86_info = NULL,						\
+	.sysenter_cs = __KERNEL_CS,					\
 	.io_bitmap_ptr = NULL,						\
 }
 
@@ -447,21 +449,13 @@ struct thread_struct {
 	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },	\
 }
 
-static inline void load_esp0(struct tss_struct *tss, unsigned long esp0)
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
 {
-	tss->esp0 = esp0;
+	tss->esp0 = thread->esp0;
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if ((unlikely(tss->ss1 != __KERNEL_CS))) {
-		tss->ss1 = __KERNEL_CS;
-		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-	}
-}
-
-static inline void disable_sysenter(struct tss_struct *tss)
-{
-	if (cpu_has_sep) {
-		tss->ss1 = 0;
-		wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+		tss->ss1 = thread->sysenter_cs;
+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 	}
 }
 
_
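
For illustration only, here is a small userspace model of the patched
load_esp0() behaviour (not kernel code: the wrmsr is a printf stub, and the
selector and esp0 values are made up).  It shows the lazy MSR update: the
MSR is rewritten only when the cached tss->ss1 disagrees with the incoming
thread, so switching back to a vm86 task clears SYSENTER_CS again, which is
exactly what the old code failed to do.

	#include <stdio.h>

	#define __KERNEL_CS 0x10	/* placeholder selector value */

	struct thread_struct { unsigned long esp0, sysenter_cs; };
	struct tss_struct    { unsigned long esp0, ss1; };

	/* Stub standing in for the real MSR write. */
	static void wrmsr_sysenter_cs(unsigned long cs)
	{
		printf("wrmsr(MSR_IA32_SYSENTER_CS, %#lx)\n", cs);
	}

	/* Mirrors the patched helper: reload the MSR lazily, only when
	 * the cached tss->ss1 differs from the incoming thread's value. */
	static void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
	{
		tss->esp0 = thread->esp0;
		if (tss->ss1 != thread->sysenter_cs) {
			tss->ss1 = thread->sysenter_cs;
			wrmsr_sysenter_cs(thread->sysenter_cs);
		}
	}

	int main(void)
	{
		struct tss_struct tss = { .ss1 = __KERNEL_CS };
		struct thread_struct vm86_task  = { .esp0 = 0x1000, .sysenter_cs = 0 };
		struct thread_struct other_task = { .esp0 = 0x2000, .sysenter_cs = __KERNEL_CS };

		load_esp0(&tss, &vm86_task);	/* enter vm86: MSR cleared */
		load_esp0(&tss, &other_task);	/* switch away: MSR restored */
		load_esp0(&tss, &vm86_task);	/* switch back: cleared again */
		return 0;
	}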