diff -urN 2.4.19pre7/arch/i386/kernel/traps.c vm86/arch/i386/kernel/traps.c
--- 2.4.19pre7/arch/i386/kernel/traps.c	Tue Apr 16 08:11:23 2002
+++ vm86/arch/i386/kernel/traps.c	Mon Apr 29 15:59:52 2002
@@ -305,8 +305,13 @@
 static void inline do_trap(int trapnr, int signr, char *str, int vm86,
 			   struct pt_regs * regs, long error_code, siginfo_t *info)
 {
-	if (vm86 && regs->eflags & VM_MASK)
-		goto vm86_trap;
+	if (regs->eflags & VM_MASK) {
+		if (vm86)
+			goto vm86_trap;
+		else
+			goto trap_signal;
+	}
+
 	if (!(regs->xcs & 3))
 		goto kernel_trap;
 
diff -urN 2.4.19pre7/arch/i386/kernel/vm86.c vm86/arch/i386/kernel/vm86.c
--- 2.4.19pre7/arch/i386/kernel/vm86.c	Mon Feb 25 22:05:04 2002
+++ vm86/arch/i386/kernel/vm86.c	Mon Apr 29 16:00:13 2002
@@ -2,7 +2,34 @@
  * linux/kernel/vm86.c
  *
  * Copyright (C) 1994 Linus Torvalds
+ *
+ *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
+ *                stack - Manfred Spraul
+ *
+ *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
+ *                them correctly. Now the emulation will be in a
+ *                consistent state after stackfaults - Kasper Dupont
+ *
+ *
+ *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
+ *
+ *
+ *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
+ *                caused by Kasper Dupont's changes - Stas Sergeev
+ *
+ *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
+ *                Kasper Dupont
+ *
+ *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
+ *                Kasper Dupont
+ *
+ *   9 apr 2002 - Changed stack access macros to jump to a label
+ *                instead of returning to userspace. This simplifies
+ *                do_int, and is needed by handle_vm86_fault. Kasper
+ *                Dupont
+ *
  */
+
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
@@ -290,12 +317,25 @@
 	regs->eflags &= ~TF_MASK;
 }
 
+/* It is correct to call set_IF(regs) from the set_vflags_*
+ * functions. However someone forgot to call clear_IF(regs)
+ * in the opposite case.
+ * After the command sequence CLI PUSHF STI POPF you should
+ * end up with interrupts disabled, but you ended up with
+ * interrupts enabled.
+ *  ( I was testing my own changes, but the only bug I
+ *    could find was in a function I had not changed. )
+ * [KD]
+ */
+
 static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
 {
 	set_flags(VEFLAGS, eflags, current->thread.v86mask);
 	set_flags(regs->eflags, eflags, SAFE_MASK);
 	if (eflags & IF_MASK)
 		set_IF(regs);
+	else
+		clear_IF(regs);
 }
 
 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
@@ -304,6 +344,8 @@
 	set_flags(regs->eflags, flags, SAFE_MASK);
 	if (flags & IF_MASK)
 		set_IF(regs);
+	else
+		clear_IF(regs);
 }
 
 static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
@@ -327,75 +369,187 @@
  * Boy are these ugly, but we need to do the correct 16-bit arithmetic.
  * Gcc makes a mess of it, so we do it inline and use non-obvious calling
  * conventions..
+ * FIXME: is VM86_UNKNOWN really the correct return code? [MS??]
+ * No, that wasn't correct, instead we jump to a label given as
+ * argument. In do_int the label already existed, in handle_vm86_fault
+ * it had to be created. Can this be optimized so error handling gets
+ * out of the default execution path by using the address of the
+ * label as fixup address? [KD]
+ */
+#define pushb(base, ptr, val, err_label) \
+	do { \
+		int err; \
+		__asm__ __volatile__( \
+			"decw %w0\n\t" \
+			"1: movb %3,0(%2,%0)\n\t" \
+			"xor %1,%1\n\t" \
+			"2:\n" \
+			".section .fixup,\"ax\"\n\t" \
+			"3: movl $1,%1\n\t" \
+			"   jmp 2b\n\t" \
+			".previous\n" \
+			".section __ex_table,\"a\"\n" \
+			"   .align 4\n" \
+			"   .long 1b,3b\n" \
+			".previous" \
+			: "=r" (ptr), "=r" (err) \
+			: "r" (base), "q" (val), "0" (ptr)); \
+		if (err) \
+			goto err_label; \
+	} while(0)
+
+#define pushw(base, ptr, val, err_label) \
+	do { \
+		int err; \
+		__asm__ __volatile__( \
+			"decw %w0\n\t" \
+			"1: movb %h3,0(%2,%0)\n\t" \
+			"decw %w0\n\t" \
+			"2: movb %b3,0(%2,%0)\n\t" \
+			"xor %1,%1\n\t" \
+			"3:\n" \
+			".section .fixup,\"ax\"\n\t" \
+			"4: movl $1,%1\n\t" \
+			"   jmp 3b\n\t" \
+			".previous\n" \
+			".section __ex_table,\"a\"\n" \
+			"   .align 4\n" \
+			"   .long 1b,4b\n" \
+			"   .long 2b,4b\n" \
+			".previous" \
+			: "=r" (ptr), "=r" (err) \
+			: "r" (base), "q" (val), "0" (ptr)); \
+		if (err) \
+			goto err_label; \
+	} while(0)
+
+#define pushl(base, ptr, val, err_label) \
+	do { \
+		int err; \
+		__asm__ __volatile__( \
+			"decw %w0\n\t" \
+			"rorl $16,%3\n\t" \
+			"1: movb %h3,0(%2,%0)\n\t" \
+			"decw %w0\n\t" \
+			"2: movb %b3,0(%2,%0)\n\t" \
+			"decw %w0\n\t" \
+			"rorl $16,%3\n\t" \
+			"3: movb %h3,0(%2,%0)\n\t" \
+			"decw %w0\n\t" \
+			"4: movb %b3,0(%2,%0)\n\t" \
+			"xor %1,%1\n\t" \
+			"5:\n" \
+			".section .fixup,\"ax\"\n\t" \
+			"6: movl $1,%1\n\t" \
+			"   jmp 5b\n\t" \
+			".previous\n" \
+			".section __ex_table,\"a\"\n" \
+			"   .align 4\n" \
+			"   .long 1b,6b\n" \
+			"   .long 2b,6b\n" \
+			"   .long 3b,6b\n" \
+			"   .long 4b,6b\n" \
+			".previous" \
+			: "=r" (ptr), "=r" (err) \
+			: "r" (base), "q" (val), "0" (ptr)); \
+		if (err) \
+			goto err_label; \
+	} while(0)
+
+#define popb(base, ptr, err_label) \
+	({ \
+		unsigned long __res; \
+		unsigned int err; \
+		__asm__ __volatile__( \
+			"1:movb 0(%1,%0),%b2\n\t" \
+			"incw %w0\n\t" \
+			"xor %3,%3\n\t" \
+			"2:\n" \
+			".section .fixup,\"ax\"\n\t" \
+			"3: movl $1,%3\n\t" \
+			"   jmp 2b\n\t" \
+			".previous\n" \
+			".section __ex_table,\"a\"\n" \
+			"   .align 4\n" \
+			"   .long 1b,3b\n" \
+			".previous" \
+			: "=r" (ptr), "=r" (base), "=q" (__res), \
+			  "=r" (err) \
+			: "0" (ptr), "1" (base), "2" (0)); \
+		if (err) \
+			goto err_label; \
+		__res; \
+	})
+
+#define popw(base, ptr, err_label) \
+	({ \
+		unsigned long __res; \
+		unsigned int err; \
+		__asm__ __volatile__( \
+			"1:movb 0(%1,%0),%b2\n\t" \
+			"incw %w0\n\t" \
+			"2:movb 0(%1,%0),%h2\n\t" \
+			"incw %w0\n\t" \
+			"xor %3,%3\n\t" \
+			"3:\n" \
+			".section .fixup,\"ax\"\n\t" \
+			"4: movl $1,%3\n\t" \
+			"   jmp 3b\n\t" \
+			".previous\n" \
+			".section __ex_table,\"a\"\n" \
+			"   .align 4\n" \
+			"   .long 1b,4b\n" \
+			"   .long 2b,4b\n" \
+			".previous" \
+			: "=r" (ptr), "=r" (base), "=q" (__res), \
+			  "=r" (err) \
+			: "0" (ptr), "1" (base), "2" (0)); \
+		if (err) \
+			goto err_label; \
+		__res; \
+	})
+
+#define popl(base, ptr, err_label) \
+	({ \
+		unsigned long __res; \
+		unsigned int err; \
+		__asm__ __volatile__( \
+			"1:movb 0(%1,%0),%b2\n\t" \
+			"incw %w0\n\t" \
+			"2:movb 0(%1,%0),%h2\n\t" \
+			"incw %w0\n\t" \
+			"rorl $16,%2\n\t" \
+			"3:movb 0(%1,%0),%b2\n\t" \
+			"incw %w0\n\t" \
+			"4:movb 0(%1,%0),%h2\n\t" \
+			"incw %w0\n\t" \
+			"rorl $16,%2\n\t" \
+			"xor %3,%3\n\t" \
+			"5:\n" \
+			".section .fixup,\"ax\"\n\t" \
+			"6: movl $1,%3\n\t" \
+			"   jmp 5b\n\t" \
+			".previous\n" \
+			".section __ex_table,\"a\"\n" \
+			"   .align 4\n" \
+			"   .long 1b,6b\n" \
+			"   .long 2b,6b\n" \
+			"   .long 3b,6b\n" \
+			"   .long 4b,6b\n" \
+			".previous" \
+			: "=r" (ptr), "=r" (base), "=q" (__res), \
+			  "=r" (err) \
+			: "0" (ptr), "1" (base), "2" (0)); \
+		if (err) \
+			goto err_label; \
+		__res; \
+	})
+
+/* There are so many possible reasons for this function to return
+ * VM86_INTx, so adding another doesn't bother me. We can expect
+ * userspace programs to be able to handle it. (Getting a problem
+ * in userspace is always better than an Oops anyway.) [KD]
  */
-#define pushb(base, ptr, val) \
-__asm__ __volatile__( \
-	"decw %w0\n\t" \
-	"movb %2,0(%1,%0)" \
-	: "=r" (ptr) \
-	: "r" (base), "q" (val), "0" (ptr))
-
-#define pushw(base, ptr, val) \
-__asm__ __volatile__( \
-	"decw %w0\n\t" \
-	"movb %h2,0(%1,%0)\n\t" \
-	"decw %w0\n\t" \
-	"movb %b2,0(%1,%0)" \
-	: "=r" (ptr) \
-	: "r" (base), "q" (val), "0" (ptr))
-
-#define pushl(base, ptr, val) \
-__asm__ __volatile__( \
-	"decw %w0\n\t" \
-	"rorl $16,%2\n\t" \
-	"movb %h2,0(%1,%0)\n\t" \
-	"decw %w0\n\t" \
-	"movb %b2,0(%1,%0)\n\t" \
-	"decw %w0\n\t" \
-	"rorl $16,%2\n\t" \
-	"movb %h2,0(%1,%0)\n\t" \
-	"decw %w0\n\t" \
-	"movb %b2,0(%1,%0)" \
-	: "=r" (ptr) \
-	: "r" (base), "q" (val), "0" (ptr))
-
-#define popb(base, ptr) \
-({ unsigned long __res; \
-__asm__ __volatile__( \
-	"movb 0(%1,%0),%b2\n\t" \
-	"incw %w0" \
-	: "=r" (ptr), "=r" (base), "=q" (__res) \
-	: "0" (ptr), "1" (base), "2" (0)); \
-__res; })
-
-#define popw(base, ptr) \
-({ unsigned long __res; \
-__asm__ __volatile__( \
-	"movb 0(%1,%0),%b2\n\t" \
-	"incw %w0\n\t" \
-	"movb 0(%1,%0),%h2\n\t" \
-	"incw %w0" \
-	: "=r" (ptr), "=r" (base), "=q" (__res) \
-	: "0" (ptr), "1" (base), "2" (0)); \
-__res; })
-
-#define popl(base, ptr) \
-({ unsigned long __res; \
-__asm__ __volatile__( \
-	"movb 0(%1,%0),%b2\n\t" \
-	"incw %w0\n\t" \
-	"movb 0(%1,%0),%h2\n\t" \
-	"incw %w0\n\t" \
-	"rorl $16,%2\n\t" \
-	"movb 0(%1,%0),%b2\n\t" \
-	"incw %w0\n\t" \
-	"movb 0(%1,%0),%h2\n\t" \
-	"incw %w0\n\t" \
-	"rorl $16,%2" \
-	: "=r" (ptr), "=r" (base), "=q" (__res) \
-	: "0" (ptr), "1" (base)); \
-__res; })
-
 static void do_int(struct kernel_vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
 {
 	unsigned long *intr_ptr, segoffs;
@@ -411,9 +565,9 @@
 		goto cannot_handle;
 	if ((segoffs >> 16) == BIOSSEG)
 		goto cannot_handle;
-	pushw(ssp, sp, get_vflags(regs));
-	pushw(ssp, sp, regs->cs);
-	pushw(ssp, sp, IP(regs));
+	pushw(ssp, sp, get_vflags(regs), cannot_handle);
+	pushw(ssp, sp, regs->cs, cannot_handle);
+	pushw(ssp, sp, IP(regs), cannot_handle);
 	regs->cs = segoffs >> 16;
 	SP(regs) -= 6;
 	IP(regs) = segoffs & 0xffff;
@@ -455,46 +609,54 @@
 #define CHECK_IF_IN_TRAP \
 	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
-		pushw(ssp,sp,popw(ssp,sp) | TF_MASK);
-#define VM86_FAULT_RETURN \
+		newflags |= TF_MASK
+#define VM86_FAULT_RETURN do { \
 	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
 		return_to_32bit(regs, VM86_PICRETURN); \
-	return;
+	return; } while (0)
 
 	csp = (unsigned char *) (regs->cs << 4);
 	ssp = (unsigned char *) (regs->ss << 4);
 	sp = SP(regs);
 	ip = IP(regs);
 
-	switch (popb(csp, ip)) {
+	switch (popb(csp, ip, simulate_sigsegv)) {
 
 	/* operand size override */
 	case 0x66:
-		switch (popb(csp, ip)) {
+		switch (popb(csp, ip, simulate_sigsegv)) {
 
 		/* pushfd */
 		case 0x9c:
+			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
 			SP(regs) -= 4;
 			IP(regs) += 2;
-			pushl(ssp, sp, get_vflags(regs));
 			VM86_FAULT_RETURN;
 
 		/* popfd */
 		case 0x9d:
+			{
+			unsigned long newflags=popl(ssp, sp, simulate_sigsegv);
 			SP(regs) += 4;
 			IP(regs) += 2;
-			CHECK_IF_IN_TRAP
-			set_vflags_long(popl(ssp, sp), regs);
+			CHECK_IF_IN_TRAP;
+			set_vflags_long(newflags, regs);
 			VM86_FAULT_RETURN;
+			}
 
 		/* iretd */
 		case 0xcf:
+			{
+			unsigned long newip=popl(ssp, sp, simulate_sigsegv);
+			unsigned long newcs=popl(ssp, sp, simulate_sigsegv);
+			unsigned long newflags=popl(ssp, sp, simulate_sigsegv);
 			SP(regs) += 12;
-			IP(regs) = (unsigned short)popl(ssp, sp);
-			regs->cs = (unsigned short)popl(ssp, sp);
-			CHECK_IF_IN_TRAP
-			set_vflags_long(popl(ssp, sp), regs);
+			IP(regs) = (unsigned short)newip;
+			regs->cs = (unsigned short)newcs;
+			CHECK_IF_IN_TRAP;
+			set_vflags_long(newflags, regs);
 			VM86_FAULT_RETURN;
+			}
 
 		/* need this to avoid a fallthrough */
 		default:
 			return_to_32bit(regs, VM86_UNKNOWN);
@@ -502,22 +664,25 @@
 
 	/* pushf */
 	case 0x9c:
+		pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
 		SP(regs) -= 2;
 		IP(regs)++;
-		pushw(ssp, sp, get_vflags(regs));
 		VM86_FAULT_RETURN;
 
 	/* popf */
 	case 0x9d:
+		{
+		unsigned short newflags=popw(ssp, sp, simulate_sigsegv);
 		SP(regs) += 2;
 		IP(regs)++;
-		CHECK_IF_IN_TRAP
-		set_vflags_short(popw(ssp, sp), regs);
+		CHECK_IF_IN_TRAP;
+		set_vflags_short(newflags, regs);
 		VM86_FAULT_RETURN;
+		}
 
 	/* int xx */
 	case 0xcd: {
-		int intno=popb(csp, ip);
+		int intno=popb(csp, ip, simulate_sigsegv);
 		IP(regs) += 2;
 		if (VMPI.vm86dbg_active) {
 			if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
@@ -529,12 +694,17 @@
 
 	/* iret */
 	case 0xcf:
+		{
+		unsigned short newip=popw(ssp, sp, simulate_sigsegv);
+		unsigned short newcs=popw(ssp, sp, simulate_sigsegv);
+		unsigned short newflags=popw(ssp, sp, simulate_sigsegv);
 		SP(regs) += 6;
-		IP(regs) = popw(ssp, sp);
-		regs->cs = popw(ssp, sp);
-		CHECK_IF_IN_TRAP
-		set_vflags_short(popw(ssp, sp), regs);
+		IP(regs) = newip;
+		regs->cs = newcs;
+		CHECK_IF_IN_TRAP;
+		set_vflags_short(newflags, regs);
 		VM86_FAULT_RETURN;
+		}
 
 	/* cli */
 	case 0xfa:
@@ -557,6 +727,21 @@
 	default:
 		return_to_32bit(regs, VM86_UNKNOWN);
 	}
+
+	return;
+
+simulate_sigsegv:
+	/* FIXME: After a long discussion with Stas we finally
+	 *        agreed that this is wrong. Here we should
+	 *        really send a SIGSEGV to the user program.
+	 *        But how do we create the correct context? We
+	 *        are inside a general protection fault handler
+	 *        and have just returned from a page fault handler.
+	 *        The correct context for the signal handler
+	 *        should be a mixture of the two, but how do we
+	 *        get the information? [KD]
+	 */
+	return_to_32bit(regs, VM86_UNKNOWN);
 }
 
 /* ---------------- vm86 special IRQ passing stuff ----------------- */
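
A note on the calling convention the new stack-access macros use: on a faulting
access they jump to a caller-supplied label (cannot_handle in do_int,
simulate_sigsegv in handle_vm86_fault) instead of returning to userspace, so the
error path stays out of the normal flow and the emulated registers are only
updated once every access has succeeded. The program below is only a minimal
userspace sketch of that convention, not the kernel mechanism itself: a plain
bounds check stands in for the .fixup/__ex_table machinery (which needs the
kernel's page fault handler), and popb_checked, STACK_SIZE and the test in main
are made-up names used purely for illustration.

/* Userspace sketch of the err_label convention used by the new pushb/popb
 * macros. The bounds check is a stand-in for the kernel's exception-table
 * fixup; only the control flow (goto a caller-supplied label on failure)
 * mirrors the patch. Requires GCC/Clang statement expressions. */
#include <stdio.h>

#define STACK_SIZE 16

/* Hypothetical, simplified analogue of popb(base, ptr, err_label). */
#define popb_checked(base, ptr, err_label)                    \
	({                                                    \
		if ((ptr) >= STACK_SIZE)                      \
			goto err_label; /* "fault": bad address */ \
		(unsigned long)(base)[(ptr)++];               \
	})

int main(void)
{
	unsigned char stack[STACK_SIZE] = { 0xcd, 0x21 };  /* int 0x21 */
	unsigned long sp = 0;

	unsigned long opcode = popb_checked(stack, sp, simulate_sigsegv);
	unsigned long intno  = popb_checked(stack, sp, simulate_sigsegv);
	printf("opcode=%#lx intno=%#lx sp=%lu\n", opcode, intno, sp);

	/* Force a failing access to exercise the error path. */
	sp = STACK_SIZE;
	(void)popb_checked(stack, sp, simulate_sigsegv);
	printf("not reached\n");
	return 0;

simulate_sigsegv:
	/* In the patch this path calls return_to_32bit(regs, VM86_UNKNOWN),
	 * leaving the emulated registers untouched and therefore consistent. */
	printf("stack fault caught, emulation state left consistent\n");
	return 1;
}

Because the failure jump happens before SP(regs) or IP(regs) are modified, a
caller written in this style never commits a partial instruction, which is the
consistency property the changelog entries above describe.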