diff options
author     H. Peter Anvin <hpa@linux.intel.com>    2014-04-28 23:38:18 -0700
committer  H. Peter Anvin <hpa@linux.intel.com>    2014-04-28 23:38:18 -0700
commit     cba5534c638dc0e2984cb38fa148332780260b2e (patch)
tree       be903f3c420d5301c282908f459b265865de0bbe
parent     bd6ff3cacb35d5ee782ca334ca96768ac56363a9 (diff)
download   espfix64-espfix-clean.tar.gz
x86: Make support for 16-bit segments a config option  (branch tag: espfix-clean)
Embedded systems and the like are likely never going to experience
16-bit software, so make it an option to reject them entirely and not
carry any of the overhead.
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--  arch/x86/Kconfig           |  8 ++++++++
-rw-r--r--  arch/x86/kernel/entry_32.S | 13 +++++++++++++
-rw-r--r--  arch/x86/kernel/ldt.c      |  6 +-----
3 files changed, 22 insertions(+), 5 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 25d2c6f7325e8..f1da8b17865bc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -914,6 +914,14 @@ config VM86
 	  XFree86 to initialize some video cards via BIOS.  Disabling this
 	  option saves about 6k.
 
+config X86_16BIT
+	bool "Enable 16-bit segments" if EXPERT
+	default y
+	---help---
+	  This option enables the execution of 16-bit protected mode
+	  software.  This is primarily used to run Win16 software
+	  under Wine and 16-bit extended DOS programs under DOSEMU.
+
 config TOSHIBA
 	tristate "Toshiba Laptop support"
 	depends on X86_32
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 2780b8f3b96cb..30dd6e5058f62 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -527,6 +527,7 @@ syscall_exit:
 restore_all:
 	TRACE_IRQS_IRET
 restore_all_notrace:
+#ifdef CONFIG_X86_16BIT
 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
 	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 	# are returning to the kernel.
@@ -537,6 +538,7 @@ restore_all_notrace:
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
 	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
+#endif
 restore_nocheck:
 	RESTORE_REGS 4			# skip orig_eax/error_code
 irq_return:
@@ -549,6 +551,7 @@ ENTRY(iret_exc)
 .previous
 	_ASM_EXTABLE(irq_return,iret_exc)
 
+#ifdef CONFIG_X86_16BIT
 	CFI_RESTORE_STATE
 ldt_ss:
 #ifdef CONFIG_PARAVIRT
@@ -592,6 +595,7 @@ ldt_ss:
 	lss (%esp), %esp		/* switch to espfix segment */
 	CFI_ADJUST_CFA_OFFSET -8
 	jmp restore_nocheck
+#endif /* CONFIG_X86_16BIT */
 	CFI_ENDPROC
 ENDPROC(system_call)
 
@@ -692,6 +696,7 @@ END(syscall_badsys)
 .popsection
 
 .macro FIXUP_ESPFIX_STACK
+#ifdef CONFIG_X86_16BIT
 /*
  * Switch back for ESPFIX stack to the normal zerobased stack
  *
@@ -708,8 +713,10 @@ END(syscall_badsys)
 	pushl_cfi %eax
 	lss (%esp), %esp		/* switch to the normal stack segment */
 	CFI_ADJUST_CFA_OFFSET -8
+#endif
 .endm
 .macro UNWIND_ESPFIX_STACK
+#ifdef CONFIG_X86_16BIT
 	movl %ss, %eax
 	/* see if on espfix stack */
 	cmpw $__ESPFIX_SS, %ax
@@ -720,6 +727,7 @@ END(syscall_badsys)
 	/* switch to normal stack */
 	FIXUP_ESPFIX_STACK
 27:
+#endif
 .endm
 
 /*
@@ -1350,11 +1358,13 @@ END(debug)
 ENTRY(nmi)
 	RING0_INT_FRAME
 	ASM_CLAC
+#ifdef CONFIG_X86_16BIT
 	pushl_cfi %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
 	popl_cfi %eax
 	je nmi_espfix_stack
+#endif
 	cmpl $ia32_sysenter_target,(%esp)
 	je nmi_stack_fixup
 	pushl_cfi %eax
@@ -1394,6 +1404,7 @@ nmi_debug_stack_check:
 	FIX_STACK 24, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
+#ifdef CONFIG_X86_16BIT
 nmi_espfix_stack:
 	/* We have a RING0_INT_FRAME here.
 	 *
@@ -1415,6 +1426,8 @@ nmi_espfix_stack:
 	lss 12+4(%esp), %esp		# back to espfix stack
 	CFI_ADJUST_CFA_OFFSET -24
 	jmp irq_return
+#endif /* CONFIG_X86_16BIT */
+
 	CFI_ENDPROC
 END(nmi)
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index af1d14a9ebdae..cc34ae89a8350 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -229,11 +229,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		}
 	}
 
-	/*
-	 * On x86-64 we do not support 16-bit segments due to
-	 * IRET leaking the high bits of the kernel stack address.
-	 */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_16BIT
 	if (!ldt_info.seg_32bit) {
 		error = -EINVAL;
 		goto out_unlock;