mirror of
https://github.com/Divested-Mobile/DivestOS-Build.git
synced 2024-10-01 01:35:54 -04:00
196 lines
4.8 KiB
Diff
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
|
|
index 520cde8..2b6c572 100644
|
|
--- a/arch/x86/Kconfig
|
|
+++ b/arch/x86/Kconfig
|
|
@@ -967,14 +967,27 @@
|
|
default y
|
|
depends on X86_32
|
|
---help---
|
|
- This option is required by programs like DOSEMU to run 16-bit legacy
|
|
- code on X86 processors. It also may be needed by software like
|
|
- XFree86 to initialize some video cards via BIOS. Disabling this
|
|
- option saves about 6k.
|
|
+ This option is required by programs like DOSEMU to run
|
|
+ 16-bit real mode legacy code on x86 processors. It also may
|
|
+ be needed by software like XFree86 to initialize some video
|
|
+ cards via BIOS. Disabling this option saves about 6K.
|
|
+
|
|
+config X86_16BIT
|
|
+ bool "Enable support for 16-bit segments" if EXPERT
|
|
+ default y
|
|
+ ---help---
|
|
+ This option is required by programs like Wine to run 16-bit
|
|
+ protected mode legacy code on x86 processors. Disabling
|
|
+ this option saves about 300 bytes on i386, or around 6K text
|
|
+ plus 16K runtime memory on x86-64.
|
|
+
|
|
+config X86_ESPFIX32
|
|
+ def_bool y
|
|
+ depends on X86_16BIT && X86_32
|
|
|
|
config X86_ESPFIX64
|
|
def_bool y
|
|
- depends on X86_64
|
|
+ depends on X86_16BIT && X86_64
|
|
|
|
config TOSHIBA
|
|
tristate "Toshiba Laptop support"
|
|
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
|
|
index 41baa1f..e758e2f 100644
|
|
--- a/arch/x86/kernel/entry_32.S
|
|
+++ b/arch/x86/kernel/entry_32.S
|
|
@@ -530,6 +530,7 @@
|
|
restore_all:
|
|
TRACE_IRQS_IRET
|
|
restore_all_notrace:
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
|
|
# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
|
|
# are returning to the kernel.
|
|
@@ -540,6 +541,7 @@
|
|
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
|
|
CFI_REMEMBER_STATE
|
|
je ldt_ss # returning to user-space with LDT SS
|
|
+#endif
|
|
restore_nocheck:
|
|
RESTORE_REGS 4 # skip orig_eax/error_code
|
|
irq_return:
|
|
@@ -552,6 +554,7 @@
|
|
.previous
|
|
_ASM_EXTABLE(irq_return,iret_exc)
|
|
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
CFI_RESTORE_STATE
|
|
ldt_ss:
|
|
#ifdef CONFIG_PARAVIRT
|
|
@@ -595,6 +598,7 @@
|
|
lss (%esp), %esp /* switch to espfix segment */
|
|
CFI_ADJUST_CFA_OFFSET -8
|
|
jmp restore_nocheck
|
|
+#endif
|
|
CFI_ENDPROC
|
|
ENDPROC(system_call)
|
|
|
|
@@ -702,6 +706,7 @@
|
|
* the high word of the segment base from the GDT and swiches to the
|
|
* normal stack and adjusts ESP with the matching offset.
|
|
*/
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
/* fixup the stack */
|
|
mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
|
|
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
|
|
@@ -711,8 +716,10 @@
|
|
pushl_cfi %eax
|
|
lss (%esp), %esp /* switch to the normal stack segment */
|
|
CFI_ADJUST_CFA_OFFSET -8
|
|
+#endif
|
|
.endm
|
|
.macro UNWIND_ESPFIX_STACK
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
movl %ss, %eax
|
|
/* see if on espfix stack */
|
|
cmpw $__ESPFIX_SS, %ax
|
|
@@ -723,6 +730,7 @@
|
|
/* switch to normal stack */
|
|
FIXUP_ESPFIX_STACK
|
|
27:
|
|
+#endif
|
|
.endm
|
|
|
|
/*
|
|
@@ -1330,11 +1338,13 @@
|
|
ENTRY(nmi)
|
|
RING0_INT_FRAME
|
|
ASM_CLAC
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
pushl_cfi %eax
|
|
movl %ss, %eax
|
|
cmpw $__ESPFIX_SS, %ax
|
|
popl_cfi %eax
|
|
je nmi_espfix_stack
|
|
+#endif
|
|
cmpl $ia32_sysenter_target,(%esp)
|
|
je nmi_stack_fixup
|
|
pushl_cfi %eax
|
|
@@ -1374,6 +1384,7 @@
|
|
FIX_STACK 24, nmi_stack_correct, 1
|
|
jmp nmi_stack_correct
|
|
|
|
+#ifdef CONFIG_X86_ESPFIX32
|
|
nmi_espfix_stack:
|
|
/* We have a RING0_INT_FRAME here.
|
|
*
|
|
@@ -1395,6 +1406,7 @@
|
|
lss 12+4(%esp), %esp # back to espfix stack
|
|
CFI_ADJUST_CFA_OFFSET -24
|
|
jmp irq_return
|
|
+#endif
|
|
CFI_ENDPROC
|
|
END(nmi)
|
|
|
|
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
|
|
index 75ccdc1..f9315d9 100644
|
|
--- a/arch/x86/kernel/entry_64.S
|
|
+++ b/arch/x86/kernel/entry_64.S
|
|
@@ -1060,8 +1060,10 @@
|
|
* Are we returning to a stack segment from the LDT? Note: in
|
|
* 64-bit mode SS:RSP on the exception stack is always valid.
|
|
*/
|
|
+#ifdef CONFIG_X86_ESPFIX64
|
|
testb $4,(SS-RIP)(%rsp)
|
|
jnz irq_return_ldt
|
|
+#endif
|
|
|
|
irq_return_iret:
|
|
INTERRUPT_RETURN
|
|
@@ -1073,6 +1075,7 @@
|
|
_ASM_EXTABLE(native_iret, bad_iret)
|
|
#endif
|
|
|
|
+#ifdef CONFIG_X86_ESPFIX64
|
|
irq_return_ldt:
|
|
pushq_cfi %rax
|
|
pushq_cfi %rdi
|
|
@@ -1096,6 +1099,7 @@
|
|
movq %rax,%rsp
|
|
popq_cfi %rax
|
|
jmp irq_return_iret
|
|
+#endif
|
|
|
|
.section .fixup,"ax"
|
|
bad_iret:
|
|
@@ -1169,6 +1173,7 @@
|
|
* modify the stack to make it look like we just entered
|
|
* the #GP handler from user space, similar to bad_iret.
|
|
*/
|
|
+#ifdef CONFIG_X86_ESPFIX64
|
|
ALIGN
|
|
__do_double_fault:
|
|
XCPT_FRAME 1 RDI+8
|
|
@@ -1194,6 +1199,9 @@
|
|
retq
|
|
CFI_ENDPROC
|
|
END(__do_double_fault)
|
|
+#else
|
|
+# define __do_double_fault do_double_fault
|
|
+#endif
|
|
|
|
/*
|
|
* End of kprobes section
|
|
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
|
|
index ebc9873..c37886d 100644
|
|
--- a/arch/x86/kernel/ldt.c
|
|
+++ b/arch/x86/kernel/ldt.c
|
|
@@ -229,6 +229,11 @@
|
|
}
|
|
}
|
|
|
|
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
|
|
+ error = -EINVAL;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
fill_ldt(&ldt, &ldt_info);
|
|
if (oldmode)
|
|
ldt.avl = 0;
|