mirror of https://github.com/Divested-Mobile/DivestOS-Build.git
synced 2024-12-11 08:54:28 -05:00

106 lines, 2.9 KiB (unified diff)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index bba3cf8..0a8b519 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -129,7 +129,7 @@
 
 #define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */
 
-#define INTERRUPT_RETURN	iretq
+#define INTERRUPT_RETURN	jmp native_iret
 #define USERGS_SYSRET64				\
 	swapgs;					\
 	sysretq;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index f9315d9..db230f8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1056,27 +1056,24 @@
 	RESTORE_ARGS 1,8,1
 
 irq_return:
+	INTERRUPT_RETURN
+
+ENTRY(native_iret)
 	/*
 	 * Are we returning to a stack segment from the LDT? Note: in
 	 * 64-bit mode SS:RSP on the exception stack is always valid.
 	 */
 #ifdef CONFIG_X86_ESPFIX64
 	testb $4,(SS-RIP)(%rsp)
-	jnz irq_return_ldt
+	jnz native_irq_return_ldt
 #endif
 
-irq_return_iret:
-	INTERRUPT_RETURN
-	_ASM_EXTABLE(irq_return_iret, bad_iret)
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+native_irq_return_iret:
 	iretq
-	_ASM_EXTABLE(native_iret, bad_iret)
-#endif
+	_ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
-irq_return_ldt:
+native_irq_return_ldt:
 	pushq_cfi %rax
 	pushq_cfi %rdi
 	SWAPGS
@@ -1098,7 +1095,7 @@
 	SWAPGS
 	movq %rax,%rsp
 	popq_cfi %rax
-	jmp irq_return_iret
+	jmp native_irq_return_iret
 #endif
 
 	.section .fixup,"ax"
@@ -1184,13 +1181,8 @@
 	cmpl $__KERNEL_CS,CS(%rdi)
 	jne do_double_fault
 	movq RIP(%rdi),%rax
-	cmpq $irq_return_iret,%rax
-#ifdef CONFIG_PARAVIRT
-	je 1f
-	cmpq $native_iret,%rax
-#endif
+	cmpq $native_irq_return_iret,%rax
 	jne do_double_fault		/* This shouldn't happen... */
-1:
 	movq PER_CPU_VAR(kernel_stack),%rax
 	subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
 	movq %rax,RSP(%rdi)
@@ -1658,7 +1650,7 @@
 	 */
 error_kernelspace:
 	incl %ebx
-	leaq irq_return_iret(%rip),%rcx
+	leaq native_irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
 	je error_swapgs
 	movl %ecx,%eax			/* zero extend */
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 3f08f34..a1da673 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -6,7 +6,6 @@
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@
 		PATCH_SITE(pv_irq_ops, save_fl);
 		PATCH_SITE(pv_irq_ops, irq_enable);
 		PATCH_SITE(pv_irq_ops, irq_disable);
-		PATCH_SITE(pv_cpu_ops, iret);
 		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret32);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret64);