RESTORE_ARGS 1,8,1
irq_return:
INTERRUPT_RETURN
- _ASM_EXTABLE(irq_return, bad_iret)

- #ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
+ /*
+ * Are we returning to a stack segment from the LDT? Note: in
+ * 64-bit mode SS:RSP on the exception stack is always valid.
+ */
+ #ifdef CONFIG_X86_ESPFIX64
+ testb $4,(SS-RIP)(%rsp)
+ jnz native_irq_return_ldt
+ #endif
+
+ native_irq_return_iret:
iretq
- _ASM_EXTABLE(native_iret, bad_iret)
+ _ASM_EXTABLE(native_irq_return_iret, bad_iret)
+
+ #ifdef CONFIG_X86_ESPFIX64
+ native_irq_return_ldt:
+ pushq_cfi %rax
+ pushq_cfi %rdi
+ SWAPGS
+ movq PER_CPU_VAR(espfix_waddr),%rdi
+ movq %rax,(0*8)(%rdi) /* RAX */
+ movq (2*8)(%rsp),%rax /* RIP */
+ movq %rax,(1*8)(%rdi)
+ movq (3*8)(%rsp),%rax /* CS */
+ movq %rax,(2*8)(%rdi)
+ movq (4*8)(%rsp),%rax /* RFLAGS */
+ movq %rax,(3*8)(%rdi)
+ movq (6*8)(%rsp),%rax /* SS */
+ movq %rax,(5*8)(%rdi)
+ movq (5*8)(%rsp),%rax /* RSP */
+ movq %rax,(4*8)(%rdi)
+ andl $0xffff0000,%eax
+ popq_cfi %rdi
+ orq PER_CPU_VAR(espfix_stack),%rax
+ SWAPGS
+ movq %rax,%rsp
+ popq_cfi %rax
+ jmp native_irq_return_iret
#endif
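
For illustration only, not part of the patch: the testb $4,(SS-RIP)(%rsp) test checks bit 2 of the saved SS selector, the Table Indicator bit, which is set only for LDT selectors. On that path the handler copies the IRET frame into the per-CPU espfix area through espfix_waddr, then builds the new stack pointer by keeping bits 31:16 of the saved user RSP and taking every other bit from PER_CPU_VAR(espfix_stack), so the final iretq can only expose the user's own bits 31:16 rather than kernel stack bits. A minimal C sketch of that address mix, assuming, as the espfix setup arranges, that the per-CPU espfix stack address has bits 31:16 clear:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative sketch, not kernel code.  Models the
     * "andl $0xffff0000,%eax; orq PER_CPU_VAR(espfix_stack),%rax" step:
     * keep the user's RSP bits 31:16, take every other bit from the
     * per-CPU espfix stack address (assumed to have bits 31:16 clear).
     */
    static uint64_t espfix_new_rsp(uint64_t user_rsp, uint64_t espfix_stack)
    {
            /* andl with a 32-bit operand also clears bits 63:32 */
            return (user_rsp & 0xffff0000ULL) | espfix_stack;
    }

    int main(void)
    {
            uint64_t user_rsp     = 0x00000000deadbeefULL;  /* hypothetical */
            uint64_t espfix_stack = 0xffffff0000001000ULL;  /* hypothetical */

            printf("new rsp = 0x%llx\n",
                   (unsigned long long)espfix_new_rsp(user_rsp, espfix_stack));
            return 0;
    }

Compiled and run, this prints new rsp = 0xffffff00dead1000: the espfix address supplies the upper bits and the low 16 bits, while bits 31:16 come from the user RSP.
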
.section .fixup,"ax"
call preempt_schedule_irq
jmp exit_intr
#endif
-
CFI_ENDPROC
END(common_interrupt)
- /*
- * End of kprobes section
- */
- .popsection
+
+ /*
+ * If IRET takes a fault on the espfix stack, then we
+ * end up promoting it to a doublefault. In that case,
+ * modify the stack to make it look like we just entered
+ * the #GP handler from user space, similar to bad_iret.
+ */
+ #ifdef CONFIG_X86_ESPFIX64
+ ALIGN
+ __do_double_fault:
+ XCPT_FRAME 1 RDI+8
+ movq RSP(%rdi),%rax /* Trap on the espfix stack? */
+ sarq $PGDIR_SHIFT,%rax
+ cmpl $ESPFIX_PGD_ENTRY,%eax
+ jne do_double_fault /* No, just deliver the fault */
+ cmpl $__KERNEL_CS,CS(%rdi)
+ jne do_double_fault
+ movq RIP(%rdi),%rax
+ cmpq $native_irq_return_iret,%rax
+ jne do_double_fault /* This shouldn't happen... */
+ movq PER_CPU_VAR(kernel_stack),%rax
+ subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
+ movq %rax,RSP(%rdi)
+ movq $0,(%rax) /* Missing (lost) #GP error code */
+ movq $general_protection,RIP(%rdi)
+ retq
+ CFI_ENDPROC
+ END(__do_double_fault)
+ #else
+ # define __do_double_fault do_double_fault
+ #endif
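
For illustration only, not part of the patch: __do_double_fault recognizes an IRET fault on the espfix stack by the two checks shown above, the saved RSP must fall into the espfix PGD slot (sarq $PGDIR_SHIFT, then cmpl $ESPFIX_PGD_ENTRY) and the faulting RIP must be the iretq at native_irq_return_iret; only then is the frame rewritten so general_protection runs on the ordinary kernel stack. A small C sketch of the PGD-slot test, with PGDIR_SHIFT and ESPFIX_PGD_ENTRY values assumed from the usual x86-64 4-level layout:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative sketch, not kernel code.  Mirrors
     * "sarq $PGDIR_SHIFT,%rax; cmpl $ESPFIX_PGD_ENTRY,%eax":
     * does this stack pointer lie inside the espfix PGD entry?
     * Constants are assumptions (4-level paging, espfix in PGD slot -2);
     * the signed right shift is assumed to be arithmetic, as on x86-64.
     */
    #define PGDIR_SHIFT      39
    #define ESPFIX_PGD_ENTRY (-2L)

    static int on_espfix_stack(uint64_t rsp)
    {
            return ((int64_t)rsp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY;
    }

    int main(void)
    {
            uint64_t espfix_rsp = 0xffffff0000001000ULL;  /* hypothetical */
            uint64_t normal_rsp = 0xffff880012345678ULL;  /* hypothetical */

            printf("espfix: %d, normal: %d\n",
                   on_espfix_stack(espfix_rsp), on_espfix_stack(normal_rsp));
            return 0;
    }

With the hypothetical inputs above it prints espfix: 1, normal: 0; any address outside that one PGD slot falls through to the normal path, exactly as the jne do_double_fault branches do.
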
/*
 * APIC interrupts.
 */

 * compat mode. Check for these here too.
 */
error_kernelspace:
+ CFI_REL_OFFSET rcx, RCX+8
incl %ebx
- leaq irq_return(%rip),%rcx
+ leaq native_irq_return_iret(%rip),%rcx
cmpq %rcx,RIP+8(%rsp)
je error_swapgs
movl %ecx,%eax /* zero extend */