@@ -1600,6 +1600,14 @@ void asmlinkage do_page_fault(struct cpu_user_regs *regs)
addr = read_cr2();
+ /*
+ * Don't re-enable interrupts if the page fault was taken in an IRQs-off
+ * region, or we'd break that region's assumptions.
+ */
+ ASSERT(!local_irq_is_enabled());
+ if ( regs->flags & X86_EFLAGS_IF )
+ local_irq_enable();
+
/* fixup_page_fault() might change regs->error_code, so cache it here. */
error_code = regs->error_code;
@@ -844,9 +844,9 @@ handle_exception_saved:
#elif !defined(CONFIG_PV)
ASSERT_CONTEXT_IS_XEN
#endif /* CONFIG_PV */
- sti
-1: movq %rsp,%rdi
- movzbl UREGS_entry_vector(%rsp),%eax
+.Ldispatch_exceptions:
+ mov %rsp, %rdi
+ movzbl UREGS_entry_vector(%rsp), %eax
#ifdef CONFIG_PERF_COUNTERS
lea per_cpu__perfcounters(%rip), %rcx
add STACK_CPUINFO_FIELD(per_cpu_offset)(%r14), %rcx
@@ -866,7 +866,19 @@ handle_exception_saved:
jmp .L_exn_dispatch_done; \
.L_ ## vec ## _done:
+ /*
+ * IRQs are kept off to avoid taking a nested interrupt before %cr2 is
+ * read. Otherwise, a page fault inside the nested interrupt handler
+ * would overwrite %cr2 with its own faulting address.
+ */
DISPATCH(X86_EXC_PF, do_page_fault)
+
+ /* Only re-enable IRQs if they were enabled before taking the fault */
+ testb $X86_EFLAGS_IF >> 8, UREGS_eflags + 1(%rsp)
+ jz 1f
+ sti
+1:
+
DISPATCH(X86_EXC_GP, do_general_protection)
DISPATCH(X86_EXC_UD, do_invalid_op)
DISPATCH(X86_EXC_NM, do_device_not_available)
@@ -911,7 +923,7 @@ exception_with_ints_disabled:
movq %rsp,%rdi
call search_pre_exception_table
testq %rax,%rax # no fixup code for faulting EIP?
- jz 1b
+ jz .Ldispatch_exceptions
movq %rax,UREGS_rip(%rsp) # fixup regular stack
#ifdef CONFIG_XEN_SHSTK