
[kvm-unit-tests,v2,1/5] x86: emulator.c cleanup: Save and restore exception handlers

Message ID 20220807142832.1576-2-mhal@rbox.co (mailing list archive)
State New, archived
Series Test for illegal LEA & related fixes

Commit Message

Michal Luczaj Aug. 7, 2022, 2:28 p.m. UTC
Users of handle_exception() should always save and restore the handlers.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Michal Luczaj <mhal@rbox.co>
---
v1 -> v2: No changes

 x86/emulator.c | 78 ++++++++++++++++++++++++++------------------------
 1 file changed, 41 insertions(+), 37 deletions(-)

Comments

Sean Christopherson Aug. 8, 2022, 3:27 p.m. UTC | #1
On Sun, Aug 07, 2022, Michal Luczaj wrote:
> Users of handle_exception() should always save and restore the handlers.

Might be worth calling out that #UD is intentionally left alone and will be fixed
separately.  No need to spin a new version though; Paolo can add the note (or
not) if he wants.

> Suggested-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Michal Luczaj <mhal@rbox.co>
> ---

Reviewed-by: Sean Christopherson <seanjc@google.com>
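
For reference, the idiom the series converts emulator.c to looks like the
following. This is only a minimal sketch assuming the kvm-unit-tests library
helpers (handler, handle_exception(), GP_VECTOR from lib/x86); count_gp(),
faults and example_test() are illustrative names, not code from the patch:

	#include "libcflat.h"
	#include "processor.h"	/* GP_VECTOR */
	#include "desc.h"	/* handler, handle_exception(), struct ex_regs */

	static unsigned long faults;

	/* Illustrative #GP handler; the real handlers in emulator.c also
	 * advance regs->rip past the faulting instruction. */
	static void count_gp(struct ex_regs *regs)
	{
		faults++;
	}

	static void example_test(void)
	{
		handler old;

		/* handle_exception() returns the handler it replaces... */
		old = handle_exception(GP_VECTOR, count_gp);

		/* ... run the instruction expected to #GP ... */

		/* ... so restore it rather than installing a NULL handler. */
		handle_exception(GP_VECTOR, old);
	}

Restoring "old" instead of passing 0 keeps whatever handler the harness or an
earlier test had installed, which is the point of the cleanup.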

Patch

diff --git a/x86/emulator.c b/x86/emulator.c
index cd78e3c..769a049 100644
--- a/x86/emulator.c
+++ b/x86/emulator.c
@@ -710,6 +710,7 @@  static __attribute__((target("sse2"))) void test_sse_exceptions(void *cross_mem)
 	void *page2 = (void *)(&bytes[4096]);
 	struct pte_search search;
 	pteval_t orig_pte;
+	handler old;
 
 	// setup memory for unaligned access
 	mem = (uint32_t *)(&bytes[8]);
@@ -725,10 +726,10 @@  static __attribute__((target("sse2"))) void test_sse_exceptions(void *cross_mem)
 	asm("movupd %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
 	report(sseeq(v, mem), "movupd unaligned");
 	exceptions = 0;
-	handle_exception(GP_VECTOR, unaligned_movaps_handler);
+	old = handle_exception(GP_VECTOR, unaligned_movaps_handler);
 	asm("movaps %1, %0\n\t unaligned_movaps_cont:"
 			: "=m"(*mem) : "x"(vv));
-	handle_exception(GP_VECTOR, 0);
+	handle_exception(GP_VECTOR, old);
 	report(exceptions == 1, "unaligned movaps exception");
 
 	// setup memory for cross page access
@@ -746,10 +747,10 @@  static __attribute__((target("sse2"))) void test_sse_exceptions(void *cross_mem)
 	invlpg(page2);
 
 	exceptions = 0;
-	handle_exception(PF_VECTOR, cross_movups_handler);
+	old = handle_exception(PF_VECTOR, cross_movups_handler);
 	asm("movups %1, %0\n\t cross_movups_cont:" : "=m"(*mem) : "x"(vv) :
 			"memory");
-	handle_exception(PF_VECTOR, 0);
+	handle_exception(PF_VECTOR, old);
 	report(exceptions == 1, "movups crosspage exception");
 
 	// restore invalidated page
@@ -817,36 +818,38 @@  static void advance_rip_and_note_exception(struct ex_regs *regs)
 
 static void test_mmx_movq_mf(uint64_t *mem)
 {
-    /* movq %mm0, (%rax) */
-    extern char movq_start, movq_end;
-
-    uint16_t fcw = 0;  /* all exceptions unmasked */
-    write_cr0(read_cr0() & ~6);  /* TS, EM */
-    exceptions = 0;
-    handle_exception(MF_VECTOR, advance_rip_and_note_exception);
-    asm volatile("fninit; fldcw %0" : : "m"(fcw));
-    asm volatile("fldz; fldz; fdivp"); /* generate exception */
-
-    rip_advance = &movq_end - &movq_start;
-    asm(KVM_FEP "movq_start: movq %mm0, (%rax); movq_end:");
-    /* exit MMX mode */
-    asm volatile("fnclex; emms");
-    report(exceptions == 1, "movq mmx generates #MF");
-    handle_exception(MF_VECTOR, 0);
+	/* movq %mm0, (%rax) */
+	extern char movq_start, movq_end;
+	handler old;
+
+	uint16_t fcw = 0;  /* all exceptions unmasked */
+	write_cr0(read_cr0() & ~6);  /* TS, EM */
+	exceptions = 0;
+	old = handle_exception(MF_VECTOR, advance_rip_and_note_exception);
+	asm volatile("fninit; fldcw %0" : : "m"(fcw));
+	asm volatile("fldz; fldz; fdivp"); /* generate exception */
+
+	rip_advance = &movq_end - &movq_start;
+	asm(KVM_FEP "movq_start: movq %mm0, (%rax); movq_end:");
+	/* exit MMX mode */
+	asm volatile("fnclex; emms");
+	report(exceptions == 1, "movq mmx generates #MF");
+	handle_exception(MF_VECTOR, old);
 }
 
 static void test_jmp_noncanonical(uint64_t *mem)
 {
 	extern char nc_jmp_start, nc_jmp_end;
+	handler old;
 
 	*mem = 0x1111111111111111ul;
 
 	exceptions = 0;
 	rip_advance = &nc_jmp_end - &nc_jmp_start;
-	handle_exception(GP_VECTOR, advance_rip_and_note_exception);
+	old = handle_exception(GP_VECTOR, advance_rip_and_note_exception);
 	asm volatile ("nc_jmp_start: jmp *%0; nc_jmp_end:" : : "m"(*mem));
 	report(exceptions == 1, "jump to non-canonical address");
-	handle_exception(GP_VECTOR, 0);
+	handle_exception(GP_VECTOR, old);
 }
 
 static void test_movabs(uint64_t *mem)
@@ -979,22 +982,23 @@  static void ss_bad_rpl(struct ex_regs *regs)
 
 static void test_sreg(volatile uint16_t *mem)
 {
-    u16 ss = read_ss();
+	u16 ss = read_ss();
+	handler old;
 
-    // check for null segment load
-    *mem = 0;
-    asm volatile("mov %0, %%ss" : : "m"(*mem));
-    report(read_ss() == 0, "mov null, %%ss");
-
-    // check for exception when ss.rpl != cpl on null segment load
-    exceptions = 0;
-    handle_exception(GP_VECTOR, ss_bad_rpl);
-    *mem = 3;
-    asm volatile("mov %0, %%ss; ss_bad_rpl_cont:" : : "m"(*mem));
-    report(exceptions == 1 && read_ss() == 0,
-           "mov null, %%ss (with ss.rpl != cpl)");
-    handle_exception(GP_VECTOR, 0);
-    write_ss(ss);
+	// check for null segment load
+	*mem = 0;
+	asm volatile("mov %0, %%ss" : : "m"(*mem));
+	report(read_ss() == 0, "mov null, %%ss");
+
+	// check for exception when ss.rpl != cpl on null segment load
+	exceptions = 0;
+	old = handle_exception(GP_VECTOR, ss_bad_rpl);
+	*mem = 3;
+	asm volatile("mov %0, %%ss; ss_bad_rpl_cont:" : : "m"(*mem));
+	report(exceptions == 1 && read_ss() == 0,
+	       "mov null, %%ss (with ss.rpl != cpl)");
+	handle_exception(GP_VECTOR, old);
+	write_ss(ss);
 }
 
 static uint64_t usr_gs_mov(void)