Message ID | 20230915150038.602577-7-andrew.cooper3@citrix.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | x86/spec-ctrl: AMD DIV fix, and VERW prerequisite bugfixes | expand |
On 15.09.2023 17:00, Andrew Cooper wrote: > Use %r12 to hold an ist_exit boolean. This register is zero elsewhere in the > entry/exit asm, so it only needs setting in the IST path. > > As this is subtle and fragile, add check_ist_exit() to be used in debugging > builds to cross-check that the ist_exit boolean matches the entry vector. > > Write check_ist_exit() it in C, because it's debug only and the logic more > complicated than I care to maintain in asm. > > For now, we only need to use this signal in the exit-to-Xen path, but some > exit-to-guest paths happen in IST context too. Check the correctness in all > exit paths to avoid the logic bitrotting. > > Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> Reviewed-by: Jan Beulich <jbeulich@suse.com> I understand you then didn't like the idea of macro-izing ... > --- a/xen/arch/x86/x86_64/compat/entry.S > +++ b/xen/arch/x86/x86_64/compat/entry.S > @@ -117,8 +117,15 @@ compat_process_trap: > call compat_create_bounce_frame > jmp compat_test_all_events > > -/* %rbx: struct vcpu, interrupts disabled */ > +/* %rbx: struct vcpu, %r12: ist_exit, interrupts disabled */ > ENTRY(compat_restore_all_guest) > + > +#ifdef CONFIG_DEBUG > + mov %rsp, %rdi > + mov %r12, %rsi > + call check_ist_exit > +#endif > + > ASSERT_INTERRUPTS_DISABLED > mov $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11d > and UREGS_eflags(%rsp),%r11d > --- a/xen/arch/x86/x86_64/entry.S > +++ b/xen/arch/x86/x86_64/entry.S > @@ -142,8 +142,15 @@ process_trap: > > .section .text.entry, "ax", @progbits > > -/* %rbx: struct vcpu, interrupts disabled */ > +/* %rbx: struct vcpu, %r12: ist_exit, interrupts disabled */ > restore_all_guest: > + > +#ifdef CONFIG_DEBUG > + mov %rsp, %rdi > + mov %r12, %rsi > + call check_ist_exit > +#endif > + > ASSERT_INTERRUPTS_DISABLED > > /* Stash guest SPEC_CTRL value while we can read struct vcpu. 
*/ > @@ -659,8 +666,15 @@ ENTRY(early_page_fault) > .section .text.entry, "ax", @progbits > > ALIGN > -/* No special register assumptions. */ > +/* %r12=ist_exit */ > restore_all_xen: > + > +#ifdef CONFIG_DEBUG > + mov %rsp, %rdi > + mov %r12, %rsi > + call check_ist_exit > +#endif ... these three instances of identical code you add, along the lines of ASSERT_INTERRUPTS_DISABLED? Jan
On 18/09/2023 12:02 pm, Jan Beulich wrote: > On 15.09.2023 17:00, Andrew Cooper wrote: >> Use %r12 to hold an ist_exit boolean. This register is zero elsewhere in the >> entry/exit asm, so it only needs setting in the IST path. >> >> As this is subtle and fragile, add check_ist_exit() to be used in debugging >> builds to cross-check that the ist_exit boolean matches the entry vector. >> >> Write check_ist_exit() it in C, because it's debug only and the logic more >> complicated than I care to maintain in asm. >> >> For now, we only need to use this signal in the exit-to-Xen path, but some >> exit-to-guest paths happen in IST context too. Check the correctness in all >> exit paths to avoid the logic bitrotting. >> >> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> > Reviewed-by: Jan Beulich <jbeulich@suse.com> Thanks. > > I understand you then didn't like the idea of macro-izing ... > >> --- a/xen/arch/x86/x86_64/compat/entry.S >> +++ b/xen/arch/x86/x86_64/compat/entry.S >> @@ -117,8 +117,15 @@ compat_process_trap: >> call compat_create_bounce_frame >> jmp compat_test_all_events >> >> -/* %rbx: struct vcpu, interrupts disabled */ >> +/* %rbx: struct vcpu, %r12: ist_exit, interrupts disabled */ >> ENTRY(compat_restore_all_guest) >> + >> +#ifdef CONFIG_DEBUG >> + mov %rsp, %rdi >> + mov %r12, %rsi >> + call check_ist_exit >> +#endif >> + >> ASSERT_INTERRUPTS_DISABLED >> mov $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11d >> and UREGS_eflags(%rsp),%r11d >> --- a/xen/arch/x86/x86_64/entry.S >> +++ b/xen/arch/x86/x86_64/entry.S >> @@ -142,8 +142,15 @@ process_trap: >> >> .section .text.entry, "ax", @progbits >> >> -/* %rbx: struct vcpu, interrupts disabled */ >> +/* %rbx: struct vcpu, %r12: ist_exit, interrupts disabled */ >> restore_all_guest: >> + >> +#ifdef CONFIG_DEBUG >> + mov %rsp, %rdi >> + mov %r12, %rsi >> + call check_ist_exit >> +#endif >> + >> ASSERT_INTERRUPTS_DISABLED >> >> /* Stash guest SPEC_CTRL value while we can read struct vcpu. 
*/ >> @@ -659,8 +666,15 @@ ENTRY(early_page_fault) >> .section .text.entry, "ax", @progbits >> >> ALIGN >> -/* No special register assumptions. */ >> +/* %r12=ist_exit */ >> restore_all_xen: >> + >> +#ifdef CONFIG_DEBUG >> + mov %rsp, %rdi >> + mov %r12, %rsi >> + call check_ist_exit >> +#endif > ... these three instances of identical code you add, along the lines of > ASSERT_INTERRUPTS_DISABLED? There's no header that's unique to just the entry.S's, and it's only 3 instructions that need a very specific stack and state layout. The SPEC_CTRL_* macros are already a giant source of pain, and for 3 instructions I don't think the complexity of the abstraction is worth it. Furthermore, I've got some fixes to the other ASSERT_* macros which are going to make them a bit more like this. ~Andrew
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index dead728ce329..0a005f088bca 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -2259,6 +2259,19 @@ void asm_domain_crash_synchronous(unsigned long addr) do_softirq(); } +#ifdef CONFIG_DEBUG +void check_ist_exit(const struct cpu_user_regs *regs, bool ist_exit) +{ + const unsigned int ist_mask = + (1U << X86_EXC_NMI) | (1U << X86_EXC_DB) | + (1U << X86_EXC_DF) | (1U << X86_EXC_MC); + uint8_t ev = regs->entry_vector; + bool is_ist = (ev < X86_EXC_NUM) && ((1U << ev) & ist_mask); + + ASSERT(is_ist == ist_exit); +} +#endif + /* * Local variables: * mode: C diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S index bd5abd8040bd..7504bfb4f326 100644 --- a/xen/arch/x86/x86_64/compat/entry.S +++ b/xen/arch/x86/x86_64/compat/entry.S @@ -117,8 +117,15 @@ compat_process_trap: call compat_create_bounce_frame jmp compat_test_all_events -/* %rbx: struct vcpu, interrupts disabled */ +/* %rbx: struct vcpu, %r12: ist_exit, interrupts disabled */ ENTRY(compat_restore_all_guest) + +#ifdef CONFIG_DEBUG + mov %rsp, %rdi + mov %r12, %rsi + call check_ist_exit +#endif + ASSERT_INTERRUPTS_DISABLED mov $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11d and UREGS_eflags(%rsp),%r11d diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S index 525877e97330..e5055e5bbf9f 100644 --- a/xen/arch/x86/x86_64/entry.S +++ b/xen/arch/x86/x86_64/entry.S @@ -142,8 +142,15 @@ process_trap: .section .text.entry, "ax", @progbits -/* %rbx: struct vcpu, interrupts disabled */ +/* %rbx: struct vcpu, %r12: ist_exit, interrupts disabled */ restore_all_guest: + +#ifdef CONFIG_DEBUG + mov %rsp, %rdi + mov %r12, %rsi + call check_ist_exit +#endif + ASSERT_INTERRUPTS_DISABLED /* Stash guest SPEC_CTRL value while we can read struct vcpu. */ @@ -659,8 +666,15 @@ ENTRY(early_page_fault) .section .text.entry, "ax", @progbits ALIGN -/* No special register assumptions. 
*/ +/* %r12=ist_exit */ restore_all_xen: + +#ifdef CONFIG_DEBUG + mov %rsp, %rdi + mov %r12, %rsi + call check_ist_exit +#endif + /* * Check whether we need to switch to the per-CPU page tables, in * case we return to late PV exit code (from an NMI or #MC). @@ -1087,6 +1101,10 @@ handle_ist_exception: .L_ist_dispatch_done: mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14) mov %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14) + + /* This is an IST exit */ + mov $1, %r12d + cmpb $X86_EXC_NMI, UREGS_entry_vector(%rsp) jne ret_from_intr
Use %r12 to hold an ist_exit boolean. This register is zero elsewhere in the entry/exit asm, so it only needs setting in the IST path. As this is subtle and fragile, add check_ist_exit() to be used in debugging builds to cross-check that the ist_exit boolean matches the entry vector. Write check_ist_exit() in C, because it's debug only and the logic is more complicated than I care to maintain in asm. For now, we only need to use this signal in the exit-to-Xen path, but some exit-to-guest paths happen in IST context too. Check the correctness in all exit paths to avoid the logic bitrotting. Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> --- CC: Jan Beulich <JBeulich@suse.com> CC: Roger Pau Monné <roger.pau@citrix.com> CC: Wei Liu <wl@xen.org> v2: * %r12 -> %r12d * Extend commit message * Tweak surrounding context --- xen/arch/x86/traps.c | 13 +++++++++++++ xen/arch/x86/x86_64/compat/entry.S | 9 ++++++++- xen/arch/x86/x86_64/entry.S | 22 ++++++++++++++++++++-- 3 files changed, 41 insertions(+), 3 deletions(-)