Message ID | 20240122181714.1543738-5-andrew.cooper3@citrix.com (mailing list archive)
---|---
State | New, archived
Series | x86/entry: ELF fixes and improvments
On 22.01.2024 19:17, Andrew Cooper wrote:
> Each of these symbols is local to its main function. By not having them
> globally visible, livepatch's binary diffing logic can reason about the
> functions properly.

I'm not happy with this, and not only because of the way you're putting
things: "globally visible" is misleading - the symbols aren't global. What
you're doing is eliminating the local symbols from the symbol table
altogether, which in turn yields disassembly that is harder to follow.

I could perhaps be talked into accepting this as long as we agree not to
go as far as also converting labels which can be jumped to from other
functions. Yet even then ...

> @@ -859,9 +859,9 @@ handle_exception_saved:
>  #endif
>
>  /* No special register assumptions. */
> -exception_with_ints_disabled:
> +.L_exception_with_ints_disabled:
>          testb $3,UREGS_cs(%rsp)   # interrupts disabled outside Xen?
> -        jnz FATAL_exception_with_ints_disabled
> +        jnz .L_FATAL_exception_with_ints_disabled
>          movq %rsp,%rdi
>          call search_pre_exception_table
>          testq %rax,%rax           # no fixup code for faulting EIP?
> @@ -891,7 +891,7 @@ exception_with_ints_disabled:
>          jmp restore_all_xen       # return to fixup code
>
>  /* No special register assumptions. */
> -FATAL_exception_with_ints_disabled:
> +.L_FATAL_exception_with_ints_disabled:

... perhaps with the exception of this one label, I'd like to ask that .L
not be followed by an underscore. That carries no useful information, while
prefix and name are already properly separated by the difference in case
(see e.g. .Lcompat_bounce_exception).

To suggest a possible middle ground: can we perhaps have the .L prefixes
added only conditionally upon LIVEPATCH=y (by way of a suitable macro)?

Jan
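[Editor's note: for concreteness, the middle ground Jan floats could look
roughly like the sketch below. This is an illustration only: LOCAL_SYM is
an invented macro name, and the exact Kconfig gating and header placement
are assumptions, not existing Xen code.

/*
 * Hide labels from the symbol table only when livepatching is enabled,
 * keeping them visible otherwise for the sake of readable disassembly.
 * Relies on entry.S being run through the C preprocessor, as it already
 * is in the Xen build.
 */
#ifdef CONFIG_LIVEPATCH
# define LOCAL_SYM(name) .L##name   /* assembler-local: not in the symtab */
#else
# define LOCAL_SYM(name) name       /* regular label: kept for disassembly */
#endif

/* Usage in entry.S would then be, e.g.: */
LOCAL_SYM(exception_with_ints_disabled):
        testb $3,UREGS_cs(%rsp)     # interrupts disabled outside Xen?
        jnz LOCAL_SYM(FATAL_exception_with_ints_disabled)

Token pasting of "L" with the label name yields ".Lname", so references and
definitions stay in sync across both configurations.]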
On Mon, Jan 22, 2024 at 06:17:14PM +0000, Andrew Cooper wrote:
> Each of these symbols is local to its main function. By not having them
> globally visible, livepatch's binary diffing logic can reason about the
> functions properly.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Roger Pau Monné <roger.pau@citrix.com>
> CC: Wei Liu <wl@xen.org>
> CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> CC: Ross Lagerwall <ross.lagerwall@citrix.com>
>
> sysenter_eflags_saved() is an open question. This does need to be globally
> visible, and I think any livepatch modifying sysenter_entry() will need a
> manual adjustment to do_debug() to update the reference there.

Hm, yes, this stuff is indeed problematic. There's also the usage of
sysenter_entry in do_debug(), which will also need adjusting to account for
the position of the payload text replacement, as there's no explicit
guarantee that the payload will be loaded at an address greater than the
current sysenter_entry position.

> ---
>  xen/arch/x86/x86_64/compat/entry.S | 20 ++++++++++----------
>  xen/arch/x86/x86_64/entry.S        | 22 +++++++++++-----------
>  2 files changed, 21 insertions(+), 21 deletions(-)
>
> diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
> index 4fbd89cea1a9..1e9e0455eaf3 100644
> --- a/xen/arch/x86/x86_64/compat/entry.S
> +++ b/xen/arch/x86/x86_64/compat/entry.S
> @@ -41,7 +41,7 @@ FUNC(compat_test_all_events)
>          shll $IRQSTAT_shift,%eax
>          leaq irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
>          cmpl $0,(%rcx,%rax,1)
> -        jne compat_process_softirqs
> +        jne .L_compat_process_softirqs
>
>          /* Inject exception if pending. */
>          lea VCPU_trap_bounce(%rbx), %rdx
> @@ -49,11 +49,11 @@ FUNC(compat_test_all_events)
>          jnz .Lcompat_process_trapbounce
>
>          cmpb $0, VCPU_mce_pending(%rbx)
> -        jne compat_process_mce
> +        jne .L_compat_process_mce
>  .Lcompat_test_guest_nmi:
>          cmpb $0, VCPU_nmi_pending(%rbx)
> -        jne compat_process_nmi
> -compat_test_guest_events:
> +        jne .L_compat_process_nmi
> +.L_compat_test_guest_events:
>          movq VCPU_vcpu_info(%rbx),%rax
>          movzwl COMPAT_VCPUINFO_upcall_pending(%rax),%eax
>          decl %eax
> @@ -71,7 +71,7 @@ compat_test_guest_events:
>          jmp compat_test_all_events
>
>  /* %rbx: struct vcpu */
> -LABEL_LOCAL(compat_process_softirqs)
> +LABEL_LOCAL(.L_compat_process_softirqs)
>          sti
>          call do_softirq
>          jmp compat_test_all_events
> @@ -84,7 +84,7 @@ LABEL_LOCAL(.Lcompat_process_trapbounce)
>          jmp compat_test_all_events
>
>  /* %rbx: struct vcpu */
> -LABEL_LOCAL(compat_process_mce)
> +LABEL_LOCAL(.L_compat_process_mce)
>          testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
>          jnz .Lcompat_test_guest_nmi
>          sti
> @@ -96,12 +96,12 @@ LABEL_LOCAL(compat_process_mce)
>          movb %dl,VCPU_mce_old_mask(%rbx)   # iret hypercall
>          orl $1 << VCPU_TRAP_MCE,%edx
>          movb %dl,VCPU_async_exception_mask(%rbx)
> -        jmp compat_process_trap
> +        jmp .L_compat_process_trap
>
>  /* %rbx: struct vcpu */
> -LABEL_LOCAL(compat_process_nmi)
> +LABEL_LOCAL(.L_compat_process_nmi)
>          testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
> -        jnz compat_test_guest_events
> +        jnz .L_compat_test_guest_events
>          sti
>          movb $0, VCPU_nmi_pending(%rbx)
>          call set_guest_nmi_trapbounce
> @@ -112,7 +112,7 @@ LABEL_LOCAL(compat_process_nmi)
>          orl $1 << VCPU_TRAP_NMI,%edx
>          movb %dl,VCPU_async_exception_mask(%rbx)
>          /* FALLTHROUGH */
> -compat_process_trap:
> +.L_compat_process_trap:
>          leaq VCPU_trap_bounce(%rbx),%rdx
>          call compat_create_bounce_frame
>          jmp compat_test_all_events
> diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
> index fc64ef1fd460..130462ba0e1a 100644
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -188,7 +188,7 @@ FUNC_LOCAL(restore_all_guest)
>
>          RESTORE_ALL
>          testw $TRAP_syscall,4(%rsp)
> -        jz iret_exit_to_guest
> +        jz .L_iret_exit_to_guest
>
>          movq 24(%rsp),%r11   # RFLAGS
>          andq $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11
> @@ -220,7 +220,7 @@ FUNC_LOCAL(restore_all_guest)
>  LABEL_LOCAL(.Lrestore_rcx_iret_exit_to_guest)
>          movq 8(%rsp), %rcx   # RIP
>  /* No special register assumptions. */
> -iret_exit_to_guest:
> +.L_iret_exit_to_guest:
>          andl $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), 24(%rsp)
>          orl $X86_EFLAGS_IF,24(%rsp)
>          addq $8,%rsp
> @@ -432,10 +432,10 @@ UNLIKELY_END(msi_check)
>          cmove %rdi, %rdx
>
>          test %rdx, %rdx
> -        jz int80_slow_path
> +        jz .L_int80_slow_path
>  #else
>          test %rdi, %rdi
> -        jz int80_slow_path
> +        jz .L_int80_slow_path
>  #endif
>
>          /* Construct trap_bounce from trap_ctxt[0x80]. */
> @@ -457,7 +457,7 @@ UNLIKELY_END(msi_check)
>          call create_bounce_frame
>          jmp test_all_events
>
> -int80_slow_path:
> +.L_int80_slow_path:
>          /*
>           * Setup entry vector and error code as if this was a GPF caused by an
>           * IDT entry with DPL==0.
> @@ -472,7 +472,7 @@ int80_slow_path:
>           * need to set up %r14 here, while %r15 is required to still be zero.
>           */
>          GET_STACK_END(14)
> -        jmp handle_exception_saved
> +        jmp .L_handle_exception_saved

This one is IMO problematic, as you are jumping from the function symbol
entry_int80 into handle_exception. Any livepatch that modifies
handle_exception will quite likely break that jump, as the jump won't be
relocated to point at the new position of the local label inside of the new
handle_exception text.

handle_exception needs further work in order to be livepatched, and making
handle_exception_saved local makes it look like it's safe to patch, while
it's not. We need to split handle_exception_saved into a separate function,
so that it has its own section, as long as it's referenced from other
functions.

Thanks, Roger.
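[Editor's note: a rough sketch of the split Roger asks for, reusing the
FUNC()/END() annotations already present in entry.S. This is untested and
assumption-laden - in particular, whether the annotated function actually
lands in its own .text.* section depends on the build's section-splitting
setup, and the fall-through handling is only noted, not implemented.

/*
 * Sketch: promote the shared tail to a properly annotated function, so it
 * keeps a real symbol (and, with section splitting, its own section).
 * Cross-function jumps such as the one from entry_int80 then resolve via
 * an ordinary relocation that livepatch can see and fix up.
 * NB: handle_exception currently falls through into this code, so the
 * split would also need an explicit jmp at the end of handle_exception.
 */
FUNC(handle_exception_saved)
        GET_CURRENT(bx)
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz .L_exception_with_ints_disabled
        /* ... remainder of the existing tail ... */
END(handle_exception_saved)]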
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 4fbd89cea1a9..1e9e0455eaf3 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -41,7 +41,7 @@ FUNC(compat_test_all_events)
         shll $IRQSTAT_shift,%eax
         leaq irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
         cmpl $0,(%rcx,%rax,1)
-        jne compat_process_softirqs
+        jne .L_compat_process_softirqs
 
         /* Inject exception if pending. */
         lea VCPU_trap_bounce(%rbx), %rdx
@@ -49,11 +49,11 @@ FUNC(compat_test_all_events)
         jnz .Lcompat_process_trapbounce
 
         cmpb $0, VCPU_mce_pending(%rbx)
-        jne compat_process_mce
+        jne .L_compat_process_mce
 .Lcompat_test_guest_nmi:
         cmpb $0, VCPU_nmi_pending(%rbx)
-        jne compat_process_nmi
-compat_test_guest_events:
+        jne .L_compat_process_nmi
+.L_compat_test_guest_events:
         movq VCPU_vcpu_info(%rbx),%rax
         movzwl COMPAT_VCPUINFO_upcall_pending(%rax),%eax
         decl %eax
@@ -71,7 +71,7 @@ compat_test_guest_events:
         jmp compat_test_all_events
 
 /* %rbx: struct vcpu */
-LABEL_LOCAL(compat_process_softirqs)
+LABEL_LOCAL(.L_compat_process_softirqs)
         sti
         call do_softirq
         jmp compat_test_all_events
@@ -84,7 +84,7 @@ LABEL_LOCAL(.Lcompat_process_trapbounce)
         jmp compat_test_all_events
 
 /* %rbx: struct vcpu */
-LABEL_LOCAL(compat_process_mce)
+LABEL_LOCAL(.L_compat_process_mce)
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
         jnz .Lcompat_test_guest_nmi
         sti
@@ -96,12 +96,12 @@ LABEL_LOCAL(compat_process_mce)
         movb %dl,VCPU_mce_old_mask(%rbx)   # iret hypercall
         orl $1 << VCPU_TRAP_MCE,%edx
         movb %dl,VCPU_async_exception_mask(%rbx)
-        jmp compat_process_trap
+        jmp .L_compat_process_trap
 
 /* %rbx: struct vcpu */
-LABEL_LOCAL(compat_process_nmi)
+LABEL_LOCAL(.L_compat_process_nmi)
         testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
-        jnz compat_test_guest_events
+        jnz .L_compat_test_guest_events
         sti
         movb $0, VCPU_nmi_pending(%rbx)
         call set_guest_nmi_trapbounce
@@ -112,7 +112,7 @@ LABEL_LOCAL(compat_process_nmi)
         orl $1 << VCPU_TRAP_NMI,%edx
         movb %dl,VCPU_async_exception_mask(%rbx)
         /* FALLTHROUGH */
-compat_process_trap:
+.L_compat_process_trap:
         leaq VCPU_trap_bounce(%rbx),%rdx
         call compat_create_bounce_frame
         jmp compat_test_all_events
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index fc64ef1fd460..130462ba0e1a 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -188,7 +188,7 @@ FUNC_LOCAL(restore_all_guest)
 
         RESTORE_ALL
         testw $TRAP_syscall,4(%rsp)
-        jz iret_exit_to_guest
+        jz .L_iret_exit_to_guest
 
         movq 24(%rsp),%r11   # RFLAGS
         andq $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11
@@ -220,7 +220,7 @@ FUNC_LOCAL(restore_all_guest)
 LABEL_LOCAL(.Lrestore_rcx_iret_exit_to_guest)
         movq 8(%rsp), %rcx   # RIP
 /* No special register assumptions. */
-iret_exit_to_guest:
+.L_iret_exit_to_guest:
         andl $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), 24(%rsp)
         orl $X86_EFLAGS_IF,24(%rsp)
         addq $8,%rsp
@@ -432,10 +432,10 @@ UNLIKELY_END(msi_check)
         cmove %rdi, %rdx
 
         test %rdx, %rdx
-        jz int80_slow_path
+        jz .L_int80_slow_path
 #else
         test %rdi, %rdi
-        jz int80_slow_path
+        jz .L_int80_slow_path
 #endif
 
         /* Construct trap_bounce from trap_ctxt[0x80]. */
@@ -457,7 +457,7 @@ UNLIKELY_END(msi_check)
         call create_bounce_frame
         jmp test_all_events
 
-int80_slow_path:
+.L_int80_slow_path:
         /*
          * Setup entry vector and error code as if this was a GPF caused by an
          * IDT entry with DPL==0.
@@ -472,7 +472,7 @@ int80_slow_path:
          * need to set up %r14 here, while %r15 is required to still be zero.
          */
         GET_STACK_END(14)
-        jmp handle_exception_saved
+        jmp .L_handle_exception_saved
 END(entry_int80)
 
 /* create_bounce_frame & helpers don't need to be in .text.entry */
@@ -750,10 +750,10 @@ FUNC(handle_exception, 0)
         cmovnz %r12d, %r13d
 .Lxcpt_cr3_okay:
 
-handle_exception_saved:
+.L_handle_exception_saved:
         GET_CURRENT(bx)
         testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
-        jz exception_with_ints_disabled
+        jz .L_exception_with_ints_disabled
 
 #if defined(CONFIG_PV32)
         ALTERNATIVE_2 "jmp .Lcr4_pv32_done", \
@@ -859,9 +859,9 @@ handle_exception_saved:
 #endif
 
 /* No special register assumptions. */
-exception_with_ints_disabled:
+.L_exception_with_ints_disabled:
         testb $3,UREGS_cs(%rsp)   # interrupts disabled outside Xen?
-        jnz FATAL_exception_with_ints_disabled
+        jnz .L_FATAL_exception_with_ints_disabled
         movq %rsp,%rdi
         call search_pre_exception_table
         testq %rax,%rax           # no fixup code for faulting EIP?
@@ -891,7 +891,7 @@ exception_with_ints_disabled:
         jmp restore_all_xen       # return to fixup code
 
 /* No special register assumptions. */
-FATAL_exception_with_ints_disabled:
+.L_FATAL_exception_with_ints_disabled:
         xorl %esi,%esi
         movq %rsp,%rdi
         tailcall fatal_trap
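[Editor's note: as background for the renames above - the GNU assembler
treats symbols whose names begin with .L as local labels and omits them
from the ELF symbol table entirely, which is what lets livepatch's
function-granular diffing treat each annotated function as a single unit.
A minimal stand-alone illustration, not part of the patch:

        .text
        .globl func_a
func_a:
        testb $1, %al
        jz .Lfunc_a_tail   # .L name: resolved at assembly time,
                           # no entry emitted in the symbol table
.Lfunc_a_tail:
        ret

Had the tail label been spelled without the .L prefix, it would appear in
the symbol table as a local symbol and could be mistaken for a function
boundary by binary-diffing tools.]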
Each of these symbols is local to its main function. By not having them
globally visible, livepatch's binary diffing logic can reason about the
functions properly.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CC: Ross Lagerwall <ross.lagerwall@citrix.com>

sysenter_eflags_saved() is an open question. This does need to be globally
visible, and I think any livepatch modifying sysenter_entry() will need a
manual adjustment to do_debug() to update the reference there.
---
 xen/arch/x86/x86_64/compat/entry.S | 20 ++++++++++----------
 xen/arch/x86/x86_64/entry.S        | 22 +++++++++++-----------
 2 files changed, 21 insertions(+), 21 deletions(-)
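[Editor's note: to make the do_debug() concern raised above and in Roger's
reply concrete - the hazard is a C-side address range check against the
assembly entry stub, along the lines of the simplified sketch below. The
symbol names follow the Xen sources, but the function and its body are
illustrative, not verbatim Xen code.

#include <stdbool.h>

/* Start/end markers exported by the assembly sysenter entry path. */
extern char sysenter_entry[], sysenter_eflags_saved[];

/* Illustrative sketch of the kind of check do_debug() performs. */
static bool rip_in_sysenter_prologue(unsigned long rip)
{
    /*
     * A livepatch replacing sysenter_entry() moves the live text into the
     * payload, yet this comparison keeps bracketing the old copy - hence
     * the need for a manual fixup alongside any such patch.
     */
    return rip >= (unsigned long)sysenter_entry &&
           rip < (unsigned long)sysenter_eflags_saved;
}]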