KVM: VMX: access regs array in vmenter.S in its natural order

Message ID 20200310171024.15528-1-ubizjak@gmail.com (mailing list archive)
State New, archived
Series KVM: VMX: access regs array in vmenter.S in its natural order

Commit Message

Uros Bizjak March 10, 2020, 5:10 p.m. UTC
Registers in "regs" array are indexed as rax/rcx/rdx/.../rsi/rdi/r8/...
Reorder access to "regs" array in vmenter.S to follow its natural order.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
 arch/x86/kvm/vmx/vmenter.S | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
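
For readers wondering what "its natural order" means here: the loads and
stores in vmenter.S index into the vcpu "regs" array, and (as the offsets
quoted later in the thread confirm) that array is laid out in the hardware
register-numbering order rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8..r15.
A rough sketch of that index order, using illustrative names that do not
claim to match the kernel's exact macro names:

	/*
	 * Illustrative only: the regs[] index order the patch follows.
	 * The real definitions live in the kernel headers; the names
	 * here are made up for clarity.
	 */
	enum regs_index {
		REG_RAX = 0,
		REG_RCX = 1,
		REG_RDX = 2,
		REG_RBX = 3,
		REG_RSP = 4,	/* not loaded/stored here; hardware handles RSP */
		REG_RBP = 5,
		REG_RSI = 6,
		REG_RDI = 7,
		REG_R8  = 8,	/* R9..R15 follow as 9..15 on 64-bit */
	};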

Comments

Sean Christopherson March 10, 2020, 6:24 p.m. UTC | #1
On Tue, Mar 10, 2020 at 06:10:24PM +0100, Uros Bizjak wrote:
> Registers in "regs" array are indexed as rax/rcx/rdx/.../rsi/rdi/r8/...
> Reorder access to "regs" array in vmenter.S to follow its natural order.

Any reason other than preference?  I wouldn't exactly call the register
indices "natural", e.g. IMO it's easier to visually confirm correctness if
A/B/C/D are ordered alphabetically.

Uros Bizjak March 10, 2020, 7:16 p.m. UTC | #2
On Tue, Mar 10, 2020 at 7:24 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Tue, Mar 10, 2020 at 06:10:24PM +0100, Uros Bizjak wrote:
> > Registers in "regs" array are indexed as rax/rcx/rdx/.../rsi/rdi/r8/...
> > Reorder access to "regs" array in vmenter.S to follow its natural order.
>
> Any reason other than preference?  I wouldn't exactly call the register
> indices "natural", e.g. IMO it's easier to visually confirm correctness if
> A/B/C/D are ordered alphabetically.

Yes. Looking at the assembly, the offsets now increase nicely:

  71:   48 8b 48 08             mov    0x8(%rax),%rcx
  75:   48 8b 50 10             mov    0x10(%rax),%rdx
  79:   48 8b 58 18             mov    0x18(%rax),%rbx
  7d:   48 8b 68 28             mov    0x28(%rax),%rbp
  81:   48 8b 70 30             mov    0x30(%rax),%rsi
  85:   48 8b 78 38             mov    0x38(%rax),%rdi
  89:   4c 8b 40 40             mov    0x40(%rax),%r8
  8d:   4c 8b 48 48             mov    0x48(%rax),%r9
  91:   4c 8b 50 50             mov    0x50(%rax),%r10
  95:   4c 8b 58 58             mov    0x58(%rax),%r11
  99:   4c 8b 60 60             mov    0x60(%rax),%r12
  9d:   4c 8b 68 68             mov    0x68(%rax),%r13
  a1:   4c 8b 70 70             mov    0x70(%rax),%r14
  a5:   4c 8b 78 78             mov    0x78(%rax),%r15

Also, noticing that ptrace.c processes registers in the order of their
position in pt_regs, I was under the impression that the current
vmenter.S order is a remnant of the recent __asm to .S conversion.
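
To spell out the arithmetic: every offset in the dump is simply the
register's index in the "regs" array times the 8-byte word size, and
index 4 (RSP, which would be offset 0x20) never shows up because RSP is
restored by hardware on VM-Exit. A tiny stand-alone check, purely
illustrative and not kernel code:

	/* Offsets copied from the disassembly above; word size is 8 on x86-64. */
	#include <assert.h>

	int main(void)
	{
		const int WORD = 8;
		assert(0x08 ==  1 * WORD);	/* RCX */
		assert(0x10 ==  2 * WORD);	/* RDX */
		assert(0x18 ==  3 * WORD);	/* RBX */
		/* index 4 (RSP) is skipped by the loads and stores */
		assert(0x28 ==  5 * WORD);	/* RBP */
		assert(0x30 ==  6 * WORD);	/* RSI */
		assert(0x38 ==  7 * WORD);	/* RDI */
		assert(0x40 ==  8 * WORD);	/* R8  */
		assert(0x78 == 15 * WORD);	/* R15 */
		return 0;
	}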

That said, I can happily live with the current order, so this is not a
big thing, and the patch can be ignored without much fuss.

FYI, the original x86 registers were not named in alphabetical order.
Their names are explained in e.g. [1].

[1] https://en.wikibooks.org/wiki/X86_Assembly/X86_Architecture

Paolo Bonzini March 11, 2020, 6:29 p.m. UTC | #3
After all, I probably prefer Uros's order; since he took the time to send
a patch, I'll apply it.

And now, a short history lecture on 8-bit processors for the younger,
and a walk down memory lane for everyone else...

On 10/03/20 20:16, Uros Bizjak wrote:
> FYI, the original x86 registers were not named in alphabetical order.
> Their names are explained in e.g. [1].

I think the reason BX comes last is that it maps to the HL register on
the 8080, where "the location pointed to by HL" was the only available
addressing mode.

Likewise, CX maps to the BC register of the 8080.  That is only really
apparent if you take into account Z80 extensions such as the DJNZ or
LDIR instructions, but the Z80 predates the 8086 anyway.

So A -> AX, BC -> CX, DE -> DX, HL -> BX.

Thanks,

Roy Batty^A^KPaolo

Patch

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 81ada2ce99e7..ca2065166d1d 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -135,12 +135,12 @@  SYM_FUNC_START(__vmx_vcpu_run)
 	cmpb $0, %bl
 
 	/* Load guest registers.  Don't clobber flags. */
-	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
 	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
 	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
+	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
+	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
 	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
 	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
-	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
 #ifdef CONFIG_X86_64
 	mov VCPU_R8 (%_ASM_AX),  %r8
 	mov VCPU_R9 (%_ASM_AX),  %r9
@@ -168,12 +168,12 @@  SYM_FUNC_START(__vmx_vcpu_run)
 
 	/* Save all guest registers, including RAX from the stack */
 	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
-	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
 	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
 	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
+	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
+	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
 	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
 	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
-	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
 #ifdef CONFIG_X86_64
 	mov %r8,  VCPU_R8 (%_ASM_AX)
 	mov %r9,  VCPU_R9 (%_ASM_AX)
@@ -197,12 +197,12 @@  SYM_FUNC_START(__vmx_vcpu_run)
 	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
 	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
 	 */
-1:	xor %ebx, %ebx
-	xor %ecx, %ecx
+1:	xor %ecx, %ecx
 	xor %edx, %edx
+	xor %ebx, %ebx
+	xor %ebp, %ebp
 	xor %esi, %esi
 	xor %edi, %edi
-	xor %ebp, %ebp
 #ifdef CONFIG_X86_64
 	xor %r8d,  %r8d
 	xor %r9d,  %r9d