diff mbox series

[v2,6/6] x86/vmx: rework VMX wrappers to use `asm goto()`

Message ID 20250403182250.3329498-7-dmukhin@ford.com (mailing list archive)
State New
Headers show
Series x86/asm: cleanups after toolchain baseline upgrade | expand

Commit Message

Denis Mukhin April 3, 2025, 6:23 p.m. UTC
From: Denis Mukhin <dmukhin@ford.com>

Improve error handling in VMX wrappers by switching to `asm goto()` where
possible.

vmread_safe() is kept as-is because the minimum required baseline GCC does
not support output operands in `asm goto`.

Resolves: https://gitlab.com/xen-project/xen/-/work_items/210
Signed-off-by: Denis Mukhin <dmukhin@ford.com>
---
 xen/arch/x86/include/asm/hvm/vmx/vmx.h | 141 +++++++++++++------------
 1 file changed, 73 insertions(+), 68 deletions(-)

Comments

Andrew Cooper April 3, 2025, 8:10 p.m. UTC | #1
On 03/04/2025 7:23 pm, dmkhn@proton.me wrote:
> From: Denis Mukhin <dmukhin@ford.com>
>
> Improve error handling in VMX wrappers by switching to `asm goto()` where
> possible.
>
> vmread_safe() kept as is because the minimally required baseline GCC does
> not support output operands in `asm goto`.
>
> Resolves: https://gitlab.com/xen-project/xen/-/work_items/210
> Signed-off-by: Denis Mukhin <dmukhin@ford.com>

I'd suggest limiting this patch to the asm-goto transformations only.

> ---
>  xen/arch/x86/include/asm/hvm/vmx/vmx.h | 141 +++++++++++++------------
>  1 file changed, 73 insertions(+), 68 deletions(-)
>
> diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
> index ed6a6986b9..19d41f7b90 100644
> --- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
> @@ -294,54 +294,57 @@ extern uint8_t posted_intr_vector;
>  
>  static always_inline void __vmptrld(u64 addr)
>  {
> -    asm volatile ( "vmptrld %0\n"
> -                   /* CF==1 or ZF==1 --> BUG() */
> -                   UNLIKELY_START(be, vmptrld)
> -                   _ASM_BUGFRAME_TEXT(0)
> -                   UNLIKELY_END_SECTION
> -                   :
> -                   : "m" (addr),
> -                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
> -                   : "memory" );
> +    asm goto ( "vmptrld %[addr]\n"
> +               "jbe %l[vmfail]\n\t"

Also cosmetic, but the very final line in the asm block ideally doesn't
want the \n\t.  Except, this tends to be hard to spot because of the way
we use macros such as UNLIKELY_START() or _ASM_EXTABLE() which do just
expand to more strings under the hood.

> +               :
> +               : [addr] "m" (addr)
> +               : "memory"
> +               : vmfail );
> +    return;
> +
> + vmfail:
> +    BUG();
>  }
>  
>  static always_inline void __vmpclear(u64 addr)
>  {
> -    asm volatile ( "vmclear %0\n"
> -                   /* CF==1 or ZF==1 --> BUG() */
> -                   UNLIKELY_START(be, vmclear)
> -                   _ASM_BUGFRAME_TEXT(0)
> -                   UNLIKELY_END_SECTION
> -                   :
> -                   : "m" (addr),
> -                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
> -                   : "memory" );
> +    asm goto ( "vmclear %[addr]\n"
> +               "jbe %l[vmfail]\n\t"
> +               :
> +               : [addr] "m" (addr)
> +               : "memory"
> +               : vmfail );
> +    return;
> +
> + vmfail:
> +    BUG();
>  }
>  
>  static always_inline void __vmread(unsigned long field, unsigned long *value)
>  {
> -    asm volatile ( "vmread %1, %0\n\t"
> -                   /* CF==1 or ZF==1 --> BUG() */
> -                   UNLIKELY_START(be, vmread)
> -                   _ASM_BUGFRAME_TEXT(0)
> -                   UNLIKELY_END_SECTION
> -                   : "=rm" (*value)
> -                   : "r" (field),
> -                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
> -        );
> +    bool vmfail;
> +
> +    asm volatile ( "vmread %[field], %[value]\n\t"
> +                    "setbe %[vmfail]\n\t"
> +                    : [value] "=rm" (*value), [vmfail] "=rm" (vmfail)
> +                    : [field] "r" (field)
> +                    : "cc" );
> +    if ( vmfail )
> +        BUG();

This is almost certainly not an improvement in generated code.  You now
need a register to hold the boolean, whereas previously there was just a jbe
to a ud2 and no extra state was required.

Here's an example https://godbolt.org/z/GG4r1c7bK showing the
difference.  (It also shows up the Clang "rm" constraint bug.  Change
vmfail from "=rm" to "=r" to get sane(er) code generation.)

Also, you've added a "cc" clobber.  This is one thing you don't need on
x86; it's simply assumed, given how many instructions update flags
naturally.

> @@ -369,22 +372,22 @@ static inline enum vmx_insn_errno vmread_safe(unsigned long field,
>  static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
>                                                 unsigned long value)
>  {
> -    unsigned long ret = VMX_INSN_SUCCEED;
> -    bool fail_invalid, fail_valid;
> +    unsigned long ret;
>  
> -    asm volatile ( "vmwrite %[value], %[field]\n\t"
> -                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
> -                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
> -                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
> -                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
> -                   : [field] "r" (field),
> -                     [value] "rm" (value) );
> +    asm goto ( "vmwrite %[value], %[field]\n\t"
> +               "jc %l[vmfail_invalid]\n\t"
> +               "jz %l[vmfail_error]\n\t"
> +               :
> +               : [field] "r" (field), [value] "rm" (value)
> +               :
> +               : vmfail_invalid, vmfail_error );
> +    return VMX_INSN_SUCCEED;
>  
> -    if ( unlikely(fail_invalid) )
> -        ret = VMX_INSN_FAIL_INVALID;
> -    else if ( unlikely(fail_valid) )
> -        __vmread(VM_INSTRUCTION_ERROR, &ret);
> + vmfail_invalid:
> +    return VMX_INSN_FAIL_INVALID;
>  
> + vmfail_error:
> +    __vmread(VM_INSTRUCTION_ERROR, &ret);

Something that is not technically toolchain cleanup, but is in desperate
need of doing:

__vmread() being void and producing an output by pointer is insane, and
leads to ugly code such as this, even if the compiler can usually fix up
behind the scenes.

It would be lovely to have:

unsigned long vmread(unsigned int field)

which can be wrapped by __vmread() with "*value = vmread(field);" or
so.  (Don't go converting all users; that's a huge task).

Then, this vmfail_error can be simply "return
vmread(VM_INSTRUCTION_ERROR);" and you can drop the ret variable.

~Andrew
Jan Beulich April 4, 2025, 7:14 a.m. UTC | #2
On 03.04.2025 22:10, Andrew Cooper wrote:
> On 03/04/2025 7:23 pm, dmkhn@proton.me wrote:
>> --- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
>> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
>> @@ -294,54 +294,57 @@ extern uint8_t posted_intr_vector;
>>  
>>  static always_inline void __vmptrld(u64 addr)
>>  {
>> -    asm volatile ( "vmptrld %0\n"
>> -                   /* CF==1 or ZF==1 --> BUG() */
>> -                   UNLIKELY_START(be, vmptrld)
>> -                   _ASM_BUGFRAME_TEXT(0)
>> -                   UNLIKELY_END_SECTION
>> -                   :
>> -                   : "m" (addr),
>> -                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
>> -                   : "memory" );
>> +    asm goto ( "vmptrld %[addr]\n"
>> +               "jbe %l[vmfail]\n\t"
> 
> Also cosmetic, but the very final line in the asm block ideally doesn't
> want the \n\t.

And to clarify (Andrew gave a similar comment elsewhere) the \t instead
wants to appear on the first of these two lines.

Jan
diff mbox series

Patch

diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index ed6a6986b9..19d41f7b90 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -294,54 +294,57 @@  extern uint8_t posted_intr_vector;
 
 static always_inline void __vmptrld(u64 addr)
 {
-    asm volatile ( "vmptrld %0\n"
-                   /* CF==1 or ZF==1 --> BUG() */
-                   UNLIKELY_START(be, vmptrld)
-                   _ASM_BUGFRAME_TEXT(0)
-                   UNLIKELY_END_SECTION
-                   :
-                   : "m" (addr),
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-                   : "memory" );
+    asm goto ( "vmptrld %[addr]\n"
+               "jbe %l[vmfail]\n\t"
+               :
+               : [addr] "m" (addr)
+               : "memory"
+               : vmfail );
+    return;
+
+ vmfail:
+    BUG();
 }
 
 static always_inline void __vmpclear(u64 addr)
 {
-    asm volatile ( "vmclear %0\n"
-                   /* CF==1 or ZF==1 --> BUG() */
-                   UNLIKELY_START(be, vmclear)
-                   _ASM_BUGFRAME_TEXT(0)
-                   UNLIKELY_END_SECTION
-                   :
-                   : "m" (addr),
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-                   : "memory" );
+    asm goto ( "vmclear %[addr]\n"
+               "jbe %l[vmfail]\n\t"
+               :
+               : [addr] "m" (addr)
+               : "memory"
+               : vmfail );
+    return;
+
+ vmfail:
+    BUG();
 }
 
 static always_inline void __vmread(unsigned long field, unsigned long *value)
 {
-    asm volatile ( "vmread %1, %0\n\t"
-                   /* CF==1 or ZF==1 --> BUG() */
-                   UNLIKELY_START(be, vmread)
-                   _ASM_BUGFRAME_TEXT(0)
-                   UNLIKELY_END_SECTION
-                   : "=rm" (*value)
-                   : "r" (field),
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-        );
+    bool vmfail;
+
+    asm volatile ( "vmread %[field], %[value]\n\t"
+                    "setbe %[vmfail]\n\t"
+                    : [value] "=rm" (*value), [vmfail] "=rm" (vmfail)
+                    : [field] "r" (field)
+                    : "cc" );
+    if ( vmfail )
+        BUG();
 }
 
 static always_inline void __vmwrite(unsigned long field, unsigned long value)
 {
-    asm volatile ( "vmwrite %1, %0\n"
-                   /* CF==1 or ZF==1 --> BUG() */
-                   UNLIKELY_START(be, vmwrite)
-                   _ASM_BUGFRAME_TEXT(0)
-                   UNLIKELY_END_SECTION
-                   :
-                   : "r" (field) , "rm" (value),
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-        );
+    asm goto ( "vmwrite %[value], %[field]\n\t"
+               "jbe %l[vmfail]\n\t"
+               :
+               : [field] "r" (field), [value] "rm" (value)
+               :
+               : vmfail );
+    return;
+
+ vmfail:
+    BUG();
 }
 
 static inline enum vmx_insn_errno vmread_safe(unsigned long field,
@@ -369,22 +372,22 @@  static inline enum vmx_insn_errno vmread_safe(unsigned long field,
 static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
                                                unsigned long value)
 {
-    unsigned long ret = VMX_INSN_SUCCEED;
-    bool fail_invalid, fail_valid;
+    unsigned long ret;
 
-    asm volatile ( "vmwrite %[value], %[field]\n\t"
-                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
-                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
-                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
-                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
-                   : [field] "r" (field),
-                     [value] "rm" (value) );
+    asm goto ( "vmwrite %[value], %[field]\n\t"
+               "jc %l[vmfail_invalid]\n\t"
+               "jz %l[vmfail_error]\n\t"
+               :
+               : [field] "r" (field), [value] "rm" (value)
+               :
+               : vmfail_invalid, vmfail_error );
+    return VMX_INSN_SUCCEED;
 
-    if ( unlikely(fail_invalid) )
-        ret = VMX_INSN_FAIL_INVALID;
-    else if ( unlikely(fail_valid) )
-        __vmread(VM_INSTRUCTION_ERROR, &ret);
+ vmfail_invalid:
+    return VMX_INSN_FAIL_INVALID;
 
+ vmfail_error:
+    __vmread(VM_INSTRUCTION_ERROR, &ret);
     return ret;
 }
 
@@ -402,15 +405,16 @@  static always_inline void __invept(unsigned long type, uint64_t eptp)
          !cpu_has_vmx_ept_invept_single_context )
         type = INVEPT_ALL_CONTEXT;
 
-    asm volatile ( "invept %0, %1\n"
-                   /* CF==1 or ZF==1 --> BUG() */
-                   UNLIKELY_START(be, invept)
-                   _ASM_BUGFRAME_TEXT(0)
-                   UNLIKELY_END_SECTION
-                   :
-                   : "m" (operand), "r" (type),
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-                   : "memory" );
+    asm goto ( "invept %[operand], %[type]\n"
+               "jbe %l[vmfail]\n\t"
+               :
+               : [operand] "m" (operand), [type] "r" (type)
+               : "memory"
+               : vmfail );
+    return;
+
+ vmfail:
+    BUG();
 }
 
 static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva)
@@ -422,16 +426,17 @@  static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva)
     }  operand = {vpid, 0, gva};
 
     /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. */
-    asm volatile ( "1: invvpid %0, %1\n"
-                   /* CF==1 or ZF==1 --> BUG() */
-                   UNLIKELY_START(be, invvpid)
-                   _ASM_BUGFRAME_TEXT(0)
-                   UNLIKELY_END_SECTION "\n"
-                   "2:" _ASM_EXTABLE(1b, 2b)
-                   :
-                   : "m" (operand), "r" (type),
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-                   : "memory" );
+    asm goto ( "1: invvpid %[operand], %[type]\n"
+               "   jbe %l[vmfail]\n\t"
+               "2:" _ASM_EXTABLE(1b, 2b)
+               :
+               : [operand] "m" (operand), [type] "r" (type)
+               : "memory"
+               : vmfail );
+    return;
+
+ vmfail:
+    BUG();
 }
 
 static inline void ept_sync_all(void)