
[3/3] x86/svm: add virtual VMLOAD/VMSAVE support

Message ID 20171031220308.107679-4-brian.woods@amd.com (mailing list archive)
State New, archived

Commit Message

Woods, Brian Oct. 31, 2017, 10:03 p.m. UTC
From: Brian Woods <brian.woods@amd.com>

On AMD family 17h server processors, there is a feature called virtual
VMLOAD/VMSAVE.  This allows a nested hypervisor to perform a VMLOAD or
VMSAVE without needing to be intercepted by the host hypervisor.
Virtual VMLOAD/VMSAVE requires the host hypervisor to be in long mode
and nested page tables to be enabled.  For more information about it
please see:

AMD64 Architecture Programmer’s Manual Volume 2: System Programming
http://support.amd.com/TechDocs/24593.pdf
Section: VMSAVE and VMLOAD Virtualization (Section 15.33.1)

This patch series adds support to check for and enable the virtual
VMLOAD/VMSAVE feature if available.
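
As an illustration only (not part of the patch), a minimal sketch of how
the feature bit can be detected, assuming a Xen/Linux-style cpuid()
helper and a hypothetical function name; the bit position follows the
APM section referenced above:

    static bool virt_vmload_vmsave_supported(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID Fn8000_000A reports the SVM sub-features in EDX. */
        cpuid(0x8000000a, &eax, &ebx, &ecx, &edx);

        /* Bit 15: virtualized VMSAVE/VMLOAD (V_VMSAVE_VMLOAD). */
        return edx & (1u << 15);
    }

In Xen this check is wrapped by the cpu_has_svm_vloadsave predicate used
in the hunks below.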

Signed-off-by: Brian Woods <brian.woods@amd.com>
---
 xen/arch/x86/hvm/svm/svm.c      | 1 +
 xen/arch/x86/hvm/svm/svmdebug.c | 2 ++
 xen/arch/x86/hvm/svm/vmcb.c     | 7 +++++++
 3 files changed, 10 insertions(+)

Comments

Andrew Cooper Oct. 31, 2017, 10:15 p.m. UTC | #1
On 31/10/17 22:03, brian.woods@amd.com wrote:
> From: Brian Woods <brian.woods@amd.com>
>
> On AMD family 17h server processors, there is a feature called virtual
> VMLOAD/VMSAVE.  This allows a nested hypervisor to perform a VMLOAD or
> VMSAVE without needing to be intercepted by the host hypervisor.
> Virtual VMLOAD/VMSAVE requires the host hypervisor to be in long mode
> and nested page tables to be enabled.  For more information about it
> please see:
>
> AMD64 Architecture Programmer’s Manual Volume 2: System Programming
> http://support.amd.com/TechDocs/24593.pdf
> Section: VMSAVE and VMLOAD Virtualization (Section 15.33.1)
>
> This patch series adds support to check for and enable the virtual
> VMLOAD/VMSAVE feature if available.
>
> Signed-off-by: Brian Woods <brian.woods@amd.com>
> ---
>  xen/arch/x86/hvm/svm/svm.c      | 1 +
>  xen/arch/x86/hvm/svm/svmdebug.c | 2 ++
>  xen/arch/x86/hvm/svm/vmcb.c     | 7 +++++++
>  3 files changed, 10 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index c8ffb17515..60b1288a31 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1669,6 +1669,7 @@ const struct hvm_function_table * __init start_svm(void)
>      P(cpu_has_svm_nrips, "Next-RIP Saved on #VMEXIT");
>      P(cpu_has_svm_cleanbits, "VMCB Clean Bits");
>      P(cpu_has_svm_decode, "DecodeAssists");
> +    P(cpu_has_svm_vloadsave, "Virtual VMLOAD/VMSAVE");
>      P(cpu_has_pause_filter, "Pause-Intercept Filter");
>      P(cpu_has_tsc_ratio, "TSC Rate MSR");
>  #undef P
> diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
> index 89ef2db932..7145e2f5ca 100644
> --- a/xen/arch/x86/hvm/svm/svmdebug.c
> +++ b/xen/arch/x86/hvm/svm/svmdebug.c
> @@ -55,6 +55,8 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb)
>             vmcb->exitinfo1, vmcb->exitinfo2);
>      printk("np_enable = %#"PRIx64" guest_asid = %#x\n",
>             vmcb_get_np_enable(vmcb), vmcb_get_guest_asid(vmcb));
> +    printk("virtual vmload/vmsave = %d  virt_ext = %#"PRIx64"\n",
> +           vmcb->virt_ext.fields.vloadsave_enable, vmcb->virt_ext.bytes);
>      printk("cpl = %d efer = %#"PRIx64" star = %#"PRIx64" lstar = %#"PRIx64"\n",
>             vmcb_get_cpl(vmcb), vmcb_get_efer(vmcb), vmcb->star, vmcb->lstar);
>      printk("CR0 = 0x%016"PRIx64" CR2 = 0x%016"PRIx64"\n",
> diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
> index 997e7597e0..cc35d00bb7 100644
> --- a/xen/arch/x86/hvm/svm/vmcb.c
> +++ b/xen/arch/x86/hvm/svm/vmcb.c
> @@ -200,6 +200,13 @@ static int construct_vmcb(struct vcpu *v)
>  
>          /* PAT is under complete control of SVM when using nested paging. */
>          svm_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
> +
> +        /* use virtual VMLOAD/VMSAVE if available */
> +        if (cpu_has_svm_vloadsave) {

The style in this file is quite hit and miss, but we expect new code to
conform to the standards.  In this case, the correct style is:

if ( cpu_has_svm_vloadsave )
{

This can be fixed on commit if there are no other comments.
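
For reference, the hunk reformatted to that style would read roughly:

        /* Use virtual VMLOAD/VMSAVE if available. */
        if ( cpu_has_svm_vloadsave )
        {
            vmcb->virt_ext.fields.vloadsave_enable = 1;
            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMLOAD;
            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMSAVE;
        }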

All 3 patches Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

~Andrew

> +            vmcb->virt_ext.fields.vloadsave_enable = 1;
> +            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMLOAD;
> +            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMSAVE;
> +        }
>      }
>      else
>      {

Patch

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c8ffb17515..60b1288a31 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1669,6 +1669,7 @@  const struct hvm_function_table * __init start_svm(void)
     P(cpu_has_svm_nrips, "Next-RIP Saved on #VMEXIT");
     P(cpu_has_svm_cleanbits, "VMCB Clean Bits");
     P(cpu_has_svm_decode, "DecodeAssists");
+    P(cpu_has_svm_vloadsave, "Virtual VMLOAD/VMSAVE");
     P(cpu_has_pause_filter, "Pause-Intercept Filter");
     P(cpu_has_tsc_ratio, "TSC Rate MSR");
 #undef P
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 89ef2db932..7145e2f5ca 100644
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -55,6 +55,8 @@  void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb)
            vmcb->exitinfo1, vmcb->exitinfo2);
     printk("np_enable = %#"PRIx64" guest_asid = %#x\n",
            vmcb_get_np_enable(vmcb), vmcb_get_guest_asid(vmcb));
+    printk("virtual vmload/vmsave = %d  virt_ext = %#"PRIx64"\n",
+           vmcb->virt_ext.fields.vloadsave_enable, vmcb->virt_ext.bytes);
     printk("cpl = %d efer = %#"PRIx64" star = %#"PRIx64" lstar = %#"PRIx64"\n",
            vmcb_get_cpl(vmcb), vmcb_get_efer(vmcb), vmcb->star, vmcb->lstar);
     printk("CR0 = 0x%016"PRIx64" CR2 = 0x%016"PRIx64"\n",
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 997e7597e0..cc35d00bb7 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -200,6 +200,13 @@  static int construct_vmcb(struct vcpu *v)
 
         /* PAT is under complete control of SVM when using nested paging. */
         svm_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
+
+        /* use virtual VMLOAD/VMSAVE if available */
+        if (cpu_has_svm_vloadsave) {
+            vmcb->virt_ext.fields.vloadsave_enable = 1;
+            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMLOAD;
+            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMSAVE;
+        }
     }
     else
     {