diff mbox

[v1,1/3] x86/vvmx: add mov-ss blocking check to vmentry

Message ID 20170313105143.20842-2-sergey.dyasli@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Sergey Dyasli March 13, 2017, 10:51 a.m. UTC
Intel SDM states that if there is a current VMCS and there is MOV-SS
blocking, VMFailValid occurs and control passes to the next instruction.

Implement such behaviour for nested vmlaunch and vmresume.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/hvm/vmx/vvmx.c        | 16 ++++++++++++++++
 xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
 2 files changed, 17 insertions(+)

Comments

Andrew Cooper March 13, 2017, 10:59 a.m. UTC | #1
On 13/03/17 10:51, Sergey Dyasli wrote:
> Intel SDM states that if there is a current VMCS and there is MOV-SS
> blocking, VMFailValid occurs and control passes to the next instruction.
>
> Implement such behaviour for nested vmlaunch and vmresume.
>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>

The content here looks correct, so Reviewed-by: Andrew Cooper
<andrew.cooper3@citrix.com>

I am wondering however whether we can start introducing transparent
unions and bitfields for the controls, like I did with ept_qual_t

~Andrew
Tian, Kevin March 14, 2017, 9 a.m. UTC | #2
> From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
> Sent: Monday, March 13, 2017 6:52 PM
> 
> Intel SDM states that if there is a current VMCS and there is MOV-SS blocking,
> VMFailValid occurs and control passes to the next instruction.
> 
> Implement such behaviour for nested vmlaunch and vmresume.
> 
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>

Acked-by: Kevin Tian <kevin.tian@intel.com>
Krish Sadhukhan March 16, 2017, 6:23 p.m. UTC | #3
The Intel SDM also mentions POP-SS. Are you planning to do it via 
another patch ?

Also, I was wondering if it makes more sense to rename the new enum code as

     VMX_INSN_VMENTRY_BLOCKED

since it can then also be used for POP-SS.


-Krish

On 03/13/2017 03:51 AM, Sergey Dyasli wrote:
> Intel SDM states that if there is a current VMCS and there is MOV-SS
> blocking, VMFailValid occurs and control passes to the next instruction.
>
> Implement such behaviour for nested vmlaunch and vmresume.
>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
> ---
>   xen/arch/x86/hvm/vmx/vvmx.c        | 16 ++++++++++++++++
>   xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
>   2 files changed, 17 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index e2c0951..09e4250 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -1572,6 +1572,7 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
>       bool_t launched;
>       struct vcpu *v = current;
>       struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> +    unsigned long intr_shadow;
>       int rc = vmx_inst_check_privilege(regs, 0);
>   
>       if ( rc != X86EMUL_OKAY )
> @@ -1583,6 +1584,13 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
>           return X86EMUL_OKAY;
>       }
>   
> +    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
> +    if ( intr_shadow & VMX_INTR_SHADOW_MOV_SS )
> +    {
> +        vmfail_valid(regs, VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS);
> +        return X86EMUL_OKAY;
> +    }
> +
>       launched = vvmcs_launched(&nvmx->launched_list,
>                                 PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
>       if ( !launched )
> @@ -1598,6 +1606,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
>       bool_t launched;
>       struct vcpu *v = current;
>       struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> +    unsigned long intr_shadow;
>       int rc = vmx_inst_check_privilege(regs, 0);
>   
>       if ( rc != X86EMUL_OKAY )
> @@ -1609,6 +1618,13 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
>           return X86EMUL_OKAY;
>       }
>   
> +    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
> +    if ( intr_shadow & VMX_INTR_SHADOW_MOV_SS )
> +    {
> +        vmfail_valid(regs, VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS);
> +        return X86EMUL_OKAY;
> +    }
> +
>       launched = vvmcs_launched(&nvmx->launched_list,
>                                 PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
>       if ( launched )
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index f465fff..dc5d91f 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -515,6 +515,7 @@ enum vmx_insn_errno
>       VMX_INSN_VMPTRLD_INCORRECT_VMCS_ID     = 11,
>       VMX_INSN_UNSUPPORTED_VMCS_COMPONENT    = 12,
>       VMX_INSN_VMXON_IN_VMX_ROOT             = 15,
> +    VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS     = 26,
>       VMX_INSN_FAIL_INVALID                  = ~0,
>   };
>
Sergey Dyasli March 17, 2017, 9 a.m. UTC | #4
On Thu, 2017-03-16 at 11:23 -0700, Krish Sadhukhan wrote:
> The Intel SDM also mentions POP-SS. Are you planning to do it via
> another patch ?

(SDM from Dec 2016) In "Table 24-3. Format of Interruptibility State"
it reads "This document uses the term “blocking by MOV SS,” but it
applies equally to POP SS."

>
> Also, I was wondering if it makes more sense to rename the new enum code as
>
>      VMX_INSN_VMENTRY_BLOCKED
>
> since it can then also be used for POP-SS.

-- 
Thanks,
Sergey
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index e2c0951..09e4250 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1572,6 +1572,7 @@  int nvmx_handle_vmresume(struct cpu_user_regs *regs)
     bool_t launched;
     struct vcpu *v = current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    unsigned long intr_shadow;
     int rc = vmx_inst_check_privilege(regs, 0);
 
     if ( rc != X86EMUL_OKAY )
@@ -1583,6 +1584,13 @@  int nvmx_handle_vmresume(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;        
     }
 
+    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+    if ( intr_shadow & VMX_INTR_SHADOW_MOV_SS )
+    {
+        vmfail_valid(regs, VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS);
+        return X86EMUL_OKAY;
+    }
+
     launched = vvmcs_launched(&nvmx->launched_list,
                               PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
     if ( !launched )
@@ -1598,6 +1606,7 @@  int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     bool_t launched;
     struct vcpu *v = current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    unsigned long intr_shadow;
     int rc = vmx_inst_check_privilege(regs, 0);
 
     if ( rc != X86EMUL_OKAY )
@@ -1609,6 +1618,13 @@  int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
+    __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+    if ( intr_shadow & VMX_INTR_SHADOW_MOV_SS )
+    {
+        vmfail_valid(regs, VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS);
+        return X86EMUL_OKAY;
+    }
+
     launched = vvmcs_launched(&nvmx->launched_list,
                               PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
     if ( launched )
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index f465fff..dc5d91f 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -515,6 +515,7 @@  enum vmx_insn_errno
     VMX_INSN_VMPTRLD_INCORRECT_VMCS_ID     = 11,
     VMX_INSN_UNSUPPORTED_VMCS_COMPONENT    = 12,
     VMX_INSN_VMXON_IN_VMX_ROOT             = 15,
+    VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS     = 26,
     VMX_INSN_FAIL_INVALID                  = ~0,
 };