diff mbox series

[v2,1/3] x86/svm: Drop the _enabled suffix from vmcb bits

Message ID 8e6d8cac54ca05f1202580b574c548e08988d8f6.1710149462.git.vaishali.thakkar@vates.tech (mailing list archive)
State New, archived
Headers show
Series x86/svm: Misc changes for a few vmcb bits | expand

Commit Message

Vaishali Thakkar March 11, 2024, 12:40 p.m. UTC
The suffix is redundant for np/sev/sev-es bits. Drop it
to avoid adding extra code volume. While we're here, drop
the double negations in one of the instances of _np bit
and replace 0/1 with false/true in the use cases of _np.

Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Vaishali Thakkar <vaishali.thakkar@vates.tech>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v1:
        - Address Andrew and Jan's reviews related to dropping
          double negation and replacing 0/1 with false/true
        - Fix the typo around signed-off-by
---
 xen/arch/x86/hvm/svm/nestedsvm.c        | 14 +++++++-------
 xen/arch/x86/hvm/svm/svm.c              |  2 +-
 xen/arch/x86/hvm/svm/vmcb.c             |  2 +-
 xen/arch/x86/include/asm/hvm/svm/vmcb.h | 18 +++++++++---------
 4 files changed, 18 insertions(+), 18 deletions(-)

Comments

Jan Beulich March 12, 2024, 7:54 a.m. UTC | #1
On 11.03.2024 13:40, Vaishali Thakkar wrote:
> --- a/xen/arch/x86/hvm/svm/nestedsvm.c
> +++ b/xen/arch/x86/hvm/svm/nestedsvm.c
> @@ -571,7 +571,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>      if ( nestedhvm_paging_mode_hap(v) )
>      {
>          /* host nested paging + guest nested paging. */
> -        n2vmcb->_np_enable = 1;
> +        n2vmcb->_np = true;
>  
>          nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
>  
> @@ -585,7 +585,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>      else if ( paging_mode_hap(v->domain) )
>      {
>          /* host nested paging + guest shadow paging. */
> -        n2vmcb->_np_enable = 1;
> +        n2vmcb->_np = true;
>          /* Keep h_cr3 as it is. */
>          n2vmcb->_h_cr3 = n1vmcb->_h_cr3;
>          /* When l1 guest does shadow paging
> @@ -601,7 +601,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>      else
>      {
>          /* host shadow paging + guest shadow paging. */
> -        n2vmcb->_np_enable = 0;
> +        n2vmcb->_np = false;
>          n2vmcb->_h_cr3 = 0x0;
>  
>          /* TODO: Once shadow-shadow paging is in place come back to here
> @@ -706,7 +706,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
>      }
>  
>      /* nested paging for the guest */
> -    svm->ns_hap_enabled = !!ns_vmcb->_np_enable;
> +    svm->ns_hap_enabled = ns_vmcb->_np;
>  
>      /* Remember the V_INTR_MASK in hostflags */
>      svm->ns_hostflags.fields.vintrmask = !!ns_vmcb->_vintr.fields.intr_masking;
> @@ -1084,7 +1084,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>      if ( nestedhvm_paging_mode_hap(v) )
>      {
>          /* host nested paging + guest nested paging. */
> -        ns_vmcb->_np_enable = n2vmcb->_np_enable;
> +        ns_vmcb->_np = n2vmcb->_np;
>          ns_vmcb->_cr3 = n2vmcb->_cr3;
>          /* The vmcb->h_cr3 is the shadowed h_cr3. The original
>           * unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3,
> @@ -1093,7 +1093,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>      else if ( paging_mode_hap(v->domain) )
>      {
>          /* host nested paging + guest shadow paging. */
> -        ns_vmcb->_np_enable = 0;
> +        ns_vmcb->_np = false;
>          /* Throw h_cr3 away. Guest is not allowed to set it or
>           * it can break out, otherwise (security hole!) */
>          ns_vmcb->_h_cr3 = 0x0;
> @@ -1104,7 +1104,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>      else
>      {
>          /* host shadow paging + guest shadow paging. */
> -        ns_vmcb->_np_enable = 0;
> +        ns_vmcb->_np = false;
>          ns_vmcb->_h_cr3 = 0x0;
>          /* The vmcb->_cr3 is the shadowed cr3. The original
>           * unshadowed guest cr3 is kept in ns_vmcb->_cr3,

While spotting the small issue below it occurred to me: Why is it that
vmcb_set_...() is open-coded everywhere here? I think this would be
pretty nice to avoid at the same time (for lines touched anyway, or in
a separate prereq patch, or alternatively [and only ideally] for all
other instances in a follow-on patch). Thoughts?

> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -473,7 +473,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
>  
>      if ( paging_mode_hap(v->domain) )
>      {
> -        vmcb_set_np_enable(vmcb, 1);
> +        vmcb_set_np(vmcb, 1);

No switching to "true" here? (If the answer to the other question is
"No" for whatever reason, I'd nevertheless like to see this on adjusted,
which could then be done while committing.)

Jan
Vaishali Thakkar March 12, 2024, 10 a.m. UTC | #2
On 3/12/24 08:54, Jan Beulich wrote:
> On 11.03.2024 13:40, Vaishali Thakkar wrote:
>> --- a/xen/arch/x86/hvm/svm/nestedsvm.c
>> +++ b/xen/arch/x86/hvm/svm/nestedsvm.c
>> @@ -571,7 +571,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>       if ( nestedhvm_paging_mode_hap(v) )
>>       {
>>           /* host nested paging + guest nested paging. */
>> -        n2vmcb->_np_enable = 1;
>> +        n2vmcb->_np = true;
>>   
>>           nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
>>   
>> @@ -585,7 +585,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>       else if ( paging_mode_hap(v->domain) )
>>       {
>>           /* host nested paging + guest shadow paging. */
>> -        n2vmcb->_np_enable = 1;
>> +        n2vmcb->_np = true;
>>           /* Keep h_cr3 as it is. */
>>           n2vmcb->_h_cr3 = n1vmcb->_h_cr3;
>>           /* When l1 guest does shadow paging
>> @@ -601,7 +601,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>       else
>>       {
>>           /* host shadow paging + guest shadow paging. */
>> -        n2vmcb->_np_enable = 0;
>> +        n2vmcb->_np = false;
>>           n2vmcb->_h_cr3 = 0x0;
>>   
>>           /* TODO: Once shadow-shadow paging is in place come back to here
>> @@ -706,7 +706,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
>>       }
>>   
>>       /* nested paging for the guest */
>> -    svm->ns_hap_enabled = !!ns_vmcb->_np_enable;
>> +    svm->ns_hap_enabled = ns_vmcb->_np;
>>   
>>       /* Remember the V_INTR_MASK in hostflags */
>>       svm->ns_hostflags.fields.vintrmask = !!ns_vmcb->_vintr.fields.intr_masking;
>> @@ -1084,7 +1084,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>       if ( nestedhvm_paging_mode_hap(v) )
>>       {
>>           /* host nested paging + guest nested paging. */
>> -        ns_vmcb->_np_enable = n2vmcb->_np_enable;
>> +        ns_vmcb->_np = n2vmcb->_np;
>>           ns_vmcb->_cr3 = n2vmcb->_cr3;
>>           /* The vmcb->h_cr3 is the shadowed h_cr3. The original
>>            * unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3,
>> @@ -1093,7 +1093,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>       else if ( paging_mode_hap(v->domain) )
>>       {
>>           /* host nested paging + guest shadow paging. */
>> -        ns_vmcb->_np_enable = 0;
>> +        ns_vmcb->_np = false;
>>           /* Throw h_cr3 away. Guest is not allowed to set it or
>>            * it can break out, otherwise (security hole!) */
>>           ns_vmcb->_h_cr3 = 0x0;
>> @@ -1104,7 +1104,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>       else
>>       {
>>           /* host shadow paging + guest shadow paging. */
>> -        ns_vmcb->_np_enable = 0;
>> +        ns_vmcb->_np = false;
>>           ns_vmcb->_h_cr3 = 0x0;
>>           /* The vmcb->_cr3 is the shadowed cr3. The original
>>            * unshadowed guest cr3 is kept in ns_vmcb->_cr3,
> 
> While spotting the small issue below it occurred to me: Why is it that
> vmcb_set_...() is open-coded everywhere here? I think this would be
> pretty nice to avoid at the same time (for lines touched anyway, or in
> a separate prereq patch, or alternatively [and only ideally] for all
> other instances in a follow-on patch). Thoughts?

Yes, I noticed this too. My plan was to send a followup patch for
fixing all the instances where vmcb_set/get_...() can be used.
There are a bunch of other vmcb bits (apart from the ones being
handled in this patchset) in this file and in svm.c which can
benefit from using VMCB accessors.

>> --- a/xen/arch/x86/hvm/svm/svm.c
>> +++ b/xen/arch/x86/hvm/svm/svm.c
>> @@ -473,7 +473,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
>>   
>>       if ( paging_mode_hap(v->domain) )
>>       {
>> -        vmcb_set_np_enable(vmcb, 1);
>> +        vmcb_set_np(vmcb, 1);
> 
> No switching to "true" here? (If the answer to the other question is
> "No" for whatever reason, I'd nevertheless like to see this on adjusted,
> which could then be done while committing.)

Sorry, I missed this instance. I'll fix it if I'll need to send another
revised patchset for some other fixes (based on review comments), else
feel free to fix it while committing. Thanks.

> Jan
Jan Beulich March 12, 2024, 10:49 a.m. UTC | #3
On 12.03.2024 11:00, Vaishali Thakkar wrote:
> On 3/12/24 08:54, Jan Beulich wrote:
>> On 11.03.2024 13:40, Vaishali Thakkar wrote:
>>> --- a/xen/arch/x86/hvm/svm/nestedsvm.c
>>> +++ b/xen/arch/x86/hvm/svm/nestedsvm.c
>>> @@ -571,7 +571,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>>       if ( nestedhvm_paging_mode_hap(v) )
>>>       {
>>>           /* host nested paging + guest nested paging. */
>>> -        n2vmcb->_np_enable = 1;
>>> +        n2vmcb->_np = true;
>>>   
>>>           nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
>>>   
>>> @@ -585,7 +585,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>>       else if ( paging_mode_hap(v->domain) )
>>>       {
>>>           /* host nested paging + guest shadow paging. */
>>> -        n2vmcb->_np_enable = 1;
>>> +        n2vmcb->_np = true;
>>>           /* Keep h_cr3 as it is. */
>>>           n2vmcb->_h_cr3 = n1vmcb->_h_cr3;
>>>           /* When l1 guest does shadow paging
>>> @@ -601,7 +601,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>>       else
>>>       {
>>>           /* host shadow paging + guest shadow paging. */
>>> -        n2vmcb->_np_enable = 0;
>>> +        n2vmcb->_np = false;
>>>           n2vmcb->_h_cr3 = 0x0;
>>>   
>>>           /* TODO: Once shadow-shadow paging is in place come back to here
>>> @@ -706,7 +706,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
>>>       }
>>>   
>>>       /* nested paging for the guest */
>>> -    svm->ns_hap_enabled = !!ns_vmcb->_np_enable;
>>> +    svm->ns_hap_enabled = ns_vmcb->_np;
>>>   
>>>       /* Remember the V_INTR_MASK in hostflags */
>>>       svm->ns_hostflags.fields.vintrmask = !!ns_vmcb->_vintr.fields.intr_masking;
>>> @@ -1084,7 +1084,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>>       if ( nestedhvm_paging_mode_hap(v) )
>>>       {
>>>           /* host nested paging + guest nested paging. */
>>> -        ns_vmcb->_np_enable = n2vmcb->_np_enable;
>>> +        ns_vmcb->_np = n2vmcb->_np;
>>>           ns_vmcb->_cr3 = n2vmcb->_cr3;
>>>           /* The vmcb->h_cr3 is the shadowed h_cr3. The original
>>>            * unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3,
>>> @@ -1093,7 +1093,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>>       else if ( paging_mode_hap(v->domain) )
>>>       {
>>>           /* host nested paging + guest shadow paging. */
>>> -        ns_vmcb->_np_enable = 0;
>>> +        ns_vmcb->_np = false;
>>>           /* Throw h_cr3 away. Guest is not allowed to set it or
>>>            * it can break out, otherwise (security hole!) */
>>>           ns_vmcb->_h_cr3 = 0x0;
>>> @@ -1104,7 +1104,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>>       else
>>>       {
>>>           /* host shadow paging + guest shadow paging. */
>>> -        ns_vmcb->_np_enable = 0;
>>> +        ns_vmcb->_np = false;
>>>           ns_vmcb->_h_cr3 = 0x0;
>>>           /* The vmcb->_cr3 is the shadowed cr3. The original
>>>            * unshadowed guest cr3 is kept in ns_vmcb->_cr3,
>>
>> While spotting the small issue below it occurred to me: Why is it that
>> vmcb_set_...() is open-coded everywhere here? I think this would be
>> pretty nice to avoid at the same time (for lines touched anyway, or in
>> a separate prereq patch, or alternatively [and only ideally] for all
>> other instances in a follow-on patch). Thoughts?
> 
> Yes, I noticed this too. My plan was to send a followup patch for
> fixing all the instances where vmcb_set/get_...() can be used.
> There are bunch of other vmcb bits (apart from the ones being
> handled in this patchset) in this file and in svm.c which can
> benefit from using VMCB accessors.

To keep churn as well as effort to find commits touching individual lines
low, doing the conversion when touching lines anyway is imo preferable. A
follow-on patch can then convert what's left.

Jan
Vaishali Thakkar March 12, 2024, 10:54 a.m. UTC | #4
On 3/12/24 11:49, Jan Beulich wrote:
> On 12.03.2024 11:00, Vaishali Thakkar wrote:
>> On 3/12/24 08:54, Jan Beulich wrote:
>>> On 11.03.2024 13:40, Vaishali Thakkar wrote:
>>>> --- a/xen/arch/x86/hvm/svm/nestedsvm.c
>>>> +++ b/xen/arch/x86/hvm/svm/nestedsvm.c
>>>> @@ -571,7 +571,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>>>        if ( nestedhvm_paging_mode_hap(v) )
>>>>        {
>>>>            /* host nested paging + guest nested paging. */
>>>> -        n2vmcb->_np_enable = 1;
>>>> +        n2vmcb->_np = true;
>>>>    
>>>>            nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
>>>>    
>>>> @@ -585,7 +585,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>>>        else if ( paging_mode_hap(v->domain) )
>>>>        {
>>>>            /* host nested paging + guest shadow paging. */
>>>> -        n2vmcb->_np_enable = 1;
>>>> +        n2vmcb->_np = true;
>>>>            /* Keep h_cr3 as it is. */
>>>>            n2vmcb->_h_cr3 = n1vmcb->_h_cr3;
>>>>            /* When l1 guest does shadow paging
>>>> @@ -601,7 +601,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
>>>>        else
>>>>        {
>>>>            /* host shadow paging + guest shadow paging. */
>>>> -        n2vmcb->_np_enable = 0;
>>>> +        n2vmcb->_np = false;
>>>>            n2vmcb->_h_cr3 = 0x0;
>>>>    
>>>>            /* TODO: Once shadow-shadow paging is in place come back to here
>>>> @@ -706,7 +706,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
>>>>        }
>>>>    
>>>>        /* nested paging for the guest */
>>>> -    svm->ns_hap_enabled = !!ns_vmcb->_np_enable;
>>>> +    svm->ns_hap_enabled = ns_vmcb->_np;
>>>>    
>>>>        /* Remember the V_INTR_MASK in hostflags */
>>>>        svm->ns_hostflags.fields.vintrmask = !!ns_vmcb->_vintr.fields.intr_masking;
>>>> @@ -1084,7 +1084,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>>>        if ( nestedhvm_paging_mode_hap(v) )
>>>>        {
>>>>            /* host nested paging + guest nested paging. */
>>>> -        ns_vmcb->_np_enable = n2vmcb->_np_enable;
>>>> +        ns_vmcb->_np = n2vmcb->_np;
>>>>            ns_vmcb->_cr3 = n2vmcb->_cr3;
>>>>            /* The vmcb->h_cr3 is the shadowed h_cr3. The original
>>>>             * unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3,
>>>> @@ -1093,7 +1093,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>>>        else if ( paging_mode_hap(v->domain) )
>>>>        {
>>>>            /* host nested paging + guest shadow paging. */
>>>> -        ns_vmcb->_np_enable = 0;
>>>> +        ns_vmcb->_np = false;
>>>>            /* Throw h_cr3 away. Guest is not allowed to set it or
>>>>             * it can break out, otherwise (security hole!) */
>>>>            ns_vmcb->_h_cr3 = 0x0;
>>>> @@ -1104,7 +1104,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
>>>>        else
>>>>        {
>>>>            /* host shadow paging + guest shadow paging. */
>>>> -        ns_vmcb->_np_enable = 0;
>>>> +        ns_vmcb->_np = false;
>>>>            ns_vmcb->_h_cr3 = 0x0;
>>>>            /* The vmcb->_cr3 is the shadowed cr3. The original
>>>>             * unshadowed guest cr3 is kept in ns_vmcb->_cr3,
>>>
>>> While spotting the small issue below it occurred to me: Why is it that
>>> vmcb_set_...() is open-coded everywhere here? I think this would be
>>> pretty nice to avoid at the same time (for lines touched anyway, or in
>>> a separate prereq patch, or alternatively [and only ideally] for all
>>> other instances in a follow-on patch). Thoughts?
>>
>> Yes, I noticed this too. My plan was to send a followup patch for
>> fixing all the instances where vmcb_set/get_...() can be used.
>> There are a bunch of other vmcb bits (apart from the ones being
>> handled in this patchset) in this file and in svm.c which can
>> benefit from using VMCB accessors.
> 
> To keep churn as well as effort to find commits touching individual lines
> low, doing the conversion when touching lines anyway is imo preferable. A
> follow-on patch can then convert what's left.

Alright, I'll replace open coding with VMCB accessors for the lines which 
are touched by this patchset. And others in a followup patchset.

> Jan
diff mbox series

Patch

diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index e4e01add8c..37548cf05c 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -571,7 +571,7 @@  static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     if ( nestedhvm_paging_mode_hap(v) )
     {
         /* host nested paging + guest nested paging. */
-        n2vmcb->_np_enable = 1;
+        n2vmcb->_np = true;
 
         nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
 
@@ -585,7 +585,7 @@  static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     else if ( paging_mode_hap(v->domain) )
     {
         /* host nested paging + guest shadow paging. */
-        n2vmcb->_np_enable = 1;
+        n2vmcb->_np = true;
         /* Keep h_cr3 as it is. */
         n2vmcb->_h_cr3 = n1vmcb->_h_cr3;
         /* When l1 guest does shadow paging
@@ -601,7 +601,7 @@  static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     else
     {
         /* host shadow paging + guest shadow paging. */
-        n2vmcb->_np_enable = 0;
+        n2vmcb->_np = false;
         n2vmcb->_h_cr3 = 0x0;
 
         /* TODO: Once shadow-shadow paging is in place come back to here
@@ -706,7 +706,7 @@  nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
     }
 
     /* nested paging for the guest */
-    svm->ns_hap_enabled = !!ns_vmcb->_np_enable;
+    svm->ns_hap_enabled = ns_vmcb->_np;
 
     /* Remember the V_INTR_MASK in hostflags */
     svm->ns_hostflags.fields.vintrmask = !!ns_vmcb->_vintr.fields.intr_masking;
@@ -1084,7 +1084,7 @@  nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
     if ( nestedhvm_paging_mode_hap(v) )
     {
         /* host nested paging + guest nested paging. */
-        ns_vmcb->_np_enable = n2vmcb->_np_enable;
+        ns_vmcb->_np = n2vmcb->_np;
         ns_vmcb->_cr3 = n2vmcb->_cr3;
         /* The vmcb->h_cr3 is the shadowed h_cr3. The original
          * unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3,
@@ -1093,7 +1093,7 @@  nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
     else if ( paging_mode_hap(v->domain) )
     {
         /* host nested paging + guest shadow paging. */
-        ns_vmcb->_np_enable = 0;
+        ns_vmcb->_np = false;
         /* Throw h_cr3 away. Guest is not allowed to set it or
          * it can break out, otherwise (security hole!) */
         ns_vmcb->_h_cr3 = 0x0;
@@ -1104,7 +1104,7 @@  nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
     else
     {
         /* host shadow paging + guest shadow paging. */
-        ns_vmcb->_np_enable = 0;
+        ns_vmcb->_np = false;
         ns_vmcb->_h_cr3 = 0x0;
         /* The vmcb->_cr3 is the shadowed cr3. The original
          * unshadowed guest cr3 is kept in ns_vmcb->_cr3,
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b551eac807..1b38f6a494 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -473,7 +473,7 @@  static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb_set_np_enable(vmcb, 1);
+        vmcb_set_np(vmcb, 1);
         vmcb_set_g_pat(vmcb, MSR_IA32_CR_PAT_RESET /* guest PAT */);
         vmcb_set_h_cr3(vmcb, pagetable_get_paddr(p2m_get_pagetable(p2m)));
     }
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 282fe7cdbe..770a0228d4 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -133,7 +133,7 @@  static int construct_vmcb(struct vcpu *v)
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->_np_enable = 1; /* enable nested paging */
+        vmcb->_np = true; /* enable nested paging */
         vmcb->_g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
         vmcb->_h_cr3 = pagetable_get_paddr(
             p2m_get_pagetable(p2m_get_hostp2m(v->domain)));
diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
index 91221ff4e2..76507576ba 100644
--- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
+++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
@@ -473,12 +473,12 @@  struct vmcb_struct {
     intinfo_t exit_int_info;    /* offset 0x88 */
     union {                     /* offset 0x90 - cleanbit 4 */
         struct {
-            bool _np_enable     :1;
-            bool _sev_enable    :1;
-            bool _sev_es_enable :1;
-            bool _gmet          :1;
-            bool _np_sss        :1;
-            bool _vte           :1;
+            bool _np        :1;
+            bool _sev       :1;
+            bool _sev_es    :1;
+            bool _gmet      :1;
+            bool _np_sss    :1;
+            bool _vte       :1;
         };
         uint64_t _np_ctrl;
     };
@@ -645,9 +645,9 @@  VMCB_ACCESSORS(msrpm_base_pa, iopm)
 VMCB_ACCESSORS(guest_asid, asid)
 VMCB_ACCESSORS(vintr, tpr)
 VMCB_ACCESSORS(np_ctrl, np)
-VMCB_ACCESSORS_(np_enable, bool, np)
-VMCB_ACCESSORS_(sev_enable, bool, np)
-VMCB_ACCESSORS_(sev_es_enable, bool, np)
+VMCB_ACCESSORS_(np, bool, np)
+VMCB_ACCESSORS_(sev, bool, np)
+VMCB_ACCESSORS_(sev_es, bool, np)
 VMCB_ACCESSORS_(gmet, bool, np)
 VMCB_ACCESSORS_(vte, bool, np)
 VMCB_ACCESSORS(h_cr3, np)