Message ID | b0e5dde517599e8af5aadbaff7dd4410e83fcf86.1710149462.git.vaishali.thakkar@vates.tech (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | x86/svm : Misc changes for few vmcb bits | expand |
On 11.03.2024 13:40, Vaishali Thakkar wrote: > @@ -698,11 +698,11 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, > /* Convert explicitely to boolean. Deals with l1 guests > * that use flush-by-asid w/o checking the cpuid bits */ > nv->nv_flushp2m = !!ns_vmcb->tlb_control; > - if ( svm->ns_guest_asid != ns_vmcb->_guest_asid ) > + if ( svm->ns_asid != ns_vmcb->_asid ) > { > nv->nv_flushp2m = 1; > hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid); > - svm->ns_guest_asid = ns_vmcb->_guest_asid; > + svm->ns_asid = ns_vmcb->_asid; > } > > /* nested paging for the guest */ > @@ -1046,7 +1046,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) > /* Keep it. It's maintainted by the l1 guest. */ > > /* ASID */ > - /* ns_vmcb->_guest_asid = n2vmcb->_guest_asid; */ > + /* ns_vmcb->_asid = n2vmcb->_asid; */ Unlike in the earlier patch, where I could accept the request to switch to using accessor functions as scope-creep-ish, here I'm pretty firm with my request to stop their open-coding at the same time. Unless of course there's a technical reason the accessors cannot be used here. Jan
On 3/12/24 08:59, Jan Beulich wrote: > On 11.03.2024 13:40, Vaishali Thakkar wrote: >> @@ -698,11 +698,11 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, >> /* Convert explicitely to boolean. Deals with l1 guests >> * that use flush-by-asid w/o checking the cpuid bits */ >> nv->nv_flushp2m = !!ns_vmcb->tlb_control; >> - if ( svm->ns_guest_asid != ns_vmcb->_guest_asid ) >> + if ( svm->ns_asid != ns_vmcb->_asid ) >> { >> nv->nv_flushp2m = 1; >> hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid); >> - svm->ns_guest_asid = ns_vmcb->_guest_asid; >> + svm->ns_asid = ns_vmcb->_asid; >> } >> >> /* nested paging for the guest */ >> @@ -1046,7 +1046,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) >> /* Keep it. It's maintainted by the l1 guest. */ >> >> /* ASID */ >> - /* ns_vmcb->_guest_asid = n2vmcb->_guest_asid; */ >> + /* ns_vmcb->_asid = n2vmcb->_asid; */ > > Unlike in the earlier patch, where I could accept the request to switch > to using accessor functions as scope-creep-ish, here I'm pretty firm > with my request to stop their open-coding at the same time. Unless of > course there's a technical reason the accessors cannot be used here. Yes, so as mentioned in the other patch's reply, I plan to tackle this instance too in the followup patchset along with others. So, if you're fine with it, I'll leave this one here for now. Unless you prefer otherwise. > Jan
On 12.03.2024 11:08, Vaishali Thakkar wrote: > On 3/12/24 08:59, Jan Beulich wrote: >> On 11.03.2024 13:40, Vaishali Thakkar wrote: >>> @@ -698,11 +698,11 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, >>> /* Convert explicitely to boolean. Deals with l1 guests >>> * that use flush-by-asid w/o checking the cpuid bits */ >>> nv->nv_flushp2m = !!ns_vmcb->tlb_control; >>> - if ( svm->ns_guest_asid != ns_vmcb->_guest_asid ) >>> + if ( svm->ns_asid != ns_vmcb->_asid ) >>> { >>> nv->nv_flushp2m = 1; >>> hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid); >>> - svm->ns_guest_asid = ns_vmcb->_guest_asid; >>> + svm->ns_asid = ns_vmcb->_asid; >>> } >>> >>> /* nested paging for the guest */ >>> @@ -1046,7 +1046,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) >>> /* Keep it. It's maintainted by the l1 guest. */ >>> >>> /* ASID */ >>> - /* ns_vmcb->_guest_asid = n2vmcb->_guest_asid; */ >>> + /* ns_vmcb->_asid = n2vmcb->_asid; */ >> >> Unlike in the earlier patch, where I could accept the request to switch >> to using accessor functions as scope-creep-ish, here I'm pretty firm >> with my request to stop their open-coding at the same time. Unless of >> course there's a technical reason the accessors cannot be used here. > > Yes, so as mentioned in the other patch's reply, I plan to tackle this > instance too in the followup patchset along with others. So, if you're > fine with it, I'll leave this one here for now. Unless you prefer otherwise. I thought I said pretty clearly that here I'm stronger with my request than on the other patch. Jan
diff --git a/xen/arch/x86/hvm/svm/asid.c b/xen/arch/x86/hvm/svm/asid.c index 28e0dbc176..d5f70f8848 100644 --- a/xen/arch/x86/hvm/svm/asid.c +++ b/xen/arch/x86/hvm/svm/asid.c @@ -37,14 +37,14 @@ void svm_asid_handle_vmrun(void) /* ASID 0 indicates that ASIDs are disabled. */ if ( p_asid->asid == 0 ) { - vmcb_set_guest_asid(vmcb, 1); + vmcb_set_asid(vmcb, 1); vmcb->tlb_control = cpu_has_svm_flushbyasid ? TLB_CTRL_FLUSH_ASID : TLB_CTRL_FLUSH_ALL; return; } - if ( vmcb_get_guest_asid(vmcb) != p_asid->asid ) - vmcb_set_guest_asid(vmcb, p_asid->asid); + if ( vmcb_get_asid(vmcb) != p_asid->asid ) + vmcb_set_asid(vmcb, p_asid->asid); vmcb->tlb_control = !need_flush ? TLB_CTRL_NO_FLUSH : diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c index 37548cf05c..adbd49b5a2 100644 --- a/xen/arch/x86/hvm/svm/nestedsvm.c +++ b/xen/arch/x86/hvm/svm/nestedsvm.c @@ -157,7 +157,7 @@ int cf_check nsvm_vcpu_reset(struct vcpu *v) svm->ns_hap_enabled = 0; svm->ns_vmcb_guestcr3 = 0; svm->ns_vmcb_hostcr3 = 0; - svm->ns_guest_asid = 0; + svm->ns_asid = 0; svm->ns_hostflags.bytes = 0; svm->ns_vmexit.exitinfo1 = 0; svm->ns_vmexit.exitinfo2 = 0; @@ -698,11 +698,11 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs, /* Convert explicitely to boolean. Deals with l1 guests * that use flush-by-asid w/o checking the cpuid bits */ nv->nv_flushp2m = !!ns_vmcb->tlb_control; - if ( svm->ns_guest_asid != ns_vmcb->_guest_asid ) + if ( svm->ns_asid != ns_vmcb->_asid ) { nv->nv_flushp2m = 1; hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid); - svm->ns_guest_asid = ns_vmcb->_guest_asid; + svm->ns_asid = ns_vmcb->_asid; } /* nested paging for the guest */ @@ -1046,7 +1046,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) /* Keep it. It's maintainted by the l1 guest. 
*/ /* ASID */ - /* ns_vmcb->_guest_asid = n2vmcb->_guest_asid; */ + /* ns_vmcb->_asid = n2vmcb->_asid; */ /* TLB control */ ns_vmcb->tlb_control = 0; diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c index 24358c6eea..0d714c728c 100644 --- a/xen/arch/x86/hvm/svm/svmdebug.c +++ b/xen/arch/x86/hvm/svm/svmdebug.c @@ -51,8 +51,8 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb) vmcb->exitcode, vmcb->exit_int_info.raw); printk("exitinfo1 = %#"PRIx64" exitinfo2 = %#"PRIx64"\n", vmcb->exitinfo1, vmcb->exitinfo2); - printk("np_ctrl = %#"PRIx64" guest_asid = %#x\n", - vmcb_get_np_ctrl(vmcb), vmcb_get_guest_asid(vmcb)); + printk("np_ctrl = %#"PRIx64" asid = %#x\n", + vmcb_get_np_ctrl(vmcb), vmcb_get_asid(vmcb)); printk("virtual vmload/vmsave = %d, virt_ext = %#"PRIx64"\n", vmcb->virt_ext.fields.vloadsave_enable, vmcb->virt_ext.bytes); printk("cpl = %d efer = %#"PRIx64" star = %#"PRIx64" lstar = %#"PRIx64"\n", diff --git a/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h b/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h index 406fc082b1..7767cd6080 100644 --- a/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h +++ b/xen/arch/x86/include/asm/hvm/svm/nestedsvm.h @@ -51,7 +51,7 @@ struct nestedsvm { * the l1 guest nested page table */ uint64_t ns_vmcb_guestcr3, ns_vmcb_hostcr3; - uint32_t ns_guest_asid; + uint32_t ns_asid; bool ns_hap_enabled; diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h b/xen/arch/x86/include/asm/hvm/svm/vmcb.h index 76507576ba..5d539fcdcf 100644 --- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h +++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h @@ -383,7 +383,7 @@ typedef union bool intercepts:1; /* 0: cr/dr/exception/general intercepts, * pause_filter_count, tsc_offset */ bool iopm:1; /* 1: iopm_base_pa, msrpm_base_pa */ - bool asid:1; /* 2: guest_asid */ + bool asid:1; /* 2: asid */ bool tpr:1; /* 3: vintr */ bool np:1; /* 4: np_enable, h_cr3, g_pat */ bool cr:1; /* 5: cr0, cr3, cr4, efer */ @@ -413,7 +413,7 @@ struct 
vmcb_struct { u64 _iopm_base_pa; /* offset 0x40 - cleanbit 1 */ u64 _msrpm_base_pa; /* offset 0x48 - cleanbit 1 */ u64 _tsc_offset; /* offset 0x50 - cleanbit 0 */ - u32 _guest_asid; /* offset 0x58 - cleanbit 2 */ + u32 _asid; /* offset 0x58 - cleanbit 2 */ u8 tlb_control; /* offset 0x5C - TLB_CTRL_* */ u8 res07[3]; vintr_t _vintr; /* offset 0x60 - cleanbit 3 */ @@ -642,7 +642,7 @@ VMCB_ACCESSORS(pause_filter_thresh, intercepts) VMCB_ACCESSORS(tsc_offset, intercepts) VMCB_ACCESSORS(iopm_base_pa, iopm) VMCB_ACCESSORS(msrpm_base_pa, iopm) -VMCB_ACCESSORS(guest_asid, asid) +VMCB_ACCESSORS(asid, asid) VMCB_ACCESSORS(vintr, tpr) VMCB_ACCESSORS(np_ctrl, np) VMCB_ACCESSORS_(np, bool, np)
The suffix _guest is redundant for the asid bit. Drop it to avoid adding extra code volume. Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com> Signed-off-by: Vaishali Thakkar <vaishali.thakkar@vates.tech> --- Changes since v1: - This patch wasn't part of v1. It's been added to address Andrew's suggestion. --- xen/arch/x86/hvm/svm/asid.c | 6 +++--- xen/arch/x86/hvm/svm/nestedsvm.c | 8 ++++---- xen/arch/x86/hvm/svm/svmdebug.c | 4 ++-- xen/arch/x86/include/asm/hvm/svm/nestedsvm.h | 2 +- xen/arch/x86/include/asm/hvm/svm/vmcb.h | 6 +++--- 5 files changed, 13 insertions(+), 13 deletions(-)