| Message ID | BLU437-SMTP8B9FAE86357F8D7414F0380440@phx.gbl (mailing list archive) |
|---|---|
| State | New, archived |
Wanpeng Li <wanpeng.li@hotmail.com> writes:

> Introduce __vmx_flush_tlb() to handle specific vpid.
>
> Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
> ---
>  arch/x86/kvm/vmx.c | 21 +++++++++++++--------
>  1 file changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 794c529..7188c5e 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1343,13 +1343,13 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
>  			__loaded_vmcs_clear, loaded_vmcs, 1);
>  }
>
> -static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
> +static inline void vpid_sync_vcpu_single(int vpid)
>  {
> -	if (vmx->vpid == 0)
> +	if (vpid == 0)
>  		return;
>
>  	if (cpu_has_vmx_invvpid_single())
> -		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
> +		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
>  }
>
>  static inline void vpid_sync_vcpu_global(void)
> @@ -1358,10 +1358,10 @@ static inline void vpid_sync_vcpu_global(void)
>  		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
>  }
>
> -static inline void vpid_sync_context(struct vcpu_vmx *vmx)
> +static inline void vpid_sync_context(int vpid)
>  {
>  	if (cpu_has_vmx_invvpid_single())
> -		vpid_sync_vcpu_single(vmx);
> +		vpid_sync_vcpu_single(vpid);
>  	else
>  		vpid_sync_vcpu_global();
>  }

Not sure myself what's the right thing to do, but this may be undesirable
in a nested environment. Assuming the processor supports global invalidation
only, this seems like an easy way for the nested guest to invalidate *all*
mappings - even the L1-specific mappings.

> @@ -3450,9 +3450,9 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
>
>  #endif
>
> -static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
> +static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
>  {
> -	vpid_sync_context(to_vmx(vcpu));
> +	vpid_sync_context(vpid);
>  	if (enable_ept) {
>  		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
>  			return;
> @@ -3460,6 +3460,11 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
>  	}
>  }
>
> +static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
> +{
> +	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
> +}
> +
>  static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
>  {
>  	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
> @@ -4795,7 +4800,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>  	vmx_fpu_activate(vcpu);
>  	update_exception_bitmap(vcpu);
>
> -	vpid_sync_context(vmx);
> +	vpid_sync_context(vmx->vpid);
>  }
>
>  /*
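To make the reviewer's concern concrete, here is a minimal hypothetical sketch of
a nested caller of the new helper; the nested.vpid02 field is an assumption for
illustration only and is not part of this patch:

/*
 * Hypothetical sketch only: nested.vpid02 (a VPID allocated for the
 * nested guest) is assumed for illustration; this patch does not add it.
 */
static void nested_vmx_flush_l2_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Intended to flush only L2's translations... */
	__vmx_flush_tlb(vcpu, vmx->nested.vpid02);
	/*
	 * ...but when cpu_has_vmx_invvpid_single() is false,
	 * vpid_sync_context() falls back to vpid_sync_vcpu_global(),
	 * invalidating every VPID on the CPU - including L1's mappings.
	 */
}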
On 9/25/15 12:12 AM, Bandan Das wrote:
> Wanpeng Li <wanpeng.li@hotmail.com> writes:
>
>> Introduce __vmx_flush_tlb() to handle specific vpid.
>>
>> Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
>> ---
>>  arch/x86/kvm/vmx.c | 21 +++++++++++++--------
>>  1 file changed, 13 insertions(+), 8 deletions(-)
>>
>> [...]
>>
>> -static inline void vpid_sync_context(struct vcpu_vmx *vmx)
>> +static inline void vpid_sync_context(int vpid)
>>  {
>>  	if (cpu_has_vmx_invvpid_single())
>> -		vpid_sync_vcpu_single(vmx);
>> +		vpid_sync_vcpu_single(vpid);
>>  	else
>>  		vpid_sync_vcpu_global();
>>  }
> Not sure myself what's the right thing to do, but this may be undesirable
> in a nested environment. Assuming the processor supports global invalidation
> only, this seems like an easy way for the nested guest to invalidate *all*
> mappings - even the L1-specific mappings.

Indeed. However, there's no easy way to handle the case without single-context
invalidation; we can improve it if you have any ideas, otherwise it can be left
as a further optimization. :-)

Regards,
Wanpeng Li
On 24/09/2015 18:12, Bandan Das wrote:
> Not sure myself what's the right thing to do, but this may be undesirable
> in a nested environment. Assuming the processor supports global invalidation
> only, this seems like an easy way for the nested guest to invalidate *all*
> mappings - even the L1-specific mappings.

It's not a great thing, but it's already what happens if you do a global
INVEPT (it calls vmx_flush_tlb, which results in a global INVVPID if the
single-context variant is not supported).

Even without nested virt, a single guest could slow down all other guests
just by triggering frequent TLB flushes (e.g. by moving around a ROM BAR
thousands of times per second).

It would help to know _which_ processors actually don't support
single-context INVVPID...

Paolo
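For reference, a rough sketch of the existing path Paolo is referring to
(paraphrased call chain, not verbatim kernel code):

/*
 * Rough sketch of the pre-existing behaviour for a global INVEPT from L1
 * on a CPU without single-context INVVPID:
 *
 *   handle_invept(VMX_EPT_EXTENT_GLOBAL)
 *     -> kvm_mmu_sync_roots(vcpu)
 *     -> request KVM_REQ_TLB_FLUSH          (serviced via vmx_flush_tlb())
 *          -> vpid_sync_context()
 *               -> vpid_sync_vcpu_global()
 *                    -> __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0)
 *
 * so on such CPUs one guest's INVEPT already invalidates all VPIDs on
 * that physical CPU, which is the precedent Paolo points to.
 */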
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 794c529..7188c5e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1343,13 +1343,13 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
 			__loaded_vmcs_clear, loaded_vmcs, 1);
 }
 
-static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
+static inline void vpid_sync_vcpu_single(int vpid)
 {
-	if (vmx->vpid == 0)
+	if (vpid == 0)
 		return;
 
 	if (cpu_has_vmx_invvpid_single())
-		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
 }
 
 static inline void vpid_sync_vcpu_global(void)
@@ -1358,10 +1358,10 @@ static inline void vpid_sync_vcpu_global(void)
 		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
 }
 
-static inline void vpid_sync_context(struct vcpu_vmx *vmx)
+static inline void vpid_sync_context(int vpid)
 {
 	if (cpu_has_vmx_invvpid_single())
-		vpid_sync_vcpu_single(vmx);
+		vpid_sync_vcpu_single(vpid);
 	else
 		vpid_sync_vcpu_global();
 }
@@ -3450,9 +3450,9 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 #endif
 
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
 {
-	vpid_sync_context(to_vmx(vcpu));
+	vpid_sync_context(vpid);
 	if (enable_ept) {
 		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 			return;
@@ -3460,6 +3460,11 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -4795,7 +4800,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vmx_fpu_activate(vcpu);
 	update_exception_bitmap(vcpu);
 
-	vpid_sync_context(vmx);
+	vpid_sync_context(vmx->vpid);
 }
 
 /*
Introduce __vmx_flush_tlb() to handle specific vpid.

Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
---
 arch/x86/kvm/vmx.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)
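A short usage sketch of what the split enables; the second caller and the
some_other_vpid parameter are hypothetical, only the vmx_flush_tlb() wrapper
is actually added by this patch:

/*
 * Illustration only: some_other_vpid is a hypothetical VPID (e.g. one
 * allocated for a nested guest); this patch only adds the wrapper.
 */
static void flush_example(struct kvm_vcpu *vcpu, int some_other_vpid)
{
	/* Existing behaviour, preserved by the new vmx_flush_tlb() wrapper: */
	vmx_flush_tlb(vcpu);

	/* New capability: flush translations tagged with a different VPID. */
	__vmx_flush_tlb(vcpu, some_other_vpid);
}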