@@ -4005,10 +4005,7 @@ static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
* unconditionally does a TLB flush on both nested VM-Enter and nested
* VM-Exit (via kvm_mmu_reset_context()).
*/
- if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
- svm_vmcb_set_flush_asid(svm->vmcb);
- else
- svm->current_vmcb->asid_generation--;
+ svm_vmcb_set_flush_asid(svm->vmcb);
}
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
@@ -645,7 +645,10 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
static inline void svm_vmcb_set_flush_asid(struct vmcb *vmcb)
{
- vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+ if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
+ vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+ else
+ vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
static inline void svm_vmcb_clear_flush_asid(struct vmcb *vmcb)
Currently, if FLUSHBYASID is not available when performing a TLB flush, the fallback is decrementing the ASID generation to trigger allocating a new ASID. In preparation for using a static ASID per VM, just fall back to flushing everything if FLUSHBYASID is not available. This is probably worse from a performance perspective, but FLUSHBYASID has been around for ~15 years and it's not worth carrying the complexity. The fallback logic is moved within svm_vmcb_set_flush_asid(), as more callers will be added and will need the fallback as well. Suggested-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev> --- arch/x86/kvm/svm/svm.c | 5 +---- arch/x86/kvm/svm/svm.h | 5 ++++- 2 files changed, 5 insertions(+), 5 deletions(-)