@@ -3451,11 +3451,11 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
svm->sev_es.ghcb = NULL;
}
-int pre_sev_run(struct vcpu_svm *svm, int cpu)
+int pre_sev_run(struct vcpu_svm *svm, unsigned int *asid, bool *need_flush)
{
+ int cpu = svm->vcpu.cpu;
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
struct kvm *kvm = svm->vcpu.kvm;
- unsigned int asid = sev_get_asid(kvm);
/*
* Reject KVM_RUN if userspace attempts to run the vCPU with an invalid
@@ -3465,24 +3465,17 @@ int pre_sev_run(struct vcpu_svm *svm, int cpu)
if (sev_es_guest(kvm) && !VALID_PAGE(svm->vmcb->control.vmsa_pa))
return -EINVAL;
- if (WARN_ON_ONCE(svm->vmcb->control.asid != asid)) {
- svm->vmcb->control.asid = asid;
- vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
- }
-
/*
- * Flush guest TLB:
- *
- * 1) when different VMCB for the same ASID is to be run on the same host CPU.
- * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
+	 * Flush the guest TLB when a different VMCB for the same ASID is to be
+ * run on the same host CPU. The caller will also flush the TLB if the
+ * VMCB was executed on a different host CPU in previous VMRUNs.
*/
- if (sd->sev_vmcbs[asid] == svm->vmcb &&
- svm->vcpu.arch.last_vmentry_cpu == cpu)
- return 0;
+ *asid = sev_get_asid(kvm);
+ if (sd->sev_vmcbs[*asid] != svm->vmcb) {
+ sd->sev_vmcbs[*asid] = svm->vmcb;
+ *need_flush = true;
+ }
- sd->sev_vmcbs[asid] = svm->vmcb;
- svm_vmcb_set_flush_asid(svm->vmcb);
- vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
return 0;
}
@@ -3615,21 +3615,23 @@ static int pre_svm_run(struct kvm_vcpu *vcpu)
{
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned int asid = kvm_svm->asid;
+ bool sev_need_flush = false;
+
+ if (sev_guest(vcpu->kvm) && pre_sev_run(svm, &asid, &sev_need_flush))
+ return -1;
/*
* If the previous VMRUN of the VMCB occurred on a different physical
* CPU, then mark the VMCB dirty and flush the ASID. Hardware's
* VMCB clean bits are per logical CPU, as are KVM's ASID assignments.
*/
- if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
+ if (unlikely(sev_need_flush || svm->current_vmcb->cpu != vcpu->cpu)) {
svm_vmcb_set_flush_asid(svm->vmcb);
vmcb_mark_all_dirty(svm->vmcb);
svm->current_vmcb->cpu = vcpu->cpu;
}
- if (sev_guest(vcpu->kvm))
- return pre_sev_run(svm, vcpu->cpu);
-
/* Flush the ASID on every VMRUN if kvm_svm->asid allocation failed */
if (unlikely(!kvm_svm->asid))
svm_vmcb_set_flush_asid(svm->vmcb);
@@ -754,7 +754,7 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
/* sev.c */
-int pre_sev_run(struct vcpu_svm *svm, int cpu);
+int pre_sev_run(struct vcpu_svm *svm, unsigned int *asid, bool *need_flush);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
pre_svm_run() and pre_sev_run() now do some redundant work, and the
control flow is not super clear. Specifically:

- Both functions check if the ASID in the VMCB is the expected one.
- Both functions check if the vCPU moved to a different physical CPU.
- Both functions issue an ASID TLB flush if needed.

Pass the ASID and whether or not SEV requires a TLB flush from
pre_sev_run() to pre_svm_run(), and use the logic there instead.
pre_sev_run() now only performs SEV-specific checks.

Note that pre_sev_run() used svm->vcpu.arch.last_vmentry_cpu to check
if the vCPU moved to a different physical CPU, while pre_svm_run()
uses svm->current_vmcb->cpu. The former tracks the CPU per vCPU, while
the latter tracks it per VMCB. For SEV, they both should be equivalent
since there is a single VMCB per vCPU (nested is not supported).

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/sev.c | 27 ++++++++++-----------------
 arch/x86/kvm/svm/svm.c | 10 ++++++----
 arch/x86/kvm/svm/svm.h |  2 +-
 3 files changed, 17 insertions(+), 22 deletions(-)
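
For readers who want the resulting shape of the two functions without the
diff context, below is a minimal standalone C sketch of the refactored
flow. Everything here (pre_sev_run_sketch(), is_sev_guest, cpu_data,
flush_asid_on_run, MAX_SEV_ASIDS, the main() driver) is a simplified
stand-in invented for illustration, not the real KVM definitions; the
real code operates on struct vcpu_svm, struct svm_cpu_data, and the VMCB
clean bits exactly as shown in the patch above.

#include <stdbool.h>

#define MAX_SEV_ASIDS 16

struct vmcb { unsigned int dummy; };

struct svm_cpu_data {
	/* Last VMCB run on this physical CPU, indexed by SEV ASID. */
	struct vmcb *sev_vmcbs[MAX_SEV_ASIDS];
};

struct vcpu_svm {
	struct vmcb *vmcb;
	int current_vmcb_cpu;		/* stand-in for current_vmcb->cpu */
	bool is_sev_guest;
	unsigned int sev_asid;
	struct svm_cpu_data *cpu_data;
	bool flush_asid_on_run;		/* models svm_vmcb_set_flush_asid() */
};

/* SEV-specific checks only: report the ASID and whether a flush is needed. */
static int pre_sev_run_sketch(struct vcpu_svm *svm, unsigned int *asid,
			      bool *need_flush)
{
	struct svm_cpu_data *sd = svm->cpu_data;

	*asid = svm->sev_asid;
	/* A different VMCB for the same ASID last ran on this CPU: flush. */
	if (sd->sev_vmcbs[*asid] != svm->vmcb) {
		sd->sev_vmcbs[*asid] = svm->vmcb;
		*need_flush = true;
	}
	return 0;
}

/* Common path: a single place decides whether to flush the ASID. */
static int pre_svm_run_sketch(struct vcpu_svm *svm, int cpu)
{
	unsigned int asid = 0;
	bool sev_need_flush = false;

	if (svm->is_sev_guest &&
	    pre_sev_run_sketch(svm, &asid, &sev_need_flush))
		return -1;

	/*
	 * Flush if SEV asked for it, or if this VMCB last ran on a
	 * different physical CPU (clean bits and ASIDs are per-CPU).
	 */
	if (sev_need_flush || svm->current_vmcb_cpu != cpu) {
		svm->flush_asid_on_run = true;
		svm->current_vmcb_cpu = cpu;
	}
	return 0;
}

int main(void)
{
	struct svm_cpu_data sd = { { 0 } };
	struct vmcb vmcb_a = { 0 }, vmcb_b = { 0 };
	struct vcpu_svm vcpu = {
		.vmcb = &vmcb_a,
		.current_vmcb_cpu = 0,
		.is_sev_guest = true,
		.sev_asid = 1,
		.cpu_data = &sd,
	};

	/* First run on CPU 0: empty ASID slot, so a flush is requested. */
	pre_svm_run_sketch(&vcpu, 0);
	/* Same VMCB, same CPU: no flush needed. */
	vcpu.flush_asid_on_run = false;
	pre_svm_run_sketch(&vcpu, 0);
	/* Another VMCB steals the ASID slot: flush is requested again. */
	sd.sev_vmcbs[1] = &vmcb_b;
	pre_svm_run_sketch(&vcpu, 0);
	return 0;
}

The out-parameter style mirrors the patch's design choice: pre_sev_run()
no longer writes the ASID or flush state into the VMCB itself, it only
reports what it learned, and pre_svm_run() folds that into the one
existing "mark dirty and flush" path.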