@@ -350,6 +350,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
u64 vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
kvm_call_hyp(__kvm_tlb_flush_local_vmid, vttbr);
+#ifdef CONFIG_KVM_ARM_NESTED_HYP
+ /* Also flush this CPU's TLB entries for the shadow EL2 VMID, if any. */
+ if (mmu->el2_vmid.vmid) {
+ vttbr = kvm_get_vttbr(&mmu->el2_vmid, mmu);
+ kvm_call_hyp(__kvm_tlb_flush_local_vmid, vttbr);
+ }
+#endif
*last_ran = vcpu->vcpu_id;
}
@@ -60,10 +60,20 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
*/
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
+#ifndef CONFIG_KVM_ARM_NESTED_HYP
struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
u64 vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
kvm_call_hyp(__kvm_tlb_flush_vmid, vttbr);
+#else
+ /*
+ * With nested virtualization each vCPU in the VM may run under several
+ * VMIDs (one per shadow stage-2 context), so quiescing the system to
+ * walk and flush every VMID that might be live is not worth the cost;
+ * just invalidate the entire TLB context instead.
+ */
+ kvm_call_hyp(__kvm_flush_vm_context);
+#endif
}
static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
@@ -71,6 +81,12 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
u64 vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, vttbr, ipa);
+#ifdef CONFIG_KVM_ARM_NESTED_HYP
+ if (!mmu->el2_vmid.vmid)
+ return; /* this mmu has no shadow EL2 context, nothing more to flush */
+ vttbr = kvm_get_vttbr(&mmu->el2_vmid, mmu);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, vttbr, ipa);
+#endif
}
/*