diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -345,6 +345,8 @@ int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
void kvm_nested_s2_clear(struct kvm *kvm);
void kvm_nested_s2_flush(struct kvm *kvm);
int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
+bool kvm_nested_s2_clear_curr_vmid(struct kvm_vcpu *vcpu, phys_addr_t start,
+ u64 size);
static inline u64 kvm_get_vttbr(struct kvm_s2_vmid *vmid,
struct kvm_s2_mmu *mmu)
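For context, handle_vmalls12e1is() below rebuilds the host-managed VTTBR
with kvm_get_vttbr(), whose body is elided from this hunk. A minimal sketch
of the usual composition, assuming kvm_s2_vmid keeps the allocated VMID in
a vmid field and kvm_s2_mmu keeps the stage 2 table base in pgd_phys (not
necessarily the series' exact body):

/* Sketch only: a VTTBR is the stage 2 base address with the VMID in
 * bits [63:48] (VTTBR_VMID_SHIFT).
 */
static inline u64 kvm_get_vttbr(struct kvm_s2_vmid *vmid,
				struct kvm_s2_mmu *mmu)
{
	u64 baddr = mmu->pgd_phys;
	u64 vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;

	return baddr | vmid_field;
}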
diff --git a/arch/arm64/kvm/mmu-nested.c b/arch/arm64/kvm/mmu-nested.c
--- a/arch/arm64/kvm/mmu-nested.c
+++ b/arch/arm64/kvm/mmu-nested.c
@@ -349,6 +349,29 @@ static struct kvm_nested_s2_mmu *lookup_nested_mmu(struct kvm_vcpu *vcpu,
return NULL;
}
+/*
+ * Clear the mappings in the shadow stage 2 page tables for the VMID that is
+ * current from the guest hypervisor's point of view. The caller must hold
+ * kvm->mmu_lock.
+ */
+bool kvm_nested_s2_clear_curr_vmid(struct kvm_vcpu *vcpu, phys_addr_t start,
+ u64 size)
+{
+ struct kvm_nested_s2_mmu *nested_mmu;
+ u64 vttbr = vcpu_sys_reg(vcpu, VTTBR_EL2);
+
+	/*
+	 * Look up the mmu that backs the current VMID from the guest
+	 * hypervisor's point of view.
+	 */
+ nested_mmu = lookup_nested_mmu(vcpu, vttbr);
+ if (!nested_mmu)
+ return false;
+
+ kvm_unmap_stage2_range(vcpu->kvm, &nested_mmu->mmu, start, size);
+ return true;
+}
+
/**
* create_nested_mmu - create mmu for the given virtual VMID
*
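lookup_nested_mmu() is only partially visible in this hunk. Conceptually it
matches the VMID field of the given VTTBR against the nested MMUs already
created on behalf of the guest hypervisor. A minimal sketch, assuming a
per-VM nested_mmu_list and a get_vmid() helper that extracts bits [63:48];
both names are illustrative, not necessarily the ones used in the series:

/* Sketch only: find the nested MMU whose virtual VTTBR carries the same
 * VMID as the one the guest hypervisor has programmed.
 */
static struct kvm_nested_s2_mmu *lookup_nested_mmu(struct kvm_vcpu *vcpu,
						   u64 vttbr)
{
	struct kvm_nested_s2_mmu *mmu;
	u64 target_vmid = get_vmid(vttbr);	/* hypothetical helper */

	list_for_each_entry_rcu(mmu, &vcpu->kvm->arch.nested_mmu_list, list)
		if (get_vmid(mmu->virtual_vttbr) == target_vmid)
			return mmu;

	return NULL;
}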
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1820,6 +1820,35 @@ static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 vttbr;
+ struct kvm_s2_mmu *mmu;
+ bool ret;
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+	/*
+	 * Clear the mappings in the shadow stage 2 page tables for the
+	 * current VMID; unmapping them also invalidates the stage 1 and 2
+	 * TLB entries via kvm_tlb_flush_vmid_ipa().
+	 */
+ ret = kvm_nested_s2_clear_curr_vmid(vcpu, 0, KVM_PHYS_SIZE);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+ if (!ret) {
+		/*
+		 * The current VMID belongs to the host OS running in the
+		 * VM, for which we keep no shadow stage 2 page tables, so
+		 * invalidate its TLB entries explicitly.
+		 */
+ mmu = &vcpu->kvm->arch.mmu;
+ vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
+ kvm_call_hyp(__kvm_tlb_flush_vmid, vttbr);
+ }
+ return true;
+}
+
/*
* AT instruction emulation
*
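For reference, what reaches handle_vmalls12e1is() is the EL2-only TLBI that
a guest hypervisor issues to drop all stage 1 and stage 2 entries for its
current VMID; with HCR_EL2.NV set, EL2-only system instructions executed at
virtual EL2 trap to the host. A sketch of the guest-side sequence (the
wrapper name is made up for illustration):

/* Sketch only: issued by the guest hypervisor at (virtual) EL2 and
 * trapped into the handler above.
 */
static inline void vmid_flush_all_s12(void)
{
	asm volatile("dsb ishst\n\t"
		     "tlbi vmalls12e1is\n\t"
		     "dsb ish\n\t"
		     "isb"
		     : : : "memory");
}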
@@ -1907,14 +1936,14 @@ static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
SYS_INSN_TO_DESC(TLBI_VAE2IS, handle_vae2, NULL),
SYS_INSN_TO_DESC(TLBI_ALLE1IS, handle_alle1is, NULL),
SYS_INSN_TO_DESC(TLBI_VALE2IS, handle_vae2, NULL),
- SYS_INSN_TO_DESC(TLBI_VMALLS12E1IS, NULL, NULL),
+ SYS_INSN_TO_DESC(TLBI_VMALLS12E1IS, handle_vmalls12e1is, NULL),
SYS_INSN_TO_DESC(TLBI_IPAS2E1, NULL, NULL),
SYS_INSN_TO_DESC(TLBI_IPAS2LE1, NULL, NULL),
SYS_INSN_TO_DESC(TLBI_ALLE2, handle_alle2, NULL),
SYS_INSN_TO_DESC(TLBI_VAE2, handle_vae2, NULL),
SYS_INSN_TO_DESC(TLBI_ALLE1, handle_alle1is, NULL),
SYS_INSN_TO_DESC(TLBI_VALE2, handle_vae2, NULL),
- SYS_INSN_TO_DESC(TLBI_VMALLS12E1, NULL, NULL),
+ SYS_INSN_TO_DESC(TLBI_VMALLS12E1, handle_vmalls12e1is, NULL),
};
#define reg_to_match_value(x) \
Based on the same principle as TLBI ALLE1(IS) emulation, we clear the
mappings in the shadow stage-2 page tables and invalidate TLB entries. But
this time we do it only for the current VMID from the guest hypervisor's
perspective, not for all VMIDs.

Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
---
 arch/arm64/include/asm/kvm_mmu.h |  2 ++
 arch/arm64/kvm/mmu-nested.c      | 23 +++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c        | 33 +++++++++++++++++++++++++++++++--
 3 files changed, 56 insertions(+), 2 deletions(-)
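To make the contrast with the ALLE1(IS) patch concrete, the two helpers
line up as follows (kvm_nested_s2_clear() is the all-VMIDs variant already
declared in kvm_mmu.h):

/* ALLE1(IS): drop shadow stage 2 mappings for every virtual VMID. */
kvm_nested_s2_clear(vcpu->kvm);

/* VMALLS12E1(IS): drop them only for the VMID currently programmed in
 * the guest hypervisor's VTTBR_EL2; returns false when no shadow tables
 * exist for it, i.e. the VMID belongs to the VM's own host OS.
 */
kvm_nested_s2_clear_curr_vmid(vcpu, 0, KVM_PHYS_SIZE);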