--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -55,7 +55,8 @@ extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+ int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -21,8 +21,8 @@
* have to use an IPI based mechanism. Until then, we stick to the simple
* hardware assisted version.
*
- * As v7 does not support flushing per IPA, just nuke the whole TLB
- * instead, ignoring the ipa value.
+ * As v7 does not support flushing per IPA (let alone any level hint),
+ * just nuke the whole TLB instead, ignoring the ipa value.
*/
void __hyp_text __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
@@ -39,7 +39,8 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
write_sysreg(0, VTTBR);
}
-void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
+ phys_addr_t ipa, int level)
{
__kvm_tlb_flush_vmid(mmu);
}
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -59,7 +59,8 @@ extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+ int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern void __kvm_tlb_vae2is(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -132,7 +132,8 @@ static void __hyp_text __tlb_switch_to_host(struct kvm_s2_mmu *mmu,
__tlb_switch_to_host_nvhe(cxt);
}
-void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
+ phys_addr_t ipa, int level)
{
struct tlb_inv_context cxt;
@@ -148,7 +149,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa
* whole of Stage-1. Weep...
*/
ipa >>= 12;
- __tlbi(ipas2e1is, ipa);
+ __tlbi_level(ipas2e1is, ipa, level);
/*
* We have to ensure completion of the invalidation at Stage-2,
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -76,7 +76,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
{
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, 0);
}
/*
Introduce a level hint to __kvm_tlb_flush_vmid_ipa. No functional change.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm/include/asm/kvm_asm.h   | 3 ++-
 arch/arm/kvm/hyp/tlb.c           | 7 ++++---
 arch/arm64/include/asm/kvm_asm.h | 3 ++-
 arch/arm64/kvm/hyp/tlb.c         | 5 +++--
 virt/kvm/arm/mmu.c               | 2 +-
 5 files changed, 12 insertions(+), 8 deletions(-)
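
On the 32bit side the new level argument is accepted but ignored: ARMv7 has
no per-IPA stage-2 invalidation to attach a hint to, so
__kvm_tlb_flush_vmid_ipa() keeps falling back to __kvm_tlb_flush_vmid(),
which nukes the whole TLB for that VMID. For context, that fallback looks
roughly like the sketch below (illustration only, not part of this patch;
details such as the HYP VA conversion are omitted and the kvm_get_vttbr()
helper is assumed):

	void __hyp_text __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
	{
		dsb(ishst);

		/* Switch VTTBR to the VMID we want to invalidate */
		write_sysreg(kvm_get_vttbr(mmu), VTTBR);	/* assumed helper */
		isb();

		/* TLBIALLIS: invalidate all entries for this VMID, inner shareable */
		write_sysreg(0, TLBIALLIS);
		dsb(ish);
		isb();

		write_sysreg(0, VTTBR);
	}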
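
On the arm64 side the only real change is __tlbi(ipas2e1is, ipa) becoming
__tlbi_level(ipas2e1is, ipa, level). For reference, the __tlbi_level()
helper this relies on has roughly the following shape (a simplified sketch
of the <asm/tlbflush.h> macro, not part of this patch; the exact capability
name and encoding details may differ): when the CPU implements ARMv8.4-TTL
and the level is non-zero, the granule size and level are packed into the
TTL field (bits [47:44]) of the TLBI operand; otherwise the operand is left
untouched.

	#define TLBI_TTL_MASK	GENMASK_ULL(47, 44)

	#define __tlbi_level(op, addr, level) do {			\
		u64 arg = addr;						\
									\
		if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
		    level) {						\
			u64 ttl = level & 3;				\
									\
			/* TTL[3:2] encodes the granule: 4K/16K/64K */	\
			switch (PAGE_SIZE) {				\
			case SZ_4K:  ttl |= 1 << 2; break;		\
			case SZ_16K: ttl |= 2 << 2; break;		\
			case SZ_64K: ttl |= 3 << 2; break;		\
			}						\
									\
			arg &= ~TLBI_TTL_MASK;				\
			arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
		}							\
									\
		__tlbi(op, arg);					\
	} while (0)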
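
virt/kvm/arm/mmu.c passes 0 for now, which the TTL encoding treats as "no
information about the level", hence no functional change. A caller that
does know which level it just unmapped could eventually pass it directly,
e.g. (purely illustrative, not part of this patch):

	/* hypothetical: the caller just cleared a level-3 (page) entry */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, 3);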