@@ -124,6 +124,8 @@ extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm);
extern void kvm_nested_s2_flush(struct kvm *kvm);

+unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);
+
static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vpcu, u32 instr)
{
struct kvm *kvm = vpcu->kvm;
@@ -365,6 +365,95 @@ int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
return ret;
}

+static unsigned int ttl_to_size(u8 ttl)
+{
+ int level = ttl & 3;
+ int gran = (ttl >> 2) & 3;
+ unsigned int max_size = 0;
+
+ switch (gran) {
+ case TLBI_TTL_TG_4K:
+ switch (level) {
+ case 0:
+ break;
+ case 1:
+ max_size = SZ_1G;
+ break;
+ case 2:
+ max_size = SZ_2M;
+ break;
+ case 3:
+ max_size = SZ_4K;
+ break;
+ }
+ break;
+ case TLBI_TTL_TG_16K:
+ switch (level) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ max_size = SZ_32M;
+ break;
+ case 3:
+ max_size = SZ_16K;
+ break;
+ }
+ break;
+ case TLBI_TTL_TG_64K:
+ switch (level) {
+ case 0:
+ case 1:
+ /* No 52bit IPA support */
+ break;
+ case 2:
+ max_size = SZ_512M;
+ break;
+ case 3:
+ max_size = SZ_64K;
+ break;
+ }
+ break;
+ default: /* No size information */
+ break;
+ }
+
+ return max_size;
+}
+
+unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val)
+{
+ unsigned long max_size;
+ u8 ttl;
+
+ ttl = FIELD_GET(GENMASK_ULL(47, 44), val);
+
+ max_size = ttl_to_size(ttl);
+
+ if (!max_size) {
+ /* Compute the maximum extent of the invalidation */
+ switch (mmu->tlb_vtcr & VTCR_EL2_TG0_MASK) {
+ case VTCR_EL2_TG0_4K:
+ max_size = SZ_1G;
+ break;
+ case VTCR_EL2_TG0_16K:
+ max_size = SZ_32M;
+ break;
+ case VTCR_EL2_TG0_64K:
+ default: /* IMPDEF: treat any other value as 64k */
+ /*
+ * No, we do not support 52bit IPA in nested yet. Once
+ * we do, this should be 4TB.
+ */
+ max_size = SZ_512M;
+ break;
+ }
+ }
+
+ WARN_ON(!max_size);
+ return max_size;
+}
+
/*
* We can have multiple *different* MMU contexts with the same VMID:
*
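(Aside for review: the TTL hint decoded by ttl_to_size() above packs the translation granule in bits [3:2] and the lookup level in bits [1:0], with TG encodings 0b01/0b10/0b11 for 4K/16K/64K and 0b00 meaning "no information", in which case compute_tlb_inval_range() falls back to VTCR_EL2.TG0. The standalone user-space sketch below mirrors that decode so the mapping can be sanity-checked outside the kernel; the constants are written out locally and are not the kernel's definitions.)

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the TG encodings, not the kernel's definitions. */
#define TG_NONE	0	/* no granule information */
#define TG_4K	1
#define TG_16K	2
#define TG_64K	3

/* Mirrors ttl_to_size() above: returns 0 when the hint carries no size. */
static unsigned long ttl_to_size_sketch(uint8_t ttl)
{
	int level = ttl & 3;
	int gran = (ttl >> 2) & 3;

	switch (gran) {
	case TG_4K:
		if (level == 1) return 1UL << 30;	/* 1GB block */
		if (level == 2) return 1UL << 21;	/* 2MB block */
		if (level == 3) return 1UL << 12;	/* 4KB page */
		break;
	case TG_16K:
		if (level == 2) return 1UL << 25;	/* 32MB block */
		if (level == 3) return 1UL << 14;	/* 16KB page */
		break;
	case TG_64K:
		if (level == 2) return 1UL << 29;	/* 512MB block */
		if (level == 3) return 1UL << 16;	/* 64KB page */
		break;
	}
	return 0;
}

int main(void)
{
	/* TTL = 0b0111: 4K granule, level 3 -> 4096 bytes. */
	printf("0b0111 -> %lu bytes\n", ttl_to_size_sketch(0x7));
	/* TTL = 0b0000: no information -> 0, caller falls back to VTCR_EL2.TG0. */
	printf("0b0000 -> %lu bytes\n", ttl_to_size_sketch(0x0));
	return 0;
}
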
@@ -2865,34 +2865,12 @@ static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
*
* - NS bit: we're non-secure only.
*
- * - TTL field: We already have the granule size from the
- * VTCR_EL2.TG0 field, and the level is only relevant to the
- * guest's S2PT.
- *
* - IPA[51:48]: We don't support 52bit IPA just yet...
*
* And of course, adjust the IPA to be on an actual address.
*/
base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
-
- /* Compute the maximum extent of the invalidation */
- switch (mmu->tlb_vtcr & VTCR_EL2_TG0_MASK) {
- case VTCR_EL2_TG0_4K:
- max_size = SZ_1G;
- break;
- case VTCR_EL2_TG0_16K:
- max_size = SZ_32M;
- break;
- case VTCR_EL2_TG0_64K:
- default: /* IMPDEF: treat any other value as 64k */
- /*
- * No, we do not support 52bit IPA in nested yet. Once
- * we do, this should be 4TB.
- */
- max_size = SZ_512M;
- break;
- }
-
+ max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
base_addr &= ~(max_size - 1);
kvm_stage2_unmap_range(mmu, base_addr, max_size);

Support guest-provided information to size the range of required invalidation. This helps with reducing over-invalidation, provided that the guest actually provides accurate information.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_nested.h |  2 +
 arch/arm64/kvm/nested.c             | 89 +++++++++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c           | 24 +-------
 3 files changed, 92 insertions(+), 23 deletions(-)
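
As a further illustration of how the helper is consumed in s2_mmu_unmap_ipa(), the fragment below walks the same arithmetic outside the kernel: extract IPA[47:12] from bits [35:0] of the TLBI payload, shift it into an address, and round the base down to the size computed for the TTL hint. The payload value and the 1GB size are made-up examples; only the bit layout follows the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/*
	 * Hypothetical TLBI IPAS2E1IS payload: TTL = 0b0101 in bits [47:44]
	 * (4K granule, level 1, i.e. a 1GB block), and IPA[47:12] of the
	 * address 0x40201000 in bits [35:0].
	 */
	uint64_t val = (0x5ULL << 44) | (0x40201000ULL >> 12);

	uint64_t base_addr = (val & ((1ULL << 36) - 1)) << 12;	/* bits [35:0] hold IPA[47:12] */
	uint64_t max_size = 1ULL << 30;	/* what ttl_to_size() returns for 4K, level 1 */

	base_addr &= ~(max_size - 1);	/* align the base down to the block */

	printf("invalidate [0x%llx, 0x%llx)\n",
	       (unsigned long long)base_addr,
	       (unsigned long long)(base_addr + max_size));
	return 0;
}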