@@ -1113,4 +1113,10 @@ static inline void kvm_hyp_reserve(void) { }
 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
 
+#define kvm_arch_has_test_clear_young kvm_arch_has_test_clear_young
+static inline bool kvm_arch_has_test_clear_young(void)
+{
+	return cpu_has_hw_af() && !is_protected_kvm_enabled();
+}
+
 #endif /* __ARM64_KVM_HOST_H__ */
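Note on the #define-then-static-inline pairing above: it follows the usual arch-override idiom, where the generic KVM header only falls back to a stub when the architecture has not defined the macro of the same name. The generic side is changed elsewhere in this series and is not shown here; a minimal sketch of the assumed fallback, for orientation only:

/* Assumed shape of the generic fallback in include/linux/kvm_host.h
 * (not part of this patch); the arm64 #define above takes precedence.
 */
#ifndef kvm_arch_has_test_clear_young
static inline bool kvm_arch_has_test_clear_young(void)
{
	return false;
}
#endif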
@@ -1678,6 +1678,42 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 					   range->start << PAGE_SHIFT);
 }
 
+static int stage2_test_clear_young(const struct kvm_pgtable_visit_ctx *ctx,
+				   enum kvm_pgtable_walk_flags flags)
+{
+	kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
+
+	VM_WARN_ON_ONCE(!page_count(virt_to_page(ctx->ptep)));
+
+	if (!kvm_pte_valid(new))
+		return 0;
+
+	if (new == ctx->old)
+		return 0;
+
+	if (kvm_should_clear_young(ctx->arg, ctx->addr / PAGE_SIZE))
+		stage2_try_set_pte(ctx, new);
+
+	return 0;
+}
+
+bool kvm_arch_test_clear_young(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+	u64 start = range->start * PAGE_SIZE;
+	u64 end = range->end * PAGE_SIZE;
+	struct kvm_pgtable_walker walker = {
+		.cb	= stage2_test_clear_young,
+		.arg	= range,
+		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_SHARED,
+	};
+
+	BUILD_BUG_ON(is_hyp_code());
+
+	kvm_pgtable_walk(kvm->arch.mmu.pgt, start, end - start, &walker);
+
+	return false;
+}
+
 phys_addr_t kvm_mmu_get_httbr(void)
 {
 	return __pa(hyp_pgtable->pgd);
Implement kvm_arch_test_clear_young() to support the fast path in
mmu_notifier_ops->test_clear_young().

It focuses on a simple case, i.e., hardware sets the accessed bit in
KVM PTEs and VMs are not protected, where it can rely on RCU and
cmpxchg to safely clear the accessed bit without taking kvm->mmu_lock.
Complex cases fall back to the existing slow path where kvm->mmu_lock
is then taken.

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  6 ++++++
 arch/arm64/kvm/mmu.c              | 36 +++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)
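The lockless part of the fast path may deserve a brief illustration. The walk runs with KVM_PGTABLE_WALK_SHARED, so the walker holds an RCU read-side section that keeps the page-table pages alive, and the update done by stage2_try_set_pte() is a compare-and-exchange against the PTE value the walker observed; if another writer races in, the cmpxchg fails and the accessed bit is simply left set for a later pass or for the existing slow path. A rough sketch of that pattern, using a hypothetical helper name (the real logic is the walker callback above together with stage2_try_set_pte(); this is only an illustration):

/* Illustration only: hypothetical helper mirroring what the shared walk
 * plus stage2_try_set_pte() achieve. RCU keeps the page-table page
 * alive; cmpxchg makes clearing the AF safe without kvm->mmu_lock.
 */
static bool try_clear_accessed_bit(kvm_pte_t *ptep, kvm_pte_t old)
{
	kvm_pte_t new = old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;

	if (new == old)		/* AF already clear, nothing to do */
		return true;

	/* Succeed only if the PTE has not changed since it was read. */
	return cmpxchg(ptep, old, new) == old;
}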