--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2338,4 +2338,11 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
*/
#define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1)

+#define kvm_arch_young_notifier_likely_fast kvm_arch_young_notifier_likely_fast
+static inline bool kvm_arch_young_notifier_likely_fast(void)
+{
+ return IS_ENABLED(CONFIG_X86_64) && tdp_mmu_enabled &&
+ shadow_accessed_mask;
+}
+
#endif /* _ASM_X86_KVM_HOST_H */
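
The define-the-macro-to-itself idiom above lets the generic header detect that the architecture supplies its own implementation. A minimal sketch of the generic-side fallback this presumably pairs with (not part of this patch; exact placement and form are assumptions):

/*
 * Sketch only: assumed generic fallback, e.g. in include/linux/kvm_host.h,
 * used when an architecture does not define the macro above to its own
 * implementation. Not part of this patch.
 */
#ifndef kvm_arch_young_notifier_likely_fast
static inline bool kvm_arch_young_notifier_likely_fast(void)
{
	return false;
}
#endif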
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -24,6 +24,7 @@ config KVM
select KVM_COMMON
select KVM_GENERIC_MMU_NOTIFIER
select KVM_MMU_NOTIFIER_YOUNG_LOCKLESS
+ select HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER
select HAVE_KVM_IRQCHIP
select HAVE_KVM_PFNCACHE
select HAVE_KVM_DIRTY_RING_TSO
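
The HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER symbol selected here is presumably defined in the generic virt/kvm/Kconfig as a promptless bool, following the usual HAVE_* convention (the definition is introduced elsewhere in the series, so this is an assumption):

config HAVE_KVM_YOUNG_FAST_ONLY_NOTIFIER
       bool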
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1629,11 +1629,15 @@ static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
__rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
}

-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+static int __kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range,
+ bool fast_only)
{
bool young = false;

if (kvm_memslots_have_rmaps(kvm)) {
+ if (fast_only)
+ return -1;
+
write_lock(&kvm->mmu_lock);
young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
write_unlock(&kvm->mmu_lock);
@@ -1642,14 +1646,18 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
if (tdp_mmu_enabled)
young |= kvm_tdp_mmu_age_gfn_range(kvm, range);

- return young;
+ return (int)young;
}

-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+static int __kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range,
+ bool fast_only)
{
bool young = false;

if (kvm_memslots_have_rmaps(kvm)) {
+ if (fast_only)
+ return -1;
+
write_lock(&kvm->mmu_lock);
young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
write_unlock(&kvm->mmu_lock);
@@ -1658,7 +1666,41 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
if (tdp_mmu_enabled)
young |= kvm_tdp_mmu_test_age_gfn(kvm, range);

- return young;
+ return (int)young;
+}
+
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ return __kvm_age_gfn(kvm, range, false);
+}
+
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ return __kvm_test_age_gfn(kvm, range, false);
+}
+
+bool kvm_fast_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ int ret = __kvm_age_gfn(kvm, range, true);
+
+ if (ret < 0) {
+ *range->arg.failed = true;
+ return false;
+ }
+
+ return ret != 0;
+}
+
+bool kvm_fast_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ int ret = __kvm_test_age_gfn(kvm, range, true);
+
+ if (ret < 0) {
+ *range->arg.failed = true;
+ return false;
+ }
+
+ return ret != 0;
}

static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
The fast-only notifier will only report an accurate result when the
shadow MMU is not in use. Implement kvm_arch_young_notifier_likely_fast();
MGLRU will check this function to see whether it should even attempt the
fast-only notifier. We only want to attempt the notifier if there is a
chance that it will succeed (i.e., that we're using the TDP MMU).

Signed-off-by: James Houghton <jthoughton@google.com>
---
 arch/x86/include/asm/kvm_host.h |  7 +++++
 arch/x86/kvm/Kconfig            |  1 +
 arch/x86/kvm/mmu/mmu.c          | 50 ++++++++++++++++++++++++++++++---
 3 files changed, 54 insertions(+), 4 deletions(-)
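
For orientation, a rough caller-side sketch tying together the likely-fast gate and the failed-flag contract implemented in mmu.c above (purely illustrative: the helper name below is hypothetical, and the real MGLRU wiring lands in separate patches of this series):

/*
 * Sketch only: hypothetical caller. __kvm_age_gfn()/__kvm_test_age_gfn()
 * return -1 when a fast_only walk would need the rmap (shadow MMU) path;
 * the kvm_fast_*() wrappers fold that into *range->arg.failed.
 */
static bool example_fast_test_young(struct kvm *kvm,
				    struct kvm_gfn_range *range)
{
	bool failed = false;

	/* Don't attempt the fast path unless it is likely to succeed. */
	if (!kvm_arch_young_notifier_likely_fast())
		return kvm_test_age_gfn(kvm, range);

	range->arg.failed = &failed;
	if (kvm_fast_test_age_gfn(kvm, range))
		return true;

	/* The fast path bailed out; fall back to the locking path. */
	return failed ? kvm_test_age_gfn(kvm, range) : false;
}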