diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
@@ -122,6 +122,7 @@
#define KVM_REQ_HV_TLB_FLUSH \
KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE KVM_ARCH_REQ(34)
+#define KVM_REQ_APF_USER_READY KVM_ARCH_REQ(29)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -164,6 +165,7 @@
#define KVM_NR_VAR_MTRR 8
#define ASYNC_PF_PER_VCPU 64
+#define ASYNC_PF_USER_PER_VCPU 64
enum kvm_reg {
VCPU_REGS_RAX = __VCPU_REGS_RAX,
@@ -973,7 +975,7 @@ struct kvm_vcpu_arch {
struct {
bool halted;
- gfn_t gfns[ASYNC_PF_PER_VCPU];
+ gfn_t gfns[ASYNC_PF_PER_VCPU + ASYNC_PF_USER_PER_VCPU];
struct gfn_to_hva_cache data;
u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
@@ -983,6 +985,7 @@ struct kvm_vcpu_arch {
u32 host_apf_flags;
bool delivery_as_pf_vmexit;
bool pageready_pending;
+ bool pageready_user_pending;
} apf;
/* OSVW MSRs (AMD only) */
@@ -2266,11 +2269,18 @@ void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
+bool kvm_arch_async_page_not_present_user(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf_user *apf);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
+void kvm_arch_async_page_present_user(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf_user *apf);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
+void kvm_arch_async_page_ready_user(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf_user *apf);
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
+void kvm_arch_async_page_present_user_queued(struct kvm_vcpu *vcpu);
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
@@ -209,4 +209,10 @@ config KVM_MAX_NR_VCPUS
the memory footprint of each KVM guest, regardless of how many vCPUs are
created for a given VM.
+config KVM_ASYNC_PF_USER
+ bool "Support for async PF handled by userspace"
+ depends on KVM && KVM_USERFAULT && KVM_ASYNC_PF && X86_64
+ help
+ Support for asynchronous guest page faults that are resolved by userspace, which signals completion back to KVM via the KVM_ASYNC_PF_USER_READY ioctl.
+
endif # VIRTUALIZATION
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
@@ -515,6 +515,7 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
/* Check if there are APF page ready requests pending */
if (enabled) {
kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
+ kvm_make_request(KVM_REQ_APF_USER_READY, apic->vcpu);
kvm_xen_sw_enable_lapic(apic->vcpu);
}
}
@@ -2560,6 +2561,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
static_branch_slow_dec_deferred(&apic_hw_disabled);
/* Check if there are APF page ready requests pending */
kvm_make_request(KVM_REQ_APF_READY, vcpu);
+ kvm_make_request(KVM_REQ_APF_USER_READY, vcpu);
} else {
static_branch_inc(&apic_hw_disabled.key);
atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
@@ -4304,6 +4304,25 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL);
}
+void kvm_arch_async_page_ready_user(struct kvm_vcpu *vcpu, struct kvm_async_pf_user *apf)
+{
+ int r;
+
+ if ((vcpu->arch.mmu->root_role.direct != apf->arch.direct_map) ||
+ apf->wakeup_all)
+ return;
+
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r))
+ return;
+
+ if (!vcpu->arch.mmu->root_role.direct &&
+ apf->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
+ return;
+
+ kvm_mmu_do_page_fault(vcpu, apf->cr2_or_gpa, apf->arch.error_code, true, NULL);
+}
+
static inline u8 kvm_max_level_for_order(int order)
{
BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -942,6 +942,7 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
+ kvm_clear_async_pf_user_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
/*
@@ -3569,6 +3570,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
if (!kvm_pv_async_pf_enabled(vcpu)) {
kvm_clear_async_pf_completion_queue(vcpu);
+ kvm_clear_async_pf_user_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
return 0;
}
@@ -3581,6 +3583,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
kvm_async_pf_wakeup_all(vcpu);
+ kvm_async_pf_user_wakeup_all(vcpu);
return 0;
}
@@ -4019,6 +4022,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & 0x1) {
vcpu->arch.apf.pageready_pending = false;
kvm_check_async_pf_completion(vcpu);
+ vcpu->arch.apf.pageready_user_pending = false;
+ kvm_check_async_pf_user_completion(vcpu);
}
break;
case MSR_KVM_STEAL_TIME:
@@ -10924,6 +10929,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_vcpu_update_apicv(vcpu);
if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
kvm_check_async_pf_completion(vcpu);
+ if (kvm_check_request(KVM_REQ_APF_USER_READY, vcpu))
+ kvm_check_async_pf_user_completion(vcpu);
if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
static_call(kvm_x86_msr_filter_changed)(vcpu);
@@ -12346,6 +12353,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvmclock_reset(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
+ kvm_clear_async_pf_user_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
@@ -12671,6 +12679,7 @@ static void kvm_unload_vcpu_mmus(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_clear_async_pf_completion_queue(vcpu);
+ kvm_clear_async_pf_user_completion_queue(vcpu);
kvm_unload_vcpu_mmu(vcpu);
}
}
@@ -13119,6 +13128,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (!list_empty_careful(&vcpu->async_pf.done))
return true;
+ if (!list_empty_careful(&vcpu->async_pf_user.done))
+ return true;
+
if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
kvm_apic_init_sipi_allowed(vcpu))
return true;
@@ -13435,6 +13447,37 @@ bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
}
}
+bool kvm_arch_async_page_not_present_user(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf_user *apf)
+{
+ struct x86_exception fault;
+
+ trace_kvm_async_pf_not_present(apf->arch.token, apf->cr2_or_gpa, 1);
+ kvm_add_async_pf_gfn(vcpu, apf->arch.gfn);
+
+ if (!apf_put_user_notpresent(vcpu)) {
+ fault.vector = PF_VECTOR;
+ fault.error_code_valid = true;
+ fault.error_code = 0;
+ fault.nested_page_fault = false;
+ fault.address = apf->arch.token;
+ fault.async_page_fault = true;
+ kvm_inject_page_fault(vcpu, &fault);
+ return true;
+ } else {
+ /*
+ * It is not possible to deliver a paravirtualized asynchronous
+ * page fault, but putting the guest in an artificial halt state
+ * can be beneficial nevertheless: if an interrupt arrives, we
+ * can deliver it timely and perhaps the guest will schedule
+ * another process. When the instruction that triggered a page
+ * fault is retried, hopefully the page will be ready in the host.
+ */
+ kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+ return false;
+ }
+}
+
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
@@ -13460,6 +13503,31 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
+void kvm_arch_async_page_present_user(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf_user *apf)
+{
+ struct kvm_lapic_irq irq = {
+ .delivery_mode = APIC_DM_FIXED,
+ .vector = vcpu->arch.apf.vec
+ };
+
+ if (apf->wakeup_all)
+ apf->arch.token = ~0; /* broadcast wakeup */
+ else
+ kvm_del_async_pf_gfn(vcpu, apf->arch.gfn);
+ trace_kvm_async_pf_ready(apf->arch.token, apf->cr2_or_gpa, 1);
+
+ if ((apf->wakeup_all || apf->notpresent_injected) &&
+ kvm_pv_async_pf_enabled(vcpu) &&
+ !apf_put_user_ready(vcpu, apf->arch.token)) {
+ vcpu->arch.apf.pageready_user_pending = true;
+ kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
+
+ vcpu->arch.apf.halted = false;
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+}
+
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{
kvm_make_request(KVM_REQ_APF_READY, vcpu);
@@ -13467,6 +13535,13 @@ void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
kvm_vcpu_kick(vcpu);
}
+void kvm_arch_async_page_present_user_queued(struct kvm_vcpu *vcpu)
+{
+ kvm_make_request(KVM_REQ_APF_USER_READY, vcpu);
+ if (!vcpu->arch.apf.pageready_user_pending)
+ kvm_vcpu_kick(vcpu);
+}
+
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
if (!kvm_pv_async_pf_enabled(vcpu))
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ -257,6 +257,27 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
+#ifdef CONFIG_KVM_ASYNC_PF_USER
+struct kvm_async_pf_user {
+ struct list_head link;
+ struct list_head queue;
+ gpa_t cr2_or_gpa;
+ struct kvm_arch_async_pf arch;
+ bool wakeup_all;
+ bool resolved;
+ bool notpresent_injected;
+};
+
+void kvm_clear_async_pf_user_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_user_completion(struct kvm_vcpu *vcpu);
+bool kvm_setup_async_pf_user(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch);
+int kvm_async_pf_user_wakeup_all(struct kvm_vcpu *vcpu);
+#endif
+
+int kvm_async_pf_user_ready(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf_user_ready *apf_ready);
+
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
union kvm_mmu_notifier_arg {
unsigned long attributes;
@@ -368,6 +389,15 @@ struct kvm_vcpu {
} async_pf;
#endif
+#ifdef CONFIG_KVM_ASYNC_PF_USER
+ struct {
+ u32 queued;
+ struct list_head queue;
+ struct list_head done;
+ spinlock_t lock;
+ } async_pf_user;
+#endif
+
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
/*
* Cpu relax intercept or pause loop exit optimization
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
@@ -5,6 +5,7 @@
struct kvm;
struct kvm_async_pf;
+struct kvm_async_pf_user;
struct kvm_device_ops;
struct kvm_gfn_range;
struct kvm_interrupt;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
@@ -1561,4 +1561,12 @@ struct kvm_fault {
#define KVM_READ_USERFAULT _IOR(KVMIO, 0xd5, struct kvm_fault)
+/* for KVM_ASYNC_PF_USER_READY */
+struct kvm_async_pf_user_ready {
+ /* in */
+ __u32 token;
+};
+
+#define KVM_ASYNC_PF_USER_READY _IOW(KVMIO, 0xd6, struct kvm_async_pf_user_ready)
+
#endif /* __LINUX_KVM_H */
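The completion ABI is deliberately small: userspace identifies the fault it has resolved by the token KVM associated with it, and KVM rejects unknown tokens with EINVAL. As a rough illustration (not part of this patch), a VMM might complete a fault as sketched below; the assumption that the ioctl is issued on the vCPU file descriptor, and the dispatch wiring itself, are outside this patch.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch only: report to KVM that the page backing a previously reported
 * async fault is now present.  'vcpu_fd' being the right file descriptor
 * to target is an assumption; the ioctl handler is wired up separately.
 */
static int async_pf_complete(int vcpu_fd, uint32_t token)
{
	struct kvm_async_pf_user_ready ready = {
		.token = token,
	};

	/* Fails with EINVAL if the token is unknown (stale or already completed). */
	return ioctl(vcpu_fd, KVM_ASYNC_PF_USER_READY, &ready);
}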
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
@@ -45,6 +45,9 @@ config KVM_MMIO
config KVM_ASYNC_PF
bool
+config KVM_ASYNC_PF_USER
+ bool
+
# Toggle to switch between direct notification and batch job
config KVM_ASYNC_PF_SYNC
bool
diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm
@@ -9,6 +9,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
kvm-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
+kvm-$(CONFIG_KVM_ASYNC_PF_USER) += $(KVM)/async_pf_user.o
kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o
kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o
diff --git a/virt/kvm/async_pf_user.c b/virt/kvm/async_pf_user.c
new file mode 100644
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM support for asynchronous page faults handled in userspace
+ *
+ * Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author:
+ * Nikita Kalyazin <kalyazin@amazon.com>
+ */
+
+#include <uapi/linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "async_pf_user.h"
+#include <trace/events/kvm.h>
+
+static struct kmem_cache *async_pf_user_cache;
+
+int kvm_async_pf_user_init(void)
+{
+ async_pf_user_cache = KMEM_CACHE(kvm_async_pf_user, 0);
+
+ if (!async_pf_user_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void kvm_async_pf_user_deinit(void)
+{
+ kmem_cache_destroy(async_pf_user_cache);
+ async_pf_user_cache = NULL;
+}
+
+void kvm_async_pf_user_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ INIT_LIST_HEAD(&vcpu->async_pf_user.done);
+ INIT_LIST_HEAD(&vcpu->async_pf_user.queue);
+ spin_lock_init(&vcpu->async_pf_user.lock);
+}
+
+int kvm_async_pf_user_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf_user_ready *apf_ready)
+{
+ struct kvm_async_pf_user *apf = NULL, *cur;
+ bool first;
+
+ spin_lock(&vcpu->async_pf_user.lock);
+ list_for_each_entry(cur, &vcpu->async_pf_user.queue, queue) {
+ if (cur->arch.token == apf_ready->token) {
+ apf = cur;
+ break;
+ }
+ }
+
+ if (unlikely(!apf)) {
+ spin_unlock(&vcpu->async_pf_user.lock);
+ return -EINVAL;
+ }
+
+ first = list_empty(&vcpu->async_pf_user.done);
+ apf->resolved = true;
+ list_add_tail(&apf->link, &vcpu->async_pf_user.done);
+ spin_unlock(&vcpu->async_pf_user.lock);
+
+ if (first)
+ kvm_arch_async_page_present_user_queued(vcpu);
+
+ trace_kvm_async_pf_completed(0, apf->cr2_or_gpa, 1);
+
+ __kvm_vcpu_wake_up(vcpu);
+
+ return 0;
+}
+
+void kvm_clear_async_pf_user_completion_queue(struct kvm_vcpu *vcpu)
+{
+ spin_lock(&vcpu->async_pf_user.lock);
+
+ /* cancel outstanding work queue item */
+ while (!list_empty(&vcpu->async_pf_user.queue)) {
+ struct kvm_async_pf_user *apf =
+ list_first_entry(&vcpu->async_pf_user.queue,
+ typeof(*apf), queue);
+ list_del(&apf->queue);
+
+ /*
+ * If userspace has already notified us that the fault
+ * has been resolved, we will delete the item when
+ * iterating over the `done` list.
+ * Otherwise, we free it now, and if userspace comes
+ * back regarding this fault at a later point, it will
+ * be rejected due to a nonexistent token.
+ * Note that we do not have a way to "cancel" the work
+ * like with traditional (kernel) async pf.
+ */
+ if (!apf->resolved)
+ kmem_cache_free(async_pf_user_cache, apf);
+ }
+
+ while (!list_empty(&vcpu->async_pf_user.done)) {
+ struct kvm_async_pf_user *apf =
+ list_first_entry(&vcpu->async_pf_user.done,
+ typeof(*apf), link);
+ list_del(&apf->link);
+
+ /*
+ * Unlike with traditional (kernel) async pf,
+ * we know for sure that once the work has been queued,
+ * userspace is done with it and no residual resources
+ * are still being held by KVM.
+ */
+ kmem_cache_free(async_pf_user_cache, apf);
+ }
+ spin_unlock(&vcpu->async_pf_user.lock);
+
+ vcpu->async_pf_user.queued = 0;
+}
+
+void kvm_check_async_pf_user_completion(struct kvm_vcpu *vcpu)
+{
+ struct kvm_async_pf_user *apf;
+
+ while (!list_empty_careful(&vcpu->async_pf_user.done) &&
+ kvm_arch_can_dequeue_async_page_present(vcpu)) {
+ spin_lock(&vcpu->async_pf_user.lock);
+ apf = list_first_entry(&vcpu->async_pf_user.done, typeof(*apf),
+ link);
+ list_del(&apf->link);
+ spin_unlock(&vcpu->async_pf_user.lock);
+
+ kvm_arch_async_page_ready_user(vcpu, apf);
+ kvm_arch_async_page_present_user(vcpu, apf);
+
+ list_del(&apf->queue);
+ vcpu->async_pf_user.queued--;
+ kmem_cache_free(async_pf_user_cache, apf);
+ }
+}
+
+/*
+ * Try to set up asynchronous handling of the fault by userspace. Returns 'true' on
+ * success, 'false' on failure (the page fault has to be handled synchronously).
+ */
+bool kvm_setup_async_pf_user(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch)
+{
+ struct kvm_async_pf_user *apf;
+
+ if (vcpu->async_pf_user.queued >= ASYNC_PF_USER_PER_VCPU)
+ return false;
+
+ /*
+ * do alloc nowait since if we are going to sleep anyway we
+ * may as well sleep faulting in page
+ */
+ apf = kmem_cache_zalloc(async_pf_user_cache, GFP_NOWAIT | __GFP_NOWARN);
+ if (!apf)
+ return false;
+
+ apf->wakeup_all = false;
+ apf->cr2_or_gpa = cr2_or_gpa;
+ apf->arch = *arch;
+
+ list_add_tail(&apf->queue, &vcpu->async_pf_user.queue);
+ vcpu->async_pf_user.queued++;
+ apf->notpresent_injected = kvm_arch_async_page_not_present_user(vcpu, apf);
+
+ return true;
+}
+
+int kvm_async_pf_user_wakeup_all(struct kvm_vcpu *vcpu)
+{
+ struct kvm_async_pf_user *apf;
+ bool first;
+
+ if (!list_empty_careful(&vcpu->async_pf_user.done))
+ return 0;
+
+ apf = kmem_cache_zalloc(async_pf_user_cache, GFP_ATOMIC);
+ if (!apf)
+ return -ENOMEM;
+
+ apf->wakeup_all = true;
+ INIT_LIST_HEAD(&apf->queue); /* for list_del to work */
+
+ spin_lock(&vcpu->async_pf_user.lock);
+ first = list_empty(&vcpu->async_pf_user.done);
+ list_add_tail(&apf->link, &vcpu->async_pf_user.done);
+ spin_unlock(&vcpu->async_pf_user.lock);
+
+ if (first)
+ kvm_arch_async_page_present_user_queued(vcpu);
+
+ vcpu->async_pf_user.queued++;
+ return 0;
+}
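Because kvm_clear_async_pf_user_completion_queue() frees unresolved items outright (the queue can be cleared when the guest toggles CR0.PG, disables async PF, or the vCPU is reset), a completion that arrives afterwards is simply rejected with EINVAL. A hypothetical VMM built on top of this could treat that as a benign race, for example:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical VMM-side handling of a late completion (not part of this patch). */
static void complete_or_ignore(int vcpu_fd, uint32_t token)
{
	struct kvm_async_pf_user_ready ready = { .token = token };

	if (ioctl(vcpu_fd, KVM_ASYNC_PF_USER_READY, &ready) < 0 && errno == EINVAL) {
		/*
		 * The fault queue was cleared while the page was being
		 * brought in; the token no longer exists and there is
		 * nothing left to complete.
		 */
	}
}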
diff --git a/virt/kvm/async_pf_user.h b/virt/kvm/async_pf_user.h
new file mode 100644
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM support for asynchronous page faults handled in userspace
+ *
+ * Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author:
+ * Nikita Kalyazin <kalyazin@amazon.com>
+ */
+
+#ifndef __KVM_ASYNC_PF_USER_H__
+#define __KVM_ASYNC_PF_USER_H__
+
+#ifdef CONFIG_KVM_ASYNC_PF_USER
+int kvm_async_pf_user_init(void);
+void kvm_async_pf_user_deinit(void);
+void kvm_async_pf_user_vcpu_init(struct kvm_vcpu *vcpu);
+#else
+#define kvm_async_pf_user_init() (0)
+#define kvm_async_pf_user_deinit() do {} while (0)
+#define kvm_async_pf_user_vcpu_init(C) do {} while (0)
+#endif
+
+#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
@@ -59,6 +59,7 @@
#include "coalesced_mmio.h"
#include "async_pf.h"
+#include "async_pf_user.h"
#include "kvm_mm.h"
#include "vfio.h"
@@ -493,6 +494,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
rcuwait_init(&vcpu->wait);
#endif
kvm_async_pf_vcpu_init(vcpu);
+ kvm_async_pf_user_vcpu_init(vcpu);
kvm_vcpu_set_in_spin_loop(vcpu, false);
kvm_vcpu_set_dy_eligible(vcpu, false);
@@ -4059,6 +4061,11 @@ static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
return true;
#endif
+#ifdef CONFIG_KVM_ASYNC_PF_USER
+ if (!list_empty_careful(&vcpu->async_pf_user.done))
+ return true;
+#endif
+
return false;
}
@@ -6613,6 +6620,10 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
if (r)
goto err_async_pf;
+ r = kvm_async_pf_user_init();
+ if (r)
+ goto err_async_pf_user;
+
kvm_chardev_ops.owner = module;
kvm_vm_fops.owner = module;
kvm_vcpu_fops.owner = module;
@@ -6644,6 +6655,8 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
err_register:
kvm_vfio_ops_exit();
err_vfio:
+ kvm_async_pf_user_deinit();
+err_async_pf_user:
kvm_async_pf_deinit();
err_async_pf:
kvm_irqfd_exit();
@@ -6677,6 +6690,7 @@ void kvm_exit(void)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
kvm_vfio_ops_exit();
+ kvm_async_pf_user_deinit();
kvm_async_pf_deinit();
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
unregister_syscore_ops(&kvm_syscore_ops);
Add both generic and x86-specific infrastructure for async PF handled by
userspace. The functionality is gated by the KVM_ASYNC_PF_USER config
option.

The async PF user implementation is mostly isolated from the original
(kernel) implementation. The only piece shared between the two is the
struct apf within struct kvm_vcpu_arch (x86), which tracks guest-facing
state.

Signed-off-by: Nikita Kalyazin <kalyazin@amazon.com>
---
 arch/x86/include/asm/kvm_host.h |  12 +-
 arch/x86/kvm/Kconfig            |   6 +
 arch/x86/kvm/lapic.c            |   2 +
 arch/x86/kvm/mmu/mmu.c          |  19 +++
 arch/x86/kvm/x86.c              |  75 ++++++++++++
 include/linux/kvm_host.h        |  30 +++++
 include/linux/kvm_types.h       |   1 +
 include/uapi/linux/kvm.h        |   8 ++
 virt/kvm/Kconfig                |   3 +
 virt/kvm/Makefile.kvm           |   1 +
 virt/kvm/async_pf_user.c        | 197 ++++++++++++++++++++++++++++++++
 virt/kvm/async_pf_user.h        |  24 ++++
 virt/kvm/kvm_main.c             |  14 +++
 13 files changed, 391 insertions(+), 1 deletion(-)
 create mode 100644 virt/kvm/async_pf_user.c
 create mode 100644 virt/kvm/async_pf_user.h
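Taken together with the KVM userfault machinery this builds on, the intended flow is roughly: a guest access to a missing page is reported to userspace together with an async PF token, the vCPU keeps running (the guest observes a 'page not present' async PF), and once the VMM has populated the page it completes the fault with the new ioctl. The sketch below is an assumption about how the pieces fit; read_guest_fault() and populate_page() are hypothetical stand-ins for functionality (such as KVM_READ_USERFAULT-based fault delivery) that is not part of this patch.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Both helpers are assumed to be provided by later patches / the VMM itself. */
extern int read_guest_fault(int vcpu_fd, uint64_t *gpa, uint32_t *token);
extern void populate_page(uint64_t gpa);

static void handle_guest_faults(int vcpu_fd)
{
	uint64_t gpa;
	uint32_t token;

	while (!read_guest_fault(vcpu_fd, &gpa, &token)) {
		/* Bring the page in (UFFDIO_COPY, restore from a snapshot, ...). */
		populate_page(gpa);

		/* Tell KVM the page is ready so the faulting guest task can be woken. */
		struct kvm_async_pf_user_ready ready = { .token = token };

		if (ioctl(vcpu_fd, KVM_ASYNC_PF_USER_READY, &ready) < 0)
			perror("KVM_ASYNC_PF_USER_READY");
	}
}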