--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -106,17 +106,36 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->shadow_root_level);
}

+struct kvm_page_fault {
+ /* arguments to kvm page fault handler */
+ struct kvm_vcpu *vcpu;
+ gpa_t cr2_or_gpa;
+ u32 error_code;
+ bool prefault;
+};
+
+static inline void kvm_page_fault_init(
+ struct kvm_page_fault *kpf, struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ u32 error_code, bool prefault)
+{
+ kpf->vcpu = vcpu;
+ kpf->cr2_or_gpa = cr2_or_gpa;
+ kpf->error_code = error_code;
+ kpf->prefault = prefault;
+}
+
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
bool prefault);

-static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- u32 err, bool prefault)
+static inline int kvm_mmu_do_page_fault(struct kvm_page_fault *kpf)
{
#ifdef CONFIG_RETPOLINE
- if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
- return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
+ if (likely(kpf->vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
+ return kvm_tdp_page_fault(kpf->vcpu, kpf->cr2_or_gpa,
+ kpf->error_code, kpf->prefault);
#endif
- return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
+ return kpf->vcpu->arch.mmu->page_fault(kpf->vcpu, kpf->cr2_or_gpa,
+ kpf->error_code, kpf->prefault);
}

/*
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5006,6 +5006,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
{
int r, emulation_type = EMULTYPE_PF;
bool direct = vcpu->arch.mmu->direct_map;
+ struct kvm_page_fault kpf;

if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
return RET_PF_RETRY;
@@ -5018,8 +5019,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
}

if (r == RET_PF_INVALID) {
- r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
- lower_32_bits(error_code), false);
+ kvm_page_fault_init(&kpf, vcpu, cr2_or_gpa,
+ lower_32_bits(error_code), false);
+ r = kvm_mmu_do_page_fault(&kpf);
if (WARN_ON_ONCE(r == RET_PF_INVALID))
return -EIO;
}
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11083,6 +11083,7 @@ EXPORT_SYMBOL_GPL(kvm_set_rflags);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
int r;
+ struct kvm_page_fault kpf;

if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
work->wakeup_all)
@@ -11096,7 +11097,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
return;

- kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
+ kvm_page_fault_init(&kpf, vcpu, work->cr2_or_gpa, 0, true);
+ kvm_mmu_do_page_fault(&kpf);
}

static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
Introduce struct kvm_page_fault, which bundles the arguments to the KVM
page fault handler, together with its initialization function. Make the
callers of the page fault handler allocate and initialize a struct
kvm_page_fault and pass it to kvm_mmu_do_page_fault() instead of many
separate arguments. No functional change is intended.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
 arch/x86/kvm/mmu.h     | 29 ++++++++++++++++++++++++-----
 arch/x86/kvm/mmu/mmu.c |  6 ++++--
 arch/x86/kvm/x86.c     |  4 +++-
 3 files changed, 31 insertions(+), 8 deletions(-)
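
For readers skimming the diff, the snippet below is a minimal usage sketch of
the two helpers added to arch/x86/kvm/mmu.h. It is illustrative only:
example_handle_page_fault() is a hypothetical wrapper, not a function in this
patch, and it assumes the usual KVM declarations (struct kvm_vcpu, gpa_t,
RET_PF_*) are in scope.

/*
 * Illustrative sketch, not part of the patch: a hypothetical caller
 * builds the on-stack struct kvm_page_fault introduced above and hands
 * a single pointer to kvm_mmu_do_page_fault().
 */
static int example_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                     u32 error_code, bool prefault)
{
        struct kvm_page_fault kpf;

        /* Bundle the former argument list into one structure ... */
        kvm_page_fault_init(&kpf, vcpu, cr2_or_gpa, error_code, prefault);

        /* ... and dispatch through the single-argument entry point. */
        return kvm_mmu_do_page_fault(&kpf);
}

The two callers converted by this patch, kvm_mmu_page_fault() and
kvm_arch_async_page_ready(), follow the same pattern; a plausible benefit of
this shape is that later changes can add per-fault fields to struct
kvm_page_fault without touching every call site.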