@@ -3294,7 +3294,6 @@ static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
fault->slot = NULL;
fault->pfn = KVM_PFN_NOSLOT;
fault->map_writable = false;
- fault->hva = KVM_HVA_ERR_BAD;
/*
* If MMIO caching is disabled, emulate immediately without
@@ -4379,7 +4378,7 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
fault->write, &fault->map_writable,
- &fault->hva);
+ NULL);
/*
* If resolving the page failed because I/O is needed to fault-in the
@@ -4408,7 +4407,7 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
*/
fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, true, true,
fault->write, &fault->map_writable,
- &fault->hva);
+ NULL);
return RET_PF_CONTINUE;
}
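
With the cached hva gone, both __kvm_faultin_pfn() call sites above simply pass NULL for the hva out-parameter. A minimal sketch, not part of this patch, of how a caller that still wanted the host virtual address could recompute it on demand from the memslot; fault_hva() is a hypothetical helper, and it assumes the existing __gfn_to_hva_memslot() helper from include/linux/kvm_host.h:

/*
 * Sketch only, not part of the patch: with fault->hva removed, a caller
 * that still needs the host virtual address can derive it from the
 * memslot on demand instead of reading a cached field.
 */
static unsigned long fault_hva(const struct kvm_page_fault *fault)
{
	/* No memslot means the noslot/MMIO path; there is no backing hva. */
	if (!fault->slot)
		return KVM_HVA_ERR_BAD;

	return __gfn_to_hva_memslot(fault->slot, fault->gfn);
}
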
@@ -238,7 +238,6 @@ struct kvm_page_fault {
/* Outputs of kvm_faultin_pfn. */
unsigned long mmu_seq;
kvm_pfn_t pfn;
- hva_t hva;
bool map_writable;
/*
@@ -313,7 +312,6 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.is_private = err & PFERR_PRIVATE_ACCESS,
.pfn = KVM_PFN_ERR_FAULT,
- .hva = KVM_HVA_ERR_BAD,
};
int r;
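
Dropping the ".hva = KVM_HVA_ERR_BAD" line from the kvm_mmu_do_page_fault() initializer is only safe because the field itself is removed from the struct: members omitted from a C designated initializer are zero-initialized, and 0 is not KVM_HVA_ERR_BAD. A standalone, non-KVM sketch of that rule (the struct and values below are illustrative only):

/*
 * Illustration of the C rule behind the initializer above: members
 * omitted from a designated initializer are zero-initialized, so
 * removing a ".field = SENTINEL" assignment is only safe once the
 * field itself is gone (or when 0 is an acceptable value).
 */
#include <assert.h>
#include <stdbool.h>

struct example_fault {
	unsigned long pfn;
	bool map_writable;
};

int main(void)
{
	struct example_fault f = {
		.pfn = ~0ul,		/* explicit sentinel, akin to KVM_PFN_ERR_FAULT */
	};

	assert(!f.map_writable);	/* omitted members are zeroed */
	return 0;
}
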