@@ -4061,10 +4061,11 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
-				      struct kvm_vcpu *vcpu, u32 access,
+				      struct x86_emulate_ctxt *ctxt, u32 access,
 				      struct x86_exception *exception)
 {
 	void *data = val;
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
@@ -4098,7 +4099,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
+	return kvm_read_guest_virt_helper(addr, val, bytes, ctxt,
 					  access | PFERR_FETCH_MASK,
 					  exception);
 }
@@ -4110,7 +4111,7 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+	return kvm_read_guest_virt_helper(addr, val, bytes, ctxt, access,
 					  exception);
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
@@ -4119,8 +4120,7 @@ static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 				      gva_t addr, void *val, unsigned int bytes,
 				      struct x86_exception *exception)
 {
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
+	return kvm_read_guest_virt_helper(addr, val, bytes, ctxt, 0, exception);
 }
 
 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
In the following patches, the address caching struct that is embedded within
struct x86_emulate_ctxt will need to be accessed by these read helpers, so
pass the emulator context to kvm_read_guest_virt_helper() instead of the vcpu
and derive the vcpu inside the helper with emul_to_vcpu().

Signed-off-by: Bandan Das <bsd@redhat.com>
---
 arch/x86/kvm/x86.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
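To make the motivation concrete, here is a rough sketch of the direction this
enables; it is illustrative only and not part of this patch. The gva_cache
field and gva_cache_lookup() below are made-up placeholder names, not the
names the later patches actually use. The point is simply that once the
helper receives ctxt, a per-context translation cache can be consulted before
the full guest page-table walk, while the vcpu stays reachable through
emul_to_vcpu():

/*
 * Illustrative sketch only -- not from this patch.  "gva_cache" and
 * gva_cache_lookup() are hypothetical placeholders for whatever caching
 * structure the later patches embed in struct x86_emulate_ctxt.
 */
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct x86_emulate_ctxt *ctxt, u32 access,
				      struct x86_exception *exception)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);	/* vcpu still available */
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa;
		unsigned offset = addr & (PAGE_SIZE - 1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);

		/* Hypothetical fast path: translation already cached in ctxt. */
		if (!gva_cache_lookup(&ctxt->gva_cache, addr, access, &gpa))
			/* Existing slow path: full guest page-table walk. */
			gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
							      access, exception);

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;

		if (kvm_read_guest(vcpu->kvm, gpa, data, toread) < 0) {
			r = X86EMUL_IO_NEEDED;
			goto out;
		}

		bytes -= toread;
		data += toread;
		addr += toread;
	}
out:
	return r;
}

Passing ctxt rather than the vcpu is also the cheaper direction: the vcpu can
always be derived from the embedded context (emul_to_vcpu() is, as far as I
recall for this era of the code, a simple container_of()), whereas going the
other way would make every future cache user reach back into vcpu->arch for
the context.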