@@ -781,6 +781,10 @@ struct kvm_vcpu_arch {
bool gpa_available;
gpa_t gpa_val;
+ /* GVA available */
+ bool gva_available;
+ gva_t gva_val;
+
/* be preempted when it's in kernel-mode(cpl=0) */
bool preempted_in_kernel;
@@ -522,6 +522,7 @@ struct vmx_msr_entry {
#define EPT_VIOLATION_READABLE_BIT 3
#define EPT_VIOLATION_WRITABLE_BIT 4
#define EPT_VIOLATION_EXECUTABLE_BIT 5
+#define EPT_VIOLATION_GVA_LINEAR_VALID (1 << 7)
#define EPT_VIOLATION_GVA_TRANSLATED_BIT 8
#define EPT_VIOLATION_ACC_READ (1 << EPT_VIOLATION_ACC_READ_BIT)
#define EPT_VIOLATION_ACC_WRITE (1 << EPT_VIOLATION_ACC_WRITE_BIT)
@@ -5116,6 +5116,11 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
error_code |= (exit_qualification & 0x100) != 0 ?
PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+ if (exit_qualification & EPT_VIOLATION_GVA_LINEAR_VALID) {
+ vcpu->arch.gva_available = true;
+ vcpu->arch.gva_val = vmcs_readl(GUEST_LINEAR_ADDRESS);
+ }
+
vcpu->arch.exit_qualification = exit_qualification;
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
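For context, the EPT-violation exit-qualification bits this handler looks at are summarized below (per the Intel SDM; only a subset is shown, and the comment is informational rather than part of the patch):

	/*
	 * EPT-violation exit qualification, selected bits:
	 *   bit 0 - the access was a data read
	 *   bit 1 - the access was a data write
	 *   bit 2 - the access was an instruction fetch
	 *   bit 3 - the guest-physical address was readable
	 *   bit 4 - the guest-physical address was writable
	 *   bit 5 - the guest-physical address was executable
	 *   bit 7 - the guest linear-address field is valid
	 *   bit 8 - if bit 7 is set: the fault happened on the final
	 *           translation rather than during the guest page walk
	 *           (this is the 0x100 test above)
	 */

GUEST_LINEAR_ADDRESS is only meaningful when bit 7 is set, which is why the new gva_available/gva_val pair is filled in only behind that check.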
@@ -8092,6 +8092,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_lapic_sync_from_vapic(vcpu);
vcpu->arch.gpa_available = false;
+ vcpu->arch.gva_available = false;
r = kvm_x86_ops->handle_exit(vcpu);
return r;
VMX supports providing the guest virtual address that caused an EPT violation. Add support for this so it can be used by the KVM XO feature.

Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 4 ++++
 arch/x86/include/asm/vmx.h      | 1 +
 arch/x86/kvm/vmx/vmx.c          | 5 +++++
 arch/x86/kvm/x86.c              | 1 +
 4 files changed, 11 insertions(+)
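As a usage illustration only (not part of this patch), a later consumer such as the KVM XO fault handling could pick up the cached address along these lines; kvm_get_fault_gva() is a hypothetical helper name invented for the sketch:

	/*
	 * Hypothetical helper: return the guest virtual address recorded for
	 * the current EPT-violation exit, if one was reported as valid.
	 */
	static bool kvm_get_fault_gva(struct kvm_vcpu *vcpu, gva_t *gva)
	{
		/*
		 * gva_available is set by handle_ept_violation() only when the
		 * exit qualification reported a valid guest linear address, and
		 * vcpu_enter_guest() clears it before every handle_exit() call,
		 * so a stale value from an earlier exit cannot leak through.
		 */
		if (!vcpu->arch.gva_available)
			return false;

		*gva = vcpu->arch.gva_val;
		return true;
	}

A fault path could then, for example, use the returned GVA to decide whether the faulting access hit an execute-only mapping before choosing how to resolve or inject the fault.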