@@ -61,7 +61,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u32 intercept_dr;
u32 intercept_exceptions;
u64 intercept;
- u8 reserved_1[40];
+ u32 intercept_extended;
+ u8 reserved_1[36];
u16 pause_filter_thresh;
u16 pause_filter_count;
u64 iopm_base_pa;
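(Not part of the patch, for illustration only: a compile-time sanity check, assuming the field layout above, that the new intercept_extended word lands at control-area byte offset 0x14, matching the 14h byte offset given for the INVPCID intercept bit.)

	static inline void vmcb_check_intercept_extended_offset(void)
	{
		/* u32 + u32 + u32 + u64 place intercept_extended at offset 0x14 */
		BUILD_BUG_ON(offsetof(struct vmcb_control_area,
				      intercept_extended) != 0x14);
	}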
@@ -121,6 +121,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
c->intercept_dr = h->intercept_dr;
c->intercept_exceptions = h->intercept_exceptions;
c->intercept = h->intercept;
+ c->intercept_extended = h->intercept_extended;
if (g->int_ctl & V_INTR_MASKING_MASK) {
/* We only want the cr8 intercept bits of L1 */
@@ -142,6 +143,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
c->intercept_dr |= g->intercept_dr;
c->intercept_exceptions |= g->intercept_exceptions;
c->intercept |= g->intercept;
+ c->intercept_extended |= g->intercept_extended;
}
static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@@ -151,6 +153,7 @@ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
dst->intercept_dr = from->intercept_dr;
dst->intercept_exceptions = from->intercept_exceptions;
dst->intercept = from->intercept;
+ dst->intercept_extended = from->intercept_extended;
dst->iopm_base_pa = from->iopm_base_pa;
dst->msrpm_base_pa = from->msrpm_base_pa;
dst->tsc_offset = from->tsc_offset;
@@ -433,7 +436,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
nested_vmcb->control.intercept_cr >> 16,
nested_vmcb->control.intercept_exceptions,
- nested_vmcb->control.intercept);
+ nested_vmcb->control.intercept,
+ nested_vmcb->control.intercept_extended);
/* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu);
@@ -2801,6 +2801,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
+ pr_err("%-20s%08x\n", "intercepts (extended):", control->intercept_extended);
pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
pr_err("%-20s%d\n", "pause filter threshold:",
control->pause_filter_thresh);
@@ -311,6 +311,24 @@ static inline void clr_intercept(struct vcpu_svm *svm, int bit)
recalc_intercepts(svm);
}
+static inline void set_extended_intercept(struct vcpu_svm *svm, int bit)
+{
+ struct vmcb *vmcb = get_host_vmcb(svm);
+
+ vmcb->control.intercept_extended |= (1U << bit);
+
+ recalc_intercepts(svm);
+}
+
+static inline void clr_extended_intercept(struct vcpu_svm *svm, int bit)
+{
+ struct vmcb *vmcb = get_host_vmcb(svm);
+
+ vmcb->control.intercept_extended &= ~(1U << bit);
+
+ recalc_intercepts(svm);
+}
+
static inline bool is_intercept(struct vcpu_svm *svm, int bit)
{
return (svm->vmcb->control.intercept & (1ULL << bit)) != 0;
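(Also for illustration only, not included in this patch: a query helper for the extended vector, mirroring is_intercept() above, which callers such as a nested #VMEXIT path could use to test whether an extended intercept bit is set.)

	static inline bool is_extended_intercept(struct vcpu_svm *svm, int bit)
	{
		/* intercept_extended is a u32, so use a 32-bit mask */
		return (svm->vmcb->control.intercept_extended & (1U << bit)) != 0;
	}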
@@ -544,14 +544,16 @@ TRACE_EVENT(kvm_nested_vmrun,
);
TRACE_EVENT(kvm_nested_intercepts,
- TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
- TP_ARGS(cr_read, cr_write, exceptions, intercept),
+ TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept,
+ __u32 extended),
+ TP_ARGS(cr_read, cr_write, exceptions, intercept, extended),
TP_STRUCT__entry(
__field( __u16, cr_read )
__field( __u16, cr_write )
__field( __u32, exceptions )
__field( __u64, intercept )
+ __field( __u32, extended )
),
TP_fast_assign(
@@ -559,11 +561,13 @@ TRACE_EVENT(kvm_nested_intercepts,
__entry->cr_write = cr_write;
__entry->exceptions = exceptions;
__entry->intercept = intercept;
+ __entry->extended = extended;
),
- TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
+ TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx"
+ "intercept (extended): %08x",
__entry->cr_read, __entry->cr_write, __entry->exceptions,
- __entry->intercept)
+ __entry->intercept, __entry->extended)
);
/*
* Tracepoint for #VMEXIT while nested
The new intercept bits have been added in the VMCB control area to support
the interception of the INVPCID instruction. The following bit is added to
the VMCB layout control area to control the INVPCID intercept:

Byte Offset	Bit(s)		Function
14h		2		intercept INVPCID

Add the interfaces to support these extended intercepts and update the
tracing to include the extended intercept vector.

AMD documentation for the INVPCID feature is available in "AMD64
Architecture Programmer’s Manual Volume 2: System Programming, Pub. 24593
Rev. 3.34 (or later)". The documentation can be obtained at the links below:

Link: https://www.amd.com/system/files/TechDocs/24593.pdf
Link: https://bugzilla.kernel.org/show_bug.cgi?id=206537

Signed-off-by: Babu Moger <babu.moger@amd.com>
---
 arch/x86/include/asm/svm.h |  3 ++-
 arch/x86/kvm/svm/nested.c  |  6 +++++-
 arch/x86/kvm/svm/svm.c     |  1 +
 arch/x86/kvm/svm/svm.h     | 18 ++++++++++++++++++
 arch/x86/kvm/trace.h       | 12 ++++++++----
 5 files changed, 34 insertions(+), 6 deletions(-)
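For illustration only (INTERCEPT_INVPCID below is an assumed name and is not
defined by this patch): a follow-up change could enable the INVPCID intercept
through the new helper roughly as follows.

	/* Assumed definition: bit 2 of the extended intercept word (offset 14h) */
	#define INTERCEPT_INVPCID	2

	static void svm_enable_invpcid_intercept(struct vcpu_svm *svm)
	{
		/*
		 * Set the INVPCID bit in the extended intercept vector of the
		 * host VMCB; recalc_intercepts() folds it into the active VMCB
		 * when a nested guest is running.
		 */
		set_extended_intercept(svm, INTERCEPT_INVPCID);
	}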