Message ID | 20220825225755.907001-2-mizhang@google.com
---|---
State | New, archived
Series | Extend KVM trace_kvm_nested_vmrun() to support VMX
On Thu, Aug 25, 2022, Mingwei Zhang wrote:
> Update the trace function for nested VM entry to support VMX. The
> existing trace function only supports nested SVM and the information it
> prints is AMD specific.
>
> So, rename trace_kvm_nested_vmrun() to trace_kvm_nested_vmenter(), since
> 'vmenter' is generic. Add a new field 'isa' to distinguish Intel from
> AMD, and update the output to use the VMX/SVM-specific naming, e.g.,
> vmcs vs. vmcb and ept vs. npt.
>
> Opportunistically update the call site of trace_kvm_nested_vmenter() to
> put one parameter per line.
>
> Signed-off-by: Mingwei Zhang <mizhang@google.com>
> ---
>  arch/x86/kvm/svm/nested.c |  6 ++++--
>  arch/x86/kvm/trace.h      | 28 ++++++++++++++++++----------
>  arch/x86/kvm/x86.c        |  2 +-
>  3 files changed, 23 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 76dcc8a3e849..835c508eed8e 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -781,11 +781,13 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  	int ret;
>
> -	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
> +	trace_kvm_nested_vmenter(svm->vmcb->save.rip,
> +				 vmcb12_gpa,
> 			       vmcb12->save.rip,
> 			       vmcb12->control.int_ctl,
> 			       vmcb12->control.event_inj,
> -			       vmcb12->control.nested_ctl);
> +				 vmcb12->control.nested_ctl,
> +				 KVM_ISA_SVM);

Align indentation.

>
> 	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
> 				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
> diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
> index 2120d7c060a9..e7f0da9474f0 100644
> --- a/arch/x86/kvm/trace.h
> +++ b/arch/x86/kvm/trace.h
> @@ -589,10 +589,11 @@ TRACE_EVENT(kvm_pv_eoi,
>  /*
>   * Tracepoint for nested VMRUN
>   */
> -TRACE_EVENT(kvm_nested_vmrun,
> +TRACE_EVENT(kvm_nested_vmenter,
> 	TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
> -		 __u32 event_inj, bool npt),
> -	TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
> +		 __u32 event_inj, bool tdp_enabled, __u32 isa),
> +	TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
> +		isa),

Let this poke out, "isa" feels quite lonely here.  It's a moot point when
patch 3 comes along, mostly a "for future reference" thing.

>
> 	TP_STRUCT__entry(
> 		__field( __u64, rip )
> @@ -600,7 +601,8 @@ TRACE_EVENT(kvm_nested_vmrun,
> 		__field( __u64, nested_rip )
> 		__field( __u32, int_ctl )
> 		__field( __u32, event_inj )
> -		__field( bool, npt )
> +		__field( bool, tdp_enabled )
> +		__field( __u32, isa )
> 	),
>
> 	TP_fast_assign(
> @@ -609,14 +611,20 @@ TRACE_EVENT(kvm_nested_vmrun,
> 		__entry->nested_rip = nested_rip;
> 		__entry->int_ctl = int_ctl;
> 		__entry->event_inj = event_inj;
> -		__entry->npt = npt;
> +		__entry->tdp_enabled = tdp_enabled;
> +		__entry->isa = isa;
> 	),
>
> -	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
> -		  "event_inj: 0x%08x npt: %s",
> -		__entry->rip, __entry->vmcb, __entry->nested_rip,
> -		__entry->int_ctl, __entry->event_inj,
> -		__entry->npt ? "on" : "off")
> +	TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx "
> +		  "int_ctl: 0x%08x event_inj: 0x%08x nested_%s: %s",
> +		__entry->rip,
> +		__entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb",
> +		__entry->vmcb,
> +		__entry->nested_rip,
> +		__entry->int_ctl,
> +		__entry->event_inj,
> +		__entry->isa == KVM_ISA_VMX ? "ept" : "npt",
> +		__entry->tdp_enabled ? "on" : "off")

Align indentation.
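As a side note, a minimal sketch of what the review comments above are asking for at the call site and in TP_ARGS, assuming the continuation lines are simply re-aligned under the opening parenthesis of the new, longer name and the argument list is kept on a single line (an illustration, not the author's actual follow-up):

```c
/*
 * Illustration only: the SVM call site with every continuation line
 * aligned under the opening parenthesis of the renamed tracepoint.
 */
	trace_kvm_nested_vmenter(svm->vmcb->save.rip,
				 vmcb12_gpa,
				 vmcb12->save.rip,
				 vmcb12->control.int_ctl,
				 vmcb12->control.event_inj,
				 vmcb12->control.nested_ctl,
				 KVM_ISA_SVM);

/*
 * Illustration only: TP_ARGS kept on one line rather than wrapping
 * just to carry the final "isa" argument.
 */
	TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled, isa),
```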
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 76dcc8a3e849..835c508eed8e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -781,11 +781,13 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
 	struct vcpu_svm *svm = to_svm(vcpu);
 	int ret;
 
-	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
+	trace_kvm_nested_vmenter(svm->vmcb->save.rip,
+				 vmcb12_gpa,
 			       vmcb12->save.rip,
 			       vmcb12->control.int_ctl,
 			       vmcb12->control.event_inj,
-			       vmcb12->control.nested_ctl);
+				 vmcb12->control.nested_ctl,
+				 KVM_ISA_SVM);
 
 	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
 				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 2120d7c060a9..e7f0da9474f0 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -589,10 +589,11 @@ TRACE_EVENT(kvm_pv_eoi,
 /*
  * Tracepoint for nested VMRUN
  */
-TRACE_EVENT(kvm_nested_vmrun,
+TRACE_EVENT(kvm_nested_vmenter,
 	TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
-		 __u32 event_inj, bool npt),
-	TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
+		 __u32 event_inj, bool tdp_enabled, __u32 isa),
+	TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
+		isa),
 
 	TP_STRUCT__entry(
 		__field( __u64, rip )
@@ -600,7 +601,8 @@ TRACE_EVENT(kvm_nested_vmrun,
 		__field( __u64, nested_rip )
 		__field( __u32, int_ctl )
 		__field( __u32, event_inj )
-		__field( bool, npt )
+		__field( bool, tdp_enabled )
+		__field( __u32, isa )
 	),
 
 	TP_fast_assign(
@@ -609,14 +611,20 @@ TRACE_EVENT(kvm_nested_vmrun,
 		__entry->nested_rip = nested_rip;
 		__entry->int_ctl = int_ctl;
 		__entry->event_inj = event_inj;
-		__entry->npt = npt;
+		__entry->tdp_enabled = tdp_enabled;
+		__entry->isa = isa;
 	),
 
-	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
-		  "event_inj: 0x%08x npt: %s",
-		__entry->rip, __entry->vmcb, __entry->nested_rip,
-		__entry->int_ctl, __entry->event_inj,
-		__entry->npt ? "on" : "off")
+	TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx "
+		  "int_ctl: 0x%08x event_inj: 0x%08x nested_%s: %s",
+		__entry->rip,
+		__entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb",
+		__entry->vmcb,
+		__entry->nested_rip,
+		__entry->int_ctl,
+		__entry->event_inj,
+		__entry->isa == KVM_ISA_VMX ? "ept" : "npt",
+		__entry->tdp_enabled ? "on" : "off")
 );
 
 TRACE_EVENT(kvm_nested_intercepts,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d7374d768296..0c36528ab345 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13375,7 +13375,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
-EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
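For illustration only, the new format string would render a VMX event and an SVM event roughly as below; the hex values are placeholders and the usual trace prefix (task, PID, CPU, timestamp) is omitted:

```
kvm_nested_vmenter: rip: 0xffffffffc0a01234 vmcs: 0x0000000104a55000 nested_rip: 0x0000000000401000 int_ctl: 0x00000000 event_inj: 0x00000000 nested_ept: on
kvm_nested_vmenter: rip: 0xffffffffc0a05678 vmcb: 0x0000000104a66000 nested_rip: 0x0000000000402000 int_ctl: 0x01000000 event_inj: 0x00000000 nested_npt: on
```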
Update the trace function for nested VM entry to support VMX. The
existing trace function only supports nested SVM and the information it
prints is AMD specific.

So, rename trace_kvm_nested_vmrun() to trace_kvm_nested_vmenter(), since
'vmenter' is generic. Add a new field 'isa' to distinguish Intel from
AMD, and update the output to use the VMX/SVM-specific naming, e.g.,
vmcs vs. vmcb and ept vs. npt.

Opportunistically update the call site of trace_kvm_nested_vmenter() to
put one parameter per line.

Signed-off-by: Mingwei Zhang <mizhang@google.com>
---
 arch/x86/kvm/svm/nested.c |  6 ++++--
 arch/x86/kvm/trace.h      | 28 ++++++++++++++++++----------
 arch/x86/kvm/x86.c        |  2 +-
 3 files changed, 23 insertions(+), 13 deletions(-)
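Looking ahead to the VMX side that this rename enables (the "patch 3" the review refers to), a rough, hypothetical sketch of how nested VMX entry might feed the renamed tracepoint is shown below. The call site and the choice of vmcs12 fields are assumptions made for illustration, not taken from this patch:

```c
/*
 * Hypothetical illustration only: the vmcs12 fields used here (guest_rip,
 * guest_intr_status, vm_entry_intr_info_field) and the nested_cpu_has_ept()
 * check are assumptions about how a VMX caller could mirror the SVM call.
 */
	trace_kvm_nested_vmenter(kvm_rip_read(vcpu),
				 vmx->nested.current_vmptr,
				 vmcs12->guest_rip,
				 vmcs12->guest_intr_status,
				 vmcs12->vm_entry_intr_info_field,
				 nested_cpu_has_ept(vmcs12),
				 KVM_ISA_VMX);
```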