diff --git a/arch/arm64/kvm/trace.c b/arch/arm64/kvm/trace.c
--- a/arch/arm64/kvm/trace.c
+++ b/arch/arm64/kvm/trace.c
@@ -10,6 +10,9 @@ static void kvm_entry_tp(void *data, struct kvm_vcpu *vcpu)
{
if (trace_kvm_entry_enabled())
trace_kvm_entry(*vcpu_pc(vcpu));
+
+ if (trace_kvm_entry_v2_enabled())
+ trace_kvm_entry_v2(vcpu);
}
static void kvm_exit_tp(void *data, int ret, struct kvm_vcpu *vcpu)
@@ -17,6 +20,9 @@ static void kvm_exit_tp(void *data, int ret, struct kvm_vcpu *vcpu)
if (trace_kvm_exit_enabled())
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu),
*vcpu_pc(vcpu));
+
+ if (trace_kvm_exit_v2_enabled())
+ trace_kvm_exit_v2(ret, vcpu);
}
static int __init kvm_tp_init(void)
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -4,6 +4,7 @@
#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h>
+#include <asm/kvm_emulate.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
@@ -30,6 +31,23 @@ TRACE_EVENT(kvm_entry,
TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);
+TRACE_EVENT(kvm_entry_v2,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ __field( unsigned long, vcpu_pc )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->vcpu_pc = *vcpu_pc(vcpu);
+ ),
+
+ TP_printk("vcpu: %u PC: 0x%016lx", __entry->vcpu_id, __entry->vcpu_pc)
+);
+
DECLARE_TRACE(kvm_exit_tp,
TP_PROTO(int ret, struct kvm_vcpu *vcpu),
TP_ARGS(ret, vcpu));
@@ -57,6 +75,33 @@ TRACE_EVENT(kvm_exit,
__entry->vcpu_pc)
);
+TRACE_EVENT(kvm_exit_v2,
+ TP_PROTO(int ret, struct kvm_vcpu *vcpu),
+ TP_ARGS(ret, vcpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ __field( int, ret )
+ __field( unsigned int, esr_ec )
+ __field( unsigned long, vcpu_pc )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->ret = ARM_EXCEPTION_CODE(ret);
+ __entry->esr_ec = ARM_EXCEPTION_IS_TRAP(ret) ?
+ kvm_vcpu_trap_get_class(vcpu) : 0;
+ __entry->vcpu_pc = *vcpu_pc(vcpu);
+ ),
+
+ TP_printk("%s: vcpu: %u HSR_EC: 0x%04x (%s), PC: 0x%016lx",
+ __print_symbolic(__entry->ret, kvm_arm_exception_type),
+ __entry->vcpu_id,
+ __entry->esr_ec,
+ __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
+ __entry->vcpu_pc)
+);
+
TRACE_EVENT(kvm_guest_fault,
TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
unsigned long hxfar,
The existing trace events kvm_entry and kvm_exit do not include the virtual CPU id, so the perf tool has no way to produce per-virtual-CPU statistics; and because these trace events are ABI, they cannot be changed without breaking the ABI.

For these reasons, this patch adds two trace events, kvm_entry_v2 and kvm_exit_v2, which carry a new 'vcpu_id' field. To support both the old and new events, the tracepoint callback checks whether each event is enabled and, if so, invokes the corresponding trace event.

Signed-off-by: Leo Yan <leo.yan@linaro.org>
---
 arch/arm64/kvm/trace.c     |  6 +++++
 arch/arm64/kvm/trace_arm.h | 45 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+)
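As background for the dispatch pattern the commit message describes (not part of the patch itself): trace_kvm_entry_v2_enabled() and friends are the static-key helpers generated by TRACE_EVENT(), while kvm_entry_tp()/kvm_exit_tp() are probes attached to the DECLARE_TRACE() tracepoints. The sketch below shows how such probes are typically registered from kvm_tp_init(); the actual body of kvm_tp_init() is not visible in this diff, so the register_trace_<name>() calls here are illustrative only.

static int __init kvm_tp_init(void)
{
	int ret;

	/* Attach the probe that fans out to kvm_entry and kvm_entry_v2. */
	ret = register_trace_kvm_entry_tp(kvm_entry_tp, NULL);
	if (ret)
		return ret;

	/* Attach the probe that fans out to kvm_exit and kvm_exit_v2. */
	ret = register_trace_kvm_exit_tp(kvm_exit_tp, NULL);
	if (ret)
		unregister_trace_kvm_entry_tp(kvm_entry_tp, NULL);

	return ret;
}

With TRACE_SYSTEM set to kvm, the new events appear alongside the old ones under events/kvm/ in tracefs (kvm:kvm_entry_v2, kvm:kvm_exit_v2) and can be enabled independently, so existing users of kvm:kvm_entry and kvm:kvm_exit are unaffected.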