@@ -3023,6 +3023,7 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 	cap->events_mask	= (unsigned int)x86_pmu.events_maskl;
 	cap->events_mask_len	= x86_pmu.events_mask_len;
 	cap->pebs_ept		= x86_pmu.pebs_ept;
+	cap->passthrough	= !!(x86_pmu.flags & PMU_FL_PASSTHROUGH);
 }
 EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
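A note on the line added above: PMU_FL_PASSTHROUGH is a mask bit (0x800, per the header hunk further down), while cap->passthrough is a one-bit bitfield, so the mask test has to be collapsed to 0 or 1 before the assignment; that is what the double negation does. A minimal standalone sketch of the idiom, using only values taken from this patch:

#include <stdio.h>

#define PMU_FL_PASSTHROUGH 0x800	/* value from the header hunk below */

int main(void)
{
	unsigned int flags = PMU_FL_PASSTHROUGH;	/* pretend the core PMU set the flag */
	struct { unsigned int passthrough:1; } cap = { 0 };

	/* Without '!!', assigning (flags & 0x800) to a one-bit field would
	 * keep only the low bit of 0x800, i.e. 0. '!!' yields exactly 0 or 1. */
	cap.passthrough = !!(flags & PMU_FL_PASSTHROUGH);
	printf("passthrough=%u\n", cap.passthrough);
	return 0;
}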
@@ -6246,6 +6246,7 @@ __init int intel_pmu_init(void)
 	/* The perf side of core PMU is ready to support the passthrough vPMU. */
 	x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_PASSTHROUGH_VPMU;
+	x86_pmu.flags |= PMU_FL_PASSTHROUGH;
 
 	/*
 	 * Install the hw-cache-events table:
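This hunk (presumably in intel_pmu_init() in arch/x86/events/intel/core.c) now signals passthrough support through two channels: PERF_PMU_CAP_PASSTHROUGH_VPMU on the registered struct pmu, and the internal PMU_FL_PASSTHROUGH bit in x86_pmu.flags that perf_get_x86_pmu_capability() translates for callers outside the subsystem. A hedged sketch of how a perf-core path might test the per-PMU capability; the helper name is illustrative, not part of this series:

#include <linux/perf_event.h>

/* Illustrative helper, not from this patch; PERF_PMU_CAP_PASSTHROUGH_VPMU
 * is assumed to be defined by an earlier patch in the series. */
static bool pmu_has_passthrough_vpmu(struct pmu *pmu)
{
	return !!(pmu->capabilities & PERF_PMU_CAP_PASSTHROUGH_VPMU);
}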
@@ -1020,6 +1020,7 @@ do { \
 #define PMU_FL_MEM_LOADS_AUX	0x100 /* Require an auxiliary event for the complete memory info */
 #define PMU_FL_RETIRE_LATENCY	0x200 /* Support Retire Latency in PEBS */
 #define PMU_FL_BR_CNTR		0x400 /* Support branch counter logging */
+#define PMU_FL_PASSTHROUGH	0x800 /* Support passthrough mode */
 
 #define EVENT_VAR(_id) event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
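With the new bit defined (the PMU_FL_* block lives in the internal header arch/x86/events/perf_event.h, which also declares the global x86_pmu), core x86 perf code can gate passthrough-specific behavior with the usual mask test. A sketch of a hypothetical accessor; the function name is illustrative only:

/* Hypothetical accessor, not from this patch; x86_pmu and
 * PMU_FL_PASSTHROUGH are the series' own names. */
static inline bool x86_pmu_passthrough_mode(void)
{
	return !!(x86_pmu.flags & PMU_FL_PASSTHROUGH);
}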
@@ -258,6 +258,7 @@ struct x86_pmu_capability {
 	unsigned int	events_mask;
 	int		events_mask_len;
 	unsigned int	pebs_ept	:1;
+	unsigned int	passthrough	:1;
 };
 
 /*
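With the passthrough bit now part of struct x86_pmu_capability (arch/x86/include/asm/perf_event.h), a consumer outside the perf subsystem, such as KVM, can query it through the exported perf_get_x86_pmu_capability(). A sketch under the assumption that KVM gates its vPMU passthrough mode on this bit; the function name kvm_pmu_passthrough_supported() is hypothetical:

#include <asm/perf_event.h>

/* Hypothetical KVM-side check; only the struct field and the exported
 * perf_get_x86_pmu_capability() come from this patch. */
static bool kvm_pmu_passthrough_supported(void)
{
	struct x86_pmu_capability cap;

	perf_get_x86_pmu_capability(&cap);
	return cap.passthrough;
}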