diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -695,9 +695,10 @@ void x86_pmu_disable_all(void)
}
}
-struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data)
+struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr,
+ struct x86_guest_pebs *guest_pebs)
{
- return static_call(x86_pmu_guest_get_msrs)(nr, data);
+ return static_call(x86_pmu_guest_get_msrs)(nr, guest_pebs);
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
-#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/hardirq.h>
@@ -4053,11 +4052,11 @@ static int intel_pmu_hw_config(struct perf_event *event)
* when it uses {RD,WR}MSR, which should be handled by the KVM context,
* specifically in the intel_pmu_{get,set}_msr().
*/
-static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
+static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr,
+ struct x86_guest_pebs *guest_pebs)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
- struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
int global_ctrl, pebs_enable;
@@ -4090,20 +4089,20 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
return arr;
}
- if (!kvm_pmu || !x86_pmu.pebs_ept)
+ if (!guest_pebs || !x86_pmu.pebs_ept)
return arr;
arr[(*nr)++] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_DS_AREA,
.host = (unsigned long)cpuc->ds,
- .guest = kvm_pmu->ds_area,
+ .guest = guest_pebs->ds_area,
};
if (x86_pmu.intel_cap.pebs_baseline) {
arr[(*nr)++] = (struct perf_guest_switch_msr){
.msr = MSR_PEBS_DATA_CFG,
.host = cpuc->active_pebs_data_cfg,
- .guest = kvm_pmu->pebs_data_cfg,
+ .guest = guest_pebs->data_cfg,
};
}
@@ -4119,8 +4118,8 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
arr[pebs_enable].guest = 0;
} else {
/* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
- arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
- arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
+ arr[pebs_enable].guest &= ~guest_pebs->cross_mapped_mask;
+ arr[global_ctrl].guest &= ~guest_pebs->cross_mapped_mask;
/* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
arr[global_ctrl].guest |= arr[pebs_enable].guest;
}
@@ -4128,7 +4127,8 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
return arr;
}
-static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
+static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr,
+ struct x86_guest_pebs *guest_pebs)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -920,7 +920,8 @@ struct x86_pmu {
/*
* Intel host/guest support (KVM)
*/
- struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr, void *data);
+ struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr,
+ struct x86_guest_pebs *guest_pebs);
/*
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -548,15 +548,6 @@ struct kvm_pmu {
u64 pebs_data_cfg;
u64 pebs_data_cfg_mask;
- /*
- * If a guest counter is cross-mapped to host counter with different
- * index, its PEBS capability will be temporarily disabled.
- *
- * The user should make sure that this mask is updated
- * after disabling interrupts and before perf_guest_get_msrs();
- */
- u64 host_cross_mapped_mask;
-
/*
* The gate to release perf_events not marked in
* pmc_in_use only once in a vcpu time slice.
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -564,11 +564,19 @@ static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif
+struct x86_guest_pebs {
+ u64 ds_area;
+ u64 data_cfg;
+ u64 cross_mapped_mask;
+};
+
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
-extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
+extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr,
+ struct x86_guest_pebs *guest_pebs);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
-struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
+struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr,
+ struct x86_guest_pebs *guest_pebs);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
memset(lbr, 0, sizeof(*lbr));
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -765,11 +765,20 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
intel_pmu_release_guest_lbr_event(vcpu);
}
-void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
+u64 intel_pmu_get_cross_mapped_mask(struct kvm_pmu *pmu)
{
- struct kvm_pmc *pmc = NULL;
+ u64 host_cross_mapped_mask;
+ struct kvm_pmc *pmc;
int bit, hw_idx;
+ if (!(pmu->pebs_enable & pmu->global_ctrl))
+ return 0;
+
+ /*
+	 * If a guest counter is cross-mapped to a host counter with a
+	 * different index, its PEBS capability will be temporarily disabled.
+ */
+ host_cross_mapped_mask = 0;
for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
X86_PMC_IDX_MAX) {
pmc = intel_pmc_idx_to_pmc(pmu, bit);
@@ -784,8 +793,9 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
*/
hw_idx = pmc->perf_event->hw.idx;
if (hw_idx != pmc->idx && hw_idx > -1)
- pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
+ host_cross_mapped_mask |= BIT_ULL(hw_idx);
}
+ return host_cross_mapped_mask;
}
struct kvm_pmu_ops intel_pmu_ops __initdata = {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7131,12 +7131,14 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
struct perf_guest_switch_msr *msrs;
struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
- pmu->host_cross_mapped_mask = 0;
- if (pmu->pebs_enable & pmu->global_ctrl)
- intel_pmu_cross_mapped_check(pmu);
+ struct x86_guest_pebs guest_pebs = {
+ .ds_area = pmu->ds_area,
+ .data_cfg = pmu->pebs_data_cfg,
+ .cross_mapped_mask = intel_pmu_get_cross_mapped_mask(pmu),
+ };
/* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
- msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
+ msrs = perf_guest_get_msrs(&nr_msrs, &guest_pebs);
if (!msrs)
return;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -670,7 +670,7 @@ static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
return !!vcpu_to_lbr_records(vcpu)->nr;
}
-void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
+u64 intel_pmu_get_cross_mapped_mask(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
Have perf define a struct for getting guest PEBS data from KVM instead
of poking into the kvm_pmu structure.  Passing in an entire "struct
kvm_pmu" _as an opaque pointer_ to get at three fields is silly,
especially since one of the fields exists purely to convey information
to perf, i.e. isn't used by KVM.

Perf should also own its APIs, i.e. define what fields/data it needs,
not rely on KVM to throw fields into data structures that effectively
hold KVM-internal state.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/events/core.c            |  5 +++--
 arch/x86/events/intel/core.c      | 18 +++++++++---------
 arch/x86/events/perf_event.h      |  3 ++-
 arch/x86/include/asm/kvm_host.h   |  9 ---------
 arch/x86/include/asm/perf_event.h | 12 ++++++++++--
 arch/x86/kvm/vmx/pmu_intel.c      | 16 +++++++++++++---
 arch/x86/kvm/vmx/vmx.c            | 10 ++++++----
 arch/x86/kvm/vmx/vmx.h            |  2 +-
 8 files changed, 44 insertions(+), 31 deletions(-)
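
For context on the shape of the new API, below is a minimal standalone
sketch of the calling convention this patch introduces.  It compiles with
an ordinary C compiler and is not kernel code: only the x86_guest_pebs
layout and the (nr, guest_pebs) signature come from the patch, while
demo_guest_get_msrs(), the hardcoded MSR numbers, and main() are invented
for illustration.  The point is that the callee consumes exactly the
fields perf's own struct declares, instead of casting an opaque pointer
back to KVM's kvm_pmu.

	#include <stdint.h>
	#include <stdio.h>

	/* Perf-owned description of guest PEBS state, as in the patch. */
	struct x86_guest_pebs {
		uint64_t ds_area;
		uint64_t data_cfg;
		uint64_t cross_mapped_mask;
	};

	struct perf_guest_switch_msr {
		unsigned int msr;
		uint64_t host, guest;
	};

	/*
	 * Stand-in for intel_guest_get_msrs(): no opaque cast, just the
	 * fields the API declares.
	 */
	static struct perf_guest_switch_msr *
	demo_guest_get_msrs(int *nr, struct x86_guest_pebs *guest_pebs)
	{
		static struct perf_guest_switch_msr arr[2];
		uint64_t guest_ctrl = 0xfull;	/* pretend 4 counters enabled */

		arr[0] = (struct perf_guest_switch_msr){
			.msr   = 0x600,		/* MSR_IA32_DS_AREA */
			.host  = 0,
			.guest = guest_pebs ? guest_pebs->ds_area : 0,
		};
		arr[1] = (struct perf_guest_switch_msr){
			.msr   = 0x3f1,		/* MSR_IA32_PEBS_ENABLE */
			.host  = 0,
			/* Cross-mapped counters lose PEBS while the guest runs. */
			.guest = guest_pebs ?
				 (guest_ctrl & ~guest_pebs->cross_mapped_mask) : 0,
		};
		*nr = 2;
		return arr;
	}

	int main(void)
	{
		/* Caller side, loosely mirroring atomic_switch_perf_msrs(). */
		struct x86_guest_pebs guest_pebs = {
			.ds_area = 0x1000,
			.data_cfg = 0x1,
			.cross_mapped_mask = 0x2,	/* counter 1 cross-mapped */
		};
		struct perf_guest_switch_msr *msrs;
		int i, nr;

		msrs = demo_guest_get_msrs(&nr, &guest_pebs);
		for (i = 0; i < nr; i++)
			printf("msr %#x: guest=%#llx\n", msrs[i].msr,
			       (unsigned long long)msrs[i].guest);
		return 0;
	}

Note how cross_mapped_mask becomes a plain u64 computed at call time by
intel_pmu_get_cross_mapped_mask(), so KVM no longer carries a kvm_pmu
field whose only consumer is perf.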