diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -24,6 +24,7 @@ KVM_X86_PMU_OP(set_msr)
KVM_X86_PMU_OP(refresh)
KVM_X86_PMU_OP(init)
KVM_X86_PMU_OP(reset)
+KVM_X86_PMU_OP(get_eventsel_event_mask)
KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
KVM_X86_PMU_OP_OPTIONAL(cleanup)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -247,6 +247,19 @@ static int cmp_u64(const void *pa, const void *pb)
return (a > b) - (a < b);
}
+static inline u64 get_event_select(u64 eventsel)
+{
+ return eventsel & static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+}
+
+static inline u64 get_raw_event(u64 eventsel)
+{
+ u64 event_select = get_event_select(eventsel);
+ u64 unit_mask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
+
+ return event_select | unit_mask;
+}
+
static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{
struct kvm_pmu_event_filter *filter;
@@ -263,7 +276,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
goto out;
if (pmc_is_gp(pmc)) {
- key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
+ key = get_raw_event(pmc->eventsel);
if (bsearch(&key, filter->events, filter->nevents,
sizeof(__u64), cmp_u64))
allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -40,6 +40,7 @@ struct kvm_pmu_ops {
void (*reset)(struct kvm_vcpu *vcpu);
void (*deliver_pmi)(struct kvm_vcpu *vcpu);
void (*cleanup)(struct kvm_vcpu *vcpu);
+ u64 (*get_eventsel_event_mask)(void);
};
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -294,6 +294,11 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
}
}
+static u64 amd_pmu_get_eventsel_event_mask(void)
+{
+ return AMD64_EVENTSEL_EVENT;
+}
+
struct kvm_pmu_ops amd_pmu_ops __initdata = {
.hw_event_available = amd_hw_event_available,
.pmc_is_enabled = amd_pmc_is_enabled,
@@ -307,4 +312,5 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
.refresh = amd_pmu_refresh,
.init = amd_pmu_init,
.reset = amd_pmu_reset,
+ .get_eventsel_event_mask = amd_pmu_get_eventsel_event_mask,
};
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -793,6 +793,11 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
}
}
+static u64 intel_pmu_get_eventsel_event_mask(void)
+{
+ return ARCH_PERFMON_EVENTSEL_EVENT;
+}
+
struct kvm_pmu_ops intel_pmu_ops __initdata = {
.hw_event_available = intel_hw_event_available,
.pmc_is_enabled = intel_pmc_is_enabled,
@@ -808,4 +813,5 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
.reset = intel_pmu_reset,
.deliver_pmi = intel_pmu_deliver_pmi,
.cleanup = intel_pmu_cleanup,
+ .get_eventsel_event_mask = intel_pmu_get_eventsel_event_mask,
};
When checking whether a PMU event the guest is attempting to program should
be filtered, only consider the event select + unit mask in that decision.
Use an architecture-specific mask to mask out all other bits, including
bits 35:32 on Intel.  Those bits are not part of the event select and
should not be considered in that decision.

Fixes: 66bb8a065f5a ("KVM: x86: PMU Event Filter")
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/include/asm/kvm-x86-pmu-ops.h |  1 +
 arch/x86/kvm/pmu.c                     | 15 ++++++++++++++-
 arch/x86/kvm/pmu.h                     |  1 +
 arch/x86/kvm/svm/pmu.c                 |  6 ++++++
 arch/x86/kvm/vmx/pmu_intel.c           |  6 ++++++
 5 files changed, 28 insertions(+), 1 deletion(-)
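
Note for reviewers: below is a minimal, standalone userspace sketch (not kernel
code) of why the per-arch event mask matters for filter matching.  The mask
values are assumed to mirror the kernel's definitions in
arch/x86/include/asm/perf_event.h, the filter entry encoding (event select in
bits 7:0, unit mask in bits 15:8) follows the KVM_PMU_EVENT_FILTER ABI as I
understand it, and get_raw_event() here is only shaped like the new helper in
pmu.c, parameterized by the per-arch mask rather than using a static call.

#include <stdint.h>
#include <stdio.h>

/* Assumed to match arch/x86/include/asm/perf_event.h. */
#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL	/* bits 7:0  */
#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL	/* bits 15:8 */
#define AMD64_EVENTSEL_EVENT		(0xFULL << 32 | ARCH_PERFMON_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		(AMD64_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK)

/* Same shape as the new helpers, but taking the per-arch mask as an argument. */
static uint64_t get_raw_event(uint64_t eventsel, uint64_t event_mask)
{
	return (eventsel & event_mask) | (eventsel & ARCH_PERFMON_EVENTSEL_UMASK);
}

int main(void)
{
	/* LLC Misses (event 0x2e, umask 0x41) with IN_TX (bit 32) also set. */
	uint64_t eventsel = (1ULL << 32) | 0x412eULL;
	/* A filter entry as userspace would encode it: event select + umask. */
	uint64_t filter_entry = 0x412eULL;

	uint64_t old_key = eventsel & AMD64_RAW_EVENT_MASK_NB;	/* keeps bit 32 */
	uint64_t new_key = get_raw_event(eventsel, ARCH_PERFMON_EVENTSEL_EVENT);

	printf("old key 0x%llx matches filter entry: %d\n",
	       (unsigned long long)old_key, old_key == filter_entry);
	printf("new key 0x%llx matches filter entry: %d\n",
	       (unsigned long long)new_key, new_key == filter_entry);
	return 0;
}

With the old AMD64_RAW_EVENT_MASK_NB-based key, an Intel guest that sets a bit
in 35:32 of the event selector (e.g. IN_TX) produces a key no filter entry can
match, so an allow list would block the event and a deny list would fail to
catch it; masking with the architecture's event select bits restores the
intended match.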