@@ -239,6 +239,19 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
return true;
}
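
+/*
+ * Return the full event select for @eventsel: the low 8 bits from
+ * EVENTSEL[7:0] plus, when the vendor's event mask includes them, the
+ * extended event select bits [35:32] folded into bits [11:8] of the
+ * 12-bit result.
+ */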
+static inline u16 get_event_select(u64 eventsel)
+{
+ u64 e = eventsel &
+ static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+
+ return (e & ARCH_PERFMON_EVENTSEL_EVENT) | ((e >> 24) & 0xF00ULL);
+}
+
+static inline u8 get_unit_mask(u64 eventsel)
+{
+ return (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+}
+
static int cmp_u64(const void *pa, const void *pb)
{
u64 a = *(u64 *)pa;
@@ -247,53 +260,61 @@ static int cmp_u64(const void *pa, const void *pb)
return (a > b) - (a < b);
}
-static inline u64 get_event_select(u64 eventsel)
+static u64 *find_filter_entry(struct kvm_pmu_event_filter *filter, u64 key)
+{
+ return bsearch(&key, filter->events, filter->nevents,
+ sizeof(filter->events[0]), cmp_u64);
+}
+
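+/*
+ * Re-encode the guest's event select and unit mask into the internal
+ * filter-entry format and look the key up in the sorted filter list.
+ */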
+static bool filter_contains_match(struct kvm_pmu_event_filter *filter,
+ u64 eventsel)
{
- return eventsel & static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+ u16 event_select = get_event_select(eventsel);
+ u8 unit_mask = get_unit_mask(eventsel);
+ u64 key;
+
+ key = KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask);
+ return find_filter_entry(filter, key);
}
-static inline u64 get_raw_event(u64 eventsel)
+static bool is_gp_event_allowed(struct kvm_pmu_event_filter *filter, u64 eventsel)
{
- u64 event_select = get_event_select(eventsel);
- u64 unit_mask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
+ if (filter_contains_match(filter, eventsel))
+ return filter->action == KVM_PMU_EVENT_ALLOW;

- return event_select | unit_mask;
+ return filter->action == KVM_PMU_EVENT_DENY;
+}
+
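+/*
+ * A set bit in fixed_counter_bitmap selects a fixed counter: selected
+ * counters are blocked by a DENY filter, and only selected counters are
+ * permitted by an ALLOW filter.
+ */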
+static bool is_fixed_event_allowed(struct kvm_pmu_event_filter *filter, int idx)
+{
+ int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
+
+ if (filter->action == KVM_PMU_EVENT_DENY &&
+ test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+ return false;
+ if (filter->action == KVM_PMU_EVENT_ALLOW &&
+ !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+ return false;
+
+ return true;
}
static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{
struct kvm_pmu_event_filter *filter;
struct kvm *kvm = pmc->vcpu->kvm;
- bool allow_event = true;
- __u64 key;
- int idx;
if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
return false;
filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
if (!filter)
- goto out;
+ return true;

- if (pmc_is_gp(pmc)) {
- key = get_raw_event(pmc->eventsel);
- if (bsearch(&key, filter->events, filter->nevents,
- sizeof(__u64), cmp_u64))
- allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
- else
- allow_event = filter->action == KVM_PMU_EVENT_DENY;
- } else {
- idx = pmc->idx - INTEL_PMC_IDX_FIXED;
- if (filter->action == KVM_PMU_EVENT_DENY &&
- test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
- allow_event = false;
- if (filter->action == KVM_PMU_EVENT_ALLOW &&
- !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
- allow_event = false;
- }
+ if (pmc_is_gp(pmc))
+ return is_gp_event_allowed(filter, pmc->eventsel);

-out:
- return allow_event;
+ return is_fixed_event_allowed(filter, pmc->idx);
}
void reprogram_counter(struct kvm_pmc *pmc)
@@ -609,6 +630,38 @@ static void remove_invalid_raw_events(struct kvm_pmu_event_filter *filter)
filter->nevents = j;
}
+static inline u64 encode_filter_entry(u64 event)
+{
+ u16 event_select = get_event_select(event);
+ u8 unit_mask = get_unit_mask(event);
+
+ return KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask);
+}
+
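+/*
+ * Rewrite each raw event provided by userspace in place as an internal
+ * filter entry.
+ */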
+static void convert_to_filter_events(struct kvm_pmu_event_filter *filter)
+{
+ int i;
+
+ for (i = 0; i < filter->nevents; i++) {
+ u64 e = filter->events[i];
+
+ filter->events[i] = encode_filter_entry(e);
+ }
+}
+
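+/*
+ * Massage the userspace copy of the filter for internal use: drop
+ * invalid raw events, convert the survivors to the internal filter-entry
+ * encoding, and sort so lookups can use bsearch().
+ */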
+static void prepare_filter_events(struct kvm_pmu_event_filter *filter)
+{
+ remove_invalid_raw_events(filter);
+
+ convert_to_filter_events(filter);
+
+ /*
+ * Sort the in-kernel list so that we can search it with bsearch.
+ */
+ sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
+ cmp_u64, NULL);
+}
+
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
struct kvm_pmu_event_filter tmp, *filter;
@@ -640,12 +693,7 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
/* Ensure nevents can't be changed between the user copies. */
*filter = tmp;
- remove_invalid_raw_events(filter);
-
- /*
- * Sort the in-kernel list so that we can search it with bsearch.
- */
- sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
+ prepare_filter_events(filter);
mutex_lock(&kvm->lock);
filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
@@ -205,4 +205,20 @@ bool is_vmware_backdoor_pmc(u32 pmc_idx);
extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
+
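+/*
+ * Internal layout of a prepared filter entry: bits [11:0] hold the event
+ * select (including the extended event select bits) and bits [19:12]
+ * hold the unit mask.  KVM_PMU_ENCODE_FILTER_ENTRY() must stay in sync
+ * with this layout.
+ */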
+struct kvm_pmu_filter_entry {
+ union {
+ u64 raw;
+ struct {
+ u64 event_select:12;
+ u64 unit_mask:8;
+ u64 rsvd:44;
+ };
+ };
+};
+
+#define KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask) \
+ (((event_select) & 0xFFFULL) | \
+ (((unit_mask) & 0xFFULL) << 12))
+
#endif /* __KVM_X86_PMU_H */