
[v5,3/7] kvm: x86/pmu: prepare the pmu event filter for masked events

Message ID 20220920174603.302510-4-aaronlewis@google.com (mailing list archive)
State New, archived
Series Introduce and test masked events

Commit Message

Aaron Lewis Sept. 20, 2022, 5:45 p.m. UTC
Create an internal representation for filter events to abstract the
events userspace uses from the events the kernel uses.  That will allow
the kernel to use a common event and a common code path between the
different types of filter events used in userspace once masked events
are introduced.

No functional changes intended.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/pmu.c | 116 ++++++++++++++++++++++++++++++++-------------
 arch/x86/kvm/pmu.h |  16 +++++++
 2 files changed, 98 insertions(+), 34 deletions(-)
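
To make the abstraction concrete, here is a standalone sketch (not part of the patch; the 0x3C/0x01 event is purely illustrative) of the internal encoding this series introduces: a 12-bit event select in bits 11:0 and an 8-bit unit mask in bits 19:12, matching the KVM_PMU_ENCODE_FILTER_ENTRY() macro added to pmu.h below.

#include <stdint.h>
#include <stdio.h>

/* Same encoding as the macro this patch adds to arch/x86/kvm/pmu.h. */
#define KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask) \
	(((event_select) & 0xFFFULL) | \
	(((unit_mask) & 0xFFULL) << 12))

int main(void)
{
	/* Illustrative event: 0x3C/0x01 (unhalted reference cycles). */
	uint64_t key = KVM_PMU_ENCODE_FILTER_ENTRY(0x3c, 0x01);

	/* Prints 0x103c: event select in bits 11:0, unit mask in bits 19:12. */
	printf("0x%llx\n", (unsigned long long)key);
	return 0;
}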

Comments

Sean Christopherson Oct. 8, 2022, 12:08 a.m. UTC | #1
On Tue, Sep 20, 2022, Aaron Lewis wrote:
> Create an internal representation for filter events to abstract the
> events userspace uses from the events the kernel uses.  That will allow
> the kernel to use a common event and a common code path between the
> different types of filter events used in userspace once masked events
> are introduced.
> 
> No functional changes intended
> 
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
> Reviewed-by: Jim Mattson <jmattson@google.com>
> ---
>  arch/x86/kvm/pmu.c | 116 ++++++++++++++++++++++++++++++++-------------
>  arch/x86/kvm/pmu.h |  16 +++++++
>  2 files changed, 98 insertions(+), 34 deletions(-)
> 
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index e7d94e6b7f28..7ce8bfafea91 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -239,6 +239,19 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
>  	return true;
>  }

Hoisting the definition of the union here to make it easier to review.

> +
> +struct kvm_pmu_filter_entry {
> +	union {
> +		u64 raw;
> +		struct {
> +			u64 event_select:12;
> +			u64 unit_mask:8;
> +			u64 rsvd:44;
> +		};
> +	};
> +};
> +
> +#define KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask) \
> +	(((event_select) & 0xFFFULL) | \
> +	(((unit_mask) & 0xFFULL) << 12))
> +
>  #endif /* __KVM_X86_PMU_H */

...

> +static inline u16 get_event_select(u64 eventsel)
> +{
> +	u64 e = eventsel &
> +		static_call(kvm_x86_pmu_get_eventsel_event_mask)();
> +
> +	return (e & ARCH_PERFMON_EVENTSEL_EVENT) | ((e >> 24) & 0xF00ULL);

This is nasty.  It bleeds vendor details and is unnecessarily complex.  It also
forces this code to care about umask (more on that in patch 4).  Maybe the filter
code ends up caring about the umask anyways, but I think we can defer that until
it's actually necessary.
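
For context on the vendor details in question: AMD encodes EventSelect[11:8] in PerfEvtSel bits 35:32, while Intel's architectural event select is only bits 7:0. A standalone sketch (not from the patch, illustrative value only) of what the shift above does:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the (e >> 24) & 0xF00 fold in get_event_select(): on AMD,
 * EventSelect[11:8] lives in PerfEvtSel bits 35:32, so the fold packs
 * an AMD-specific layout into a contiguous 12-bit value from common code.
 */
static uint16_t pack_event_select(uint64_t eventsel)
{
	/* 0xFF mirrors ARCH_PERFMON_EVENTSEL_EVENT (bits 7:0). */
	return (eventsel & 0xFFULL) | ((eventsel >> 24) & 0xF00ULL);
}

int main(void)
{
	uint64_t amd_eventsel = 0x5000000C7ULL;	/* EventSelect = 0x5C7 (illustrative) */

	printf("0x%x\n", pack_event_select(amd_eventsel));	/* prints 0x5c7 */
	return 0;
}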

Rather than pack the data into an arbitrary format, preserve the architectural
format and fill in the gaps.  Then there's no need for a separate in-kernel
layout, and no need to encode anything.

Then this patch is a rather simple refactoring:

---
 arch/x86/kvm/pmu.c | 54 +++++++++++++++++++++++++++-------------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 384cefbe20dd..882b0870735e 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -257,40 +257,50 @@ static int cmp_u64(const void *pa, const void *pb)
 	return (a > b) - (a < b);
 }
 
+static u64 *find_filter_entry(struct kvm_pmu_event_filter *filter, u64 key)
+{
+	return bsearch(&key, filter->events, filter->nevents,
+		       sizeof(filter->events[0]), cmp_u64);
+}
+
+static bool is_gp_event_allowed(struct kvm_pmu_event_filter *filter, u64 eventsel)
+{
+	if (find_filter_entry(filter, eventsel & kvm_pmu_ops.EVENTSEL_MASK))
+		return filter->action == KVM_PMU_EVENT_ALLOW;
+
+	return filter->action == KVM_PMU_EVENT_DENY;
+}
+
+static bool is_fixed_event_allowed(struct kvm_pmu_event_filter *filter, int idx)
+{
+	int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
+
+	if (filter->action == KVM_PMU_EVENT_DENY &&
+	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+		return false;
+	if (filter->action == KVM_PMU_EVENT_ALLOW &&
+	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+		return false;
+
+	return true;
+}
+
 static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu_event_filter *filter;
 	struct kvm *kvm = pmc->vcpu->kvm;
-	bool allow_event = true;
-	__u64 key;
-	int idx;
 
 	if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
 		return false;
 
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
 	if (!filter)
-		goto out;
+		return true;
 
-	if (pmc_is_gp(pmc)) {
-		key = pmc->eventsel & kvm_pmu_ops.EVENTSEL_MASK;
-		if (bsearch(&key, filter->events, filter->nevents,
-			    sizeof(__u64), cmp_u64))
-			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
-		else
-			allow_event = filter->action == KVM_PMU_EVENT_DENY;
-	} else {
-		idx = pmc->idx - INTEL_PMC_IDX_FIXED;
-		if (filter->action == KVM_PMU_EVENT_DENY &&
-		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			allow_event = false;
-		if (filter->action == KVM_PMU_EVENT_ALLOW &&
-		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			allow_event = false;
-	}
+	if (pmc_is_gp(pmc))
+		return is_gp_event_allowed(filter, pmc->eventsel);
 
-out:
-	return allow_event;
+	return is_fixed_event_allowed(filter, pmc->idx);
 }
 
 void reprogram_counter(struct kvm_pmc *pmc)

base-commit: 214272b6e5c3c64394cf52dc81bd5bf099167e5d
--

Patch

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index e7d94e6b7f28..7ce8bfafea91 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -239,6 +239,19 @@  static bool pmc_resume_counter(struct kvm_pmc *pmc)
 	return true;
 }
 
+static inline u16 get_event_select(u64 eventsel)
+{
+	u64 e = eventsel &
+		static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+
+	return (e & ARCH_PERFMON_EVENTSEL_EVENT) | ((e >> 24) & 0xF00ULL);
+}
+
+static inline u8 get_unit_mask(u64 eventsel)
+{
+	return (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+}
+
 static int cmp_u64(const void *pa, const void *pb)
 {
 	u64 a = *(u64 *)pa;
@@ -247,53 +260,61 @@  static int cmp_u64(const void *pa, const void *pb)
 	return (a > b) - (a < b);
 }
 
-static inline u64 get_event_select(u64 eventsel)
+static u64 *find_filter_entry(struct kvm_pmu_event_filter *filter, u64 key)
+{
+	return bsearch(&key, filter->events, filter->nevents,
+			  sizeof(filter->events[0]), cmp_u64);
+}
+
+static bool filter_contains_match(struct kvm_pmu_event_filter *filter,
+				  u64 eventsel)
 {
-	return eventsel & static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+	u16 event_select = get_event_select(eventsel);
+	u8 unit_mask = get_unit_mask(eventsel);
+	u64 key;
+
+	key = KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask);
+	return find_filter_entry(filter, key);
 }
 
-static inline u64 get_raw_event(u64 eventsel)
+static bool is_gp_event_allowed(struct kvm_pmu_event_filter *filter, u64 eventsel)
 {
-	u64 event_select = get_event_select(eventsel);
-	u64 unit_mask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
+	if (filter_contains_match(filter, eventsel))
+		return filter->action == KVM_PMU_EVENT_ALLOW;
 
-	return event_select | unit_mask;
+	return filter->action == KVM_PMU_EVENT_DENY;
+}
+
+static bool is_fixed_event_allowed(struct kvm_pmu_event_filter *filter, int idx)
+{
+	int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
+
+	if (filter->action == KVM_PMU_EVENT_DENY &&
+	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+		return false;
+	if (filter->action == KVM_PMU_EVENT_ALLOW &&
+	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+		return false;
+
+	return true;
 }
 
 static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu_event_filter *filter;
 	struct kvm *kvm = pmc->vcpu->kvm;
-	bool allow_event = true;
-	__u64 key;
-	int idx;
 
 	if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
 		return false;
 
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
 	if (!filter)
-		goto out;
+		return true;
 
-	if (pmc_is_gp(pmc)) {
-		key = get_raw_event(pmc->eventsel);
-		if (bsearch(&key, filter->events, filter->nevents,
-			    sizeof(__u64), cmp_u64))
-			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
-		else
-			allow_event = filter->action == KVM_PMU_EVENT_DENY;
-	} else {
-		idx = pmc->idx - INTEL_PMC_IDX_FIXED;
-		if (filter->action == KVM_PMU_EVENT_DENY &&
-		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			allow_event = false;
-		if (filter->action == KVM_PMU_EVENT_ALLOW &&
-		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			allow_event = false;
-	}
+	if (pmc_is_gp(pmc))
+		return is_gp_event_allowed(filter, pmc->eventsel);
 
-out:
-	return allow_event;
+	return is_fixed_event_allowed(filter, pmc->idx);
 }
 
 void reprogram_counter(struct kvm_pmc *pmc)
@@ -609,6 +630,38 @@  static void remove_invalid_raw_events(struct kvm_pmu_event_filter *filter)
 	filter->nevents = j;
 }
 
+static inline u64 encode_filter_entry(u64 event)
+{
+	u16 event_select = get_event_select(event);
+	u8 unit_mask = get_unit_mask(event);
+
+	return KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask);
+}
+
+static void convert_to_filter_events(struct kvm_pmu_event_filter *filter)
+{
+	int i;
+
+	for (i = 0; i < filter->nevents; i++) {
+		u64 e = filter->events[i];
+
+		filter->events[i] = encode_filter_entry(e);
+	}
+}
+
+static void prepare_filter_events(struct kvm_pmu_event_filter *filter)
+{
+	remove_invalid_raw_events(filter);
+
+	convert_to_filter_events(filter);
+
+	/*
+	 * Sort the in-kernel list so that we can search it with bsearch.
+	 */
+	sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
+	     cmp_u64, NULL);
+}
+
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_pmu_event_filter tmp, *filter;
@@ -640,12 +693,7 @@  int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	/* Ensure nevents can't be changed between the user copies. */
 	*filter = tmp;
 
-	remove_invalid_raw_events(filter);
-
-	/*
-	 * Sort the in-kernel list so that we can search it with bsearch.
-	 */
-	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
+	prepare_filter_events(filter);
 
 	mutex_lock(&kvm->lock);
 	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 4e22f9f55400..df4f81e5c685 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -205,4 +205,20 @@  bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
 extern struct kvm_pmu_ops intel_pmu_ops;
 extern struct kvm_pmu_ops amd_pmu_ops;
+
+struct kvm_pmu_filter_entry {
+	union {
+		u64 raw;
+		struct {
+			u64 event_select:12;
+			u64 unit_mask:8;
+			u64 rsvd:44;
+		};
+	};
+};
+
+#define KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask) \
+	(((event_select) & 0xFFFULL) | \
+	(((unit_mask) & 0xFFULL) << 12))
+
 #endif /* __KVM_X86_PMU_H */