diff mbox series

[v3,3/4] perf: arm_cspmu: Support implementation specific validation

Message ID 20230607083139.3498788-4-ilkka@os.amperecomputing.com (mailing list archive)
State New, archived
Series perf: ampere: Add support for Ampere SoC PMUs

Commit Message

Ilkka Koskinen June 7, 2023, 8:31 a.m. UTC
Some platforms may use, for example, a different filtering mechanism and
thus may need a different way to validate events and groups.

Signed-off-by: Ilkka Koskinen <ilkka@os.amperecomputing.com>
---
 drivers/perf/arm_cspmu/arm_cspmu.c | 13 ++++++++++++-
 drivers/perf/arm_cspmu/arm_cspmu.h |  4 ++++
 2 files changed, 16 insertions(+), 1 deletion(-)

Comments

Robin Murphy June 20, 2023, 11:44 a.m. UTC | #1
On 07/06/2023 9:31 am, Ilkka Koskinen wrote:
> Some platforms may use e.g. different filtering mechanism and, thus,
> may need different way to validate the events and group.
> 
> Signed-off-by: Ilkka Koskinen <ilkka@os.amperecomputing.com>
> ---
>   drivers/perf/arm_cspmu/arm_cspmu.c | 13 ++++++++++++-
>   drivers/perf/arm_cspmu/arm_cspmu.h |  4 ++++
>   2 files changed, 16 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
> index 72ca4f56347c..9021d1878250 100644
> --- a/drivers/perf/arm_cspmu/arm_cspmu.c
> +++ b/drivers/perf/arm_cspmu/arm_cspmu.c
> @@ -559,7 +559,7 @@ static void arm_cspmu_disable(struct pmu *pmu)
>   static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
>   				struct perf_event *event)
>   {
> -	int idx;
> +	int idx, ret;
>   	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
>   
>   	if (supports_cycle_counter(cspmu)) {
> @@ -593,6 +593,12 @@ static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
>   	if (idx >= cspmu->num_logical_ctrs)
>   		return -EAGAIN;
>   
> +	if (cspmu->impl.ops.validate_event) {
> +		ret = cspmu->impl.ops.validate_event(cspmu, event);
> +		if (ret)
> +			return ret;
> +	}
> +
>   	set_bit(idx, hw_events->used_ctrs);
>   
>   	return idx;
> @@ -618,6 +624,7 @@ static bool arm_cspmu_validate_event(struct pmu *pmu,
>    */
>   static bool arm_cspmu_validate_group(struct perf_event *event)
>   {
> +	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
>   	struct perf_event *sibling, *leader = event->group_leader;
>   	struct arm_cspmu_hw_events fake_hw_events;
>   
> @@ -635,6 +642,10 @@ static bool arm_cspmu_validate_group(struct perf_event *event)
>   			return false;
>   	}
>   
> +	if (cspmu->impl.ops.validate_group &&
> +	    cspmu->impl.ops.validate_group(event))
> +		return false;

Hmm, this means that any driver wanting to use it has to duplicate all 
the group iteration logic, which isn't ideal. More than that, though, 
I'm not sure the way you've implemented it in patch #4 even does 
anything, since it only appears to repeat the same checks that already 
happen in this path:

   arm_cspmu_validate_group()
     arm_cspmu_validate_event()
       arm_cspmu_get_event_idx()
         ops.validate_event() -> ampere_cspmu_validate_params()

so there's no need for the ops.validate_group hook to just call 
ampere_cspmu_validate_params() a second time when it's guaranteed to 
succeed (because otherwise we'd have bailed out already).

I think what we want overall is an "is this event config valid at all" 
hook from arm_cspmu_event_init() (which we don't really need to 
implement yet unless you want to start sanity-checking your actual 
rank/bank/threshold values), plus an "is this event schedulable in the 
given PMU context" hook from arm_cspmu_get_event_idx(), which should 
serve for both group validation via the fake context in event_init and 
actual scheduling in the real context in add.

Thanks,
Robin.

> +
>   	return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
>   }
>   
> diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
> index f89ae2077164..291cedb196ea 100644
> --- a/drivers/perf/arm_cspmu/arm_cspmu.h
> +++ b/drivers/perf/arm_cspmu/arm_cspmu.h
> @@ -106,6 +106,10 @@ struct arm_cspmu_impl_ops {
>   	void (*set_ev_filter)(struct arm_cspmu *cspmu,
>   			      struct hw_perf_event *hwc,
>   			      u32 filter);
> +	/* Implementation specific group validation */
> +	int (*validate_group)(struct perf_event *event);
> +	/* Implementation specific event validation */
> +	int (*validate_event)(struct arm_cspmu *cspmu, struct perf_event *new);
>   	/* Hide/show unsupported events */
>   	umode_t (*event_attr_is_visible)(struct kobject *kobj,
>   					 struct attribute *attr, int unused);
Ilkka Koskinen June 21, 2023, 10:09 p.m. UTC | #2
Hi Robin,

On Tue, 20 Jun 2023, Robin Murphy wrote:
> On 07/06/2023 9:31 am, Ilkka Koskinen wrote:
>> Some platforms may use e.g. different filtering mechanism and, thus,
>> may need different way to validate the events and group.
>> 
>> Signed-off-by: Ilkka Koskinen <ilkka@os.amperecomputing.com>
>> ---
>>   drivers/perf/arm_cspmu/arm_cspmu.c | 13 ++++++++++++-
>>   drivers/perf/arm_cspmu/arm_cspmu.h |  4 ++++
>>   2 files changed, 16 insertions(+), 1 deletion(-)
>> 
>> diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c 
>> b/drivers/perf/arm_cspmu/arm_cspmu.c
>> index 72ca4f56347c..9021d1878250 100644
>> --- a/drivers/perf/arm_cspmu/arm_cspmu.c
>> +++ b/drivers/perf/arm_cspmu/arm_cspmu.c
>> @@ -559,7 +559,7 @@ static void arm_cspmu_disable(struct pmu *pmu)
>>   static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
>>   				struct perf_event *event)
>>   {
>> -	int idx;
>> +	int idx, ret;
>>   	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
>>     	if (supports_cycle_counter(cspmu)) {
>> @@ -593,6 +593,12 @@ static int arm_cspmu_get_event_idx(struct 
>> arm_cspmu_hw_events *hw_events,
>>   	if (idx >= cspmu->num_logical_ctrs)
>>   		return -EAGAIN;
>>   +	if (cspmu->impl.ops.validate_event) {
>> +		ret = cspmu->impl.ops.validate_event(cspmu, event);
>> +		if (ret)
>> +			return ret;
>> +	}
>> +
>>   	set_bit(idx, hw_events->used_ctrs);
>>     	return idx;
>> @@ -618,6 +624,7 @@ static bool arm_cspmu_validate_event(struct pmu *pmu,
>>    */
>>   static bool arm_cspmu_validate_group(struct perf_event *event)
>>   {
>> +	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
>>   	struct perf_event *sibling, *leader = event->group_leader;
>>   	struct arm_cspmu_hw_events fake_hw_events;
>>   @@ -635,6 +642,10 @@ static bool arm_cspmu_validate_group(struct 
>> perf_event *event)
>>   			return false;
>>   	}
>>   +	if (cspmu->impl.ops.validate_group &&
>> +	    cspmu->impl.ops.validate_group(event))
>> +		return false;
>
> Hmm, this means that any driver wanting to use it has to duplicate all the 
> group iteration logic, which isn't ideal. More than that, though, the way 
> you've implemented it in patch #4 I'm not sure even does anything, since it 
> only appears to be repeating the same checks that already happen in this 
> path:
>
>  arm_cspmu_validate_group()
>    arm_cspmu_validate_event()
>      arm_cspmu_get_event_idx()
>        ops.validate_event() -> ampere_cspmu_validate_params()
>
> so there's no need for the ops.validate_group hook to just call 
> ampere_cspmu_validate_params() a second time when it's guaranteed to succeed 
> (because otherwise we'd have bailed out already).

Yeah, I took another look at how the framework really does it and 
you're absolutely correct; it's totally unnecessary.

>
> I think what we want overall is an "is this event config valid at all" hook 
> from arm_cspmu_event_init() (which we don't really need to implement yet 
> unless you want to start sanity-checking your actual rank/bank/threshold 
> values), plus an "is this event schedulable in the given PMU context" hook 
> from arm_cspmu_get_event_idx(), which should serve for both group validation 
> via the fake context in event_init and actual scheduling in the real context 
> in add.

Ah, that's true. I can already verify that the group event has the same 
rank/bank/threshold settings as the group leader in 
ops.validate_event(). Thus, one hook seems enough.

I'll fix and rebase the patchset.

Cheers, Ilkka


> Thanks,
> Robin.
>
>> +
>>   	return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
>>   }
>>   diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h 
>> b/drivers/perf/arm_cspmu/arm_cspmu.h
>> index f89ae2077164..291cedb196ea 100644
>> --- a/drivers/perf/arm_cspmu/arm_cspmu.h
>> +++ b/drivers/perf/arm_cspmu/arm_cspmu.h
>> @@ -106,6 +106,10 @@ struct arm_cspmu_impl_ops {
>>   	void (*set_ev_filter)(struct arm_cspmu *cspmu,
>>   			      struct hw_perf_event *hwc,
>>   			      u32 filter);
>> +	/* Implementation specific group validation */
>> +	int (*validate_group)(struct perf_event *event);
>> +	/* Implementation specific event validation */
>> +	int (*validate_event)(struct arm_cspmu *cspmu, struct perf_event 
>> *new);
>>   	/* Hide/show unsupported events */
>>   	umode_t (*event_attr_is_visible)(struct kobject *kobj,
>>   					 struct attribute *attr, int unused);
>
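
For reference, a minimal sketch of the single-hook approach described
above, assuming the rank/bank/threshold filter is encoded in
attr.config1; the helper names are illustrative and the reworked v4
code is not part of this thread.

static u32 example_cspmu_get_filter(const struct perf_event *event)
{
	/* Assumption: rank/bank/threshold are packed into attr.config1. */
	return (u32)event->attr.config1;
}

static int example_cspmu_validate_event(struct arm_cspmu *cspmu,
					struct perf_event *new)
{
	struct perf_event *leader = new->group_leader;

	/*
	 * arm_cspmu_get_event_idx() runs for every sibling during group
	 * validation, so checking against the leader here is enough to
	 * reject mixed filter settings within one group.
	 */
	if (new != leader && new->pmu == leader->pmu &&
	    example_cspmu_get_filter(new) != example_cspmu_get_filter(leader))
		return -EINVAL;

	return 0;
}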

Patch

diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 72ca4f56347c..9021d1878250 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -559,7 +559,7 @@  static void arm_cspmu_disable(struct pmu *pmu)
 static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
 				struct perf_event *event)
 {
-	int idx;
+	int idx, ret;
 	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
 
 	if (supports_cycle_counter(cspmu)) {
@@ -593,6 +593,12 @@  static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
 	if (idx >= cspmu->num_logical_ctrs)
 		return -EAGAIN;
 
+	if (cspmu->impl.ops.validate_event) {
+		ret = cspmu->impl.ops.validate_event(cspmu, event);
+		if (ret)
+			return ret;
+	}
+
 	set_bit(idx, hw_events->used_ctrs);
 
 	return idx;
@@ -618,6 +624,7 @@  static bool arm_cspmu_validate_event(struct pmu *pmu,
  */
 static bool arm_cspmu_validate_group(struct perf_event *event)
 {
+	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
 	struct perf_event *sibling, *leader = event->group_leader;
 	struct arm_cspmu_hw_events fake_hw_events;
 
@@ -635,6 +642,10 @@  static bool arm_cspmu_validate_group(struct perf_event *event)
 			return false;
 	}
 
+	if (cspmu->impl.ops.validate_group &&
+	    cspmu->impl.ops.validate_group(event))
+		return false;
+
 	return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
 }
 
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index f89ae2077164..291cedb196ea 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -106,6 +106,10 @@  struct arm_cspmu_impl_ops {
 	void (*set_ev_filter)(struct arm_cspmu *cspmu,
 			      struct hw_perf_event *hwc,
 			      u32 filter);
+	/* Implementation specific group validation */
+	int (*validate_group)(struct perf_event *event);
+	/* Implementation specific event validation */
+	int (*validate_event)(struct arm_cspmu *cspmu, struct perf_event *new);
 	/* Hide/show unsupported events */
 	umode_t (*event_attr_is_visible)(struct kobject *kobj,
 					 struct attribute *attr, int unused);
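
To illustrate how an implementation might consume the callback added by
this patch, here is a hypothetical sketch; the foo_* names, the
FOO_FILTER_MAX limit, and the init-ops wiring are assumptions, not code
from the series.

#define FOO_FILTER_MAX	0xff

static int foo_cspmu_validate_event(struct arm_cspmu *cspmu,
				    struct perf_event *new)
{
	/* Veto filter values the (made-up) hardware cannot program. */
	if (new->attr.config1 > FOO_FILTER_MAX)
		return -EINVAL;

	return 0;
}

/* Hooked up from the implementation's init-ops callback. */
static int foo_cspmu_init_ops(struct arm_cspmu *cspmu)
{
	cspmu->impl.ops.validate_event = foo_cspmu_validate_event;

	return 0;
}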