[kvm-unit-tests,v3,03/11] x86: pmu: Add asserts to warn inconsistent fixed events and counters

Message ID 20240103031409.2504051-4-dapeng1.mi@linux.intel.com (mailing list archive)
State New, archived
Series pmu test bugs fix and improvements

Commit Message

Mi, Dapeng Jan. 3, 2024, 3:14 a.m. UTC
The current PMU code doesn't check whether the number of PMU fixed
counters exceeds the number of pre-defined fixed events. If it does,
memory accesses go out of range.

So add asserts to warn about this invalid case.

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
 x86/pmu.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

Comments

Mingwei Zhang March 27, 2024, 5:30 a.m. UTC | #1
On Wed, Jan 03, 2024, Dapeng Mi wrote:
> The current PMU code doesn't check whether the number of PMU fixed
> counters exceeds the number of pre-defined fixed events. If it does,
> memory accesses go out of range.
> 
> So add asserts to warn about this invalid case.
> 
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Reviewed-by: Mingwei Zhang <mizhang@google.com>
> ---
>  x86/pmu.c | 10 ++++++++--
>  1 file changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/x86/pmu.c b/x86/pmu.c
> index a13b8a8398c6..a42fff8d8b36 100644
> --- a/x86/pmu.c
> +++ b/x86/pmu.c
> @@ -111,8 +111,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
>  		for (i = 0; i < gp_events_size; i++)
>  			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
>  				return &gp_events[i];
> -	} else
> -		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
> +	} else {
> +		int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
maybe unsigned int is better?
> +
> +		assert(idx < ARRAY_SIZE(fixed_events));
> +		return &fixed_events[idx];
> +	}
>  
>  	return (void*)0;
>  }
> @@ -245,6 +249,7 @@ static void check_fixed_counters(void)
>  	};
>  	int i;
>  
> +	assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
>  	for (i = 0; i < pmu.nr_fixed_counters; i++) {
>  		cnt.ctr = fixed_events[i].unit_sel;
>  		measure_one(&cnt);
> @@ -266,6 +271,7 @@ static void check_counters_many(void)
>  			gp_events[i % gp_events_size].unit_sel;
>  		n++;
>  	}
> +	assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
>  	for (i = 0; i < pmu.nr_fixed_counters; i++) {
>  		cnt[n].ctr = fixed_events[i].unit_sel;
>  		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
> -- 
> 2.34.1
>
Mi, Dapeng March 27, 2024, 6:43 a.m. UTC | #2
On 3/27/2024 1:30 PM, Mingwei Zhang wrote:
> On Wed, Jan 03, 2024, Dapeng Mi wrote:
>> The current PMU code doesn't check whether the number of PMU fixed
>> counters exceeds the number of pre-defined fixed events. If it does,
>> memory accesses go out of range.
>>
>> So add asserts to warn about this invalid case.
>>
>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> Reviewed-by: Mingwei Zhang <mizhang@google.com>
>> ---
>>   x86/pmu.c | 10 ++++++++--
>>   1 file changed, 8 insertions(+), 2 deletions(-)
>>
>> diff --git a/x86/pmu.c b/x86/pmu.c
>> index a13b8a8398c6..a42fff8d8b36 100644
>> --- a/x86/pmu.c
>> +++ b/x86/pmu.c
>> @@ -111,8 +111,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
>>   		for (i = 0; i < gp_events_size; i++)
>>   			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
>>   				return &gp_events[i];
>> -	} else
>> -		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
>> +	} else {
>> +		int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
> maybe unsigned int is better?

Makes sense. Thanks for the review.
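
For illustration, a minimal sketch of what the suggested tweak could look like (hypothetical; the exact change in the respin may differ):

	} else {
		unsigned int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;

		/*
		 * With an unsigned index, a ctr value below
		 * MSR_CORE_PERF_FIXED_CTR0 wraps to a huge number and still
		 * trips the bound check, and the signed/unsigned comparison
		 * against ARRAY_SIZE() goes away.
		 */
		assert(idx < ARRAY_SIZE(fixed_events));
		return &fixed_events[idx];
	}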

>> +
>> +		assert(idx < ARRAY_SIZE(fixed_events));
>> +		return &fixed_events[idx];
>> +	}
>>   
>>   	return (void*)0;
>>   }
>> @@ -245,6 +249,7 @@ static void check_fixed_counters(void)
>>   	};
>>   	int i;
>>   
>> +	assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
>>   	for (i = 0; i < pmu.nr_fixed_counters; i++) {
>>   		cnt.ctr = fixed_events[i].unit_sel;
>>   		measure_one(&cnt);
>> @@ -266,6 +271,7 @@ static void check_counters_many(void)
>>   			gp_events[i % gp_events_size].unit_sel;
>>   		n++;
>>   	}
>> +	assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
>>   	for (i = 0; i < pmu.nr_fixed_counters; i++) {
>>   		cnt[n].ctr = fixed_events[i].unit_sel;
>>   		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
>> -- 
>> 2.34.1
>>
Jim Mattson March 27, 2024, 1:11 p.m. UTC | #3
On Tue, Jan 2, 2024 at 7:09 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>
> The current PMU code doesn't check whether the number of PMU fixed
> counters exceeds the number of pre-defined fixed events. If it does,
> memory accesses go out of range.
>
> So add asserts to warn about this invalid case.
>
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> ---
>  x86/pmu.c | 10 ++++++++--
>  1 file changed, 8 insertions(+), 2 deletions(-)
>
> diff --git a/x86/pmu.c b/x86/pmu.c
> index a13b8a8398c6..a42fff8d8b36 100644
> --- a/x86/pmu.c
> +++ b/x86/pmu.c
> @@ -111,8 +111,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
>                 for (i = 0; i < gp_events_size; i++)
>                         if (gp_events[i].unit_sel == (cnt->config & 0xffff))
>                                 return &gp_events[i];
> -       } else
> -               return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
> +       } else {
> +               int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
> +
> +               assert(idx < ARRAY_SIZE(fixed_events));
> +               return &fixed_events[idx];
> +       }
>
>         return (void*)0;
>  }
> @@ -245,6 +249,7 @@ static void check_fixed_counters(void)
>         };
>         int i;
>
> +       assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
>         for (i = 0; i < pmu.nr_fixed_counters; i++) {
>                 cnt.ctr = fixed_events[i].unit_sel;
>                 measure_one(&cnt);
> @@ -266,6 +271,7 @@ static void check_counters_many(void)
>                         gp_events[i % gp_events_size].unit_sel;
>                 n++;
>         }
> +       assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));

Can we assert this just once, in main()?

>         for (i = 0; i < pmu.nr_fixed_counters; i++) {
>                 cnt[n].ctr = fixed_events[i].unit_sel;
>                 cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
> --
> 2.34.1
>
Mi, Dapeng March 28, 2024, 9:29 a.m. UTC | #4
On 3/27/2024 9:11 PM, Jim Mattson wrote:
> On Tue, Jan 2, 2024 at 7:09 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>> The current PMU code doesn't check whether the number of PMU fixed
>> counters exceeds the number of pre-defined fixed events. If it does,
>> memory accesses go out of range.
>>
>> So add asserts to warn about this invalid case.
>>
>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
>> ---
>>   x86/pmu.c | 10 ++++++++--
>>   1 file changed, 8 insertions(+), 2 deletions(-)
>>
>> diff --git a/x86/pmu.c b/x86/pmu.c
>> index a13b8a8398c6..a42fff8d8b36 100644
>> --- a/x86/pmu.c
>> +++ b/x86/pmu.c
>> @@ -111,8 +111,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
>>                  for (i = 0; i < gp_events_size; i++)
>>                          if (gp_events[i].unit_sel == (cnt->config & 0xffff))
>>                                  return &gp_events[i];
>> -       } else
>> -               return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
>> +       } else {
>> +               int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
>> +
>> +               assert(idx < ARRAY_SIZE(fixed_events));
>> +               return &fixed_events[idx];
>> +       }
>>
>>          return (void*)0;
>>   }
>> @@ -245,6 +249,7 @@ static void check_fixed_counters(void)
>>          };
>>          int i;
>>
>> +       assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
>>          for (i = 0; i < pmu.nr_fixed_counters; i++) {
>>                  cnt.ctr = fixed_events[i].unit_sel;
>>                  measure_one(&cnt);
>> @@ -266,6 +271,7 @@ static void check_counters_many(void)
>>                          gp_events[i % gp_events_size].unit_sel;
>>                  n++;
>>          }
>> +       assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
> Can we assert this just once, in main()?
Sure, will do.
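
For reference, one possible shape for that single assert, sketched against main() in x86/pmu.c (surrounding code elided with "..."; the placement and the check_counters() call are only illustrative):

	int main(int ac, char **av)
	{
		...
		/*
		 * Validate once, before any test runs, that the reported fixed
		 * counter count fits in the fixed_events[] table, instead of
		 * repeating the assert in each check_* helper.
		 */
		assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));

		check_counters();
		...
	}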
>
>>          for (i = 0; i < pmu.nr_fixed_counters; i++) {
>>                  cnt[n].ctr = fixed_events[i].unit_sel;
>>                  cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
>> --
>> 2.34.1
>>
Patch

diff --git a/x86/pmu.c b/x86/pmu.c
index a13b8a8398c6..a42fff8d8b36 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -111,8 +111,12 @@  static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
 		for (i = 0; i < gp_events_size; i++)
 			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
 				return &gp_events[i];
-	} else
-		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
+	} else {
+		int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
+
+		assert(idx < ARRAY_SIZE(fixed_events));
+		return &fixed_events[idx];
+	}
 
 	return (void*)0;
 }
@@ -245,6 +249,7 @@  static void check_fixed_counters(void)
 	};
 	int i;
 
+	assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
 	for (i = 0; i < pmu.nr_fixed_counters; i++) {
 		cnt.ctr = fixed_events[i].unit_sel;
 		measure_one(&cnt);
@@ -266,6 +271,7 @@  static void check_counters_many(void)
 			gp_events[i % gp_events_size].unit_sel;
 		n++;
 	}
+	assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
 	for (i = 0; i < pmu.nr_fixed_counters; i++) {
 		cnt[n].ctr = fixed_events[i].unit_sel;
 		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;