diff mbox series

[v5,06/18] x86: pmu: Add asserts to warn inconsistent fixed events and counters

Message ID 20240703095712.64202-7-dapeng1.mi@linux.intel.com (mailing list archive)
State New, archived
Headers show
Series pmu test bugs fix and improvements | expand

Commit Message

Mi, Dapeng July 3, 2024, 9:57 a.m. UTC
Current PMU code doesn't check whether the PMU fixed counter number is
larger than the number of pre-defined fixed events. If it is, it would
cause an out-of-range memory access.

So add asserts to warn about this invalid case.

Reviewed-by: Mingwei Zhang <mizhang@google.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
 x86/pmu.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

Comments

Jim Mattson Aug. 22, 2024, 6:22 p.m. UTC | #1
On Tue, Jul 2, 2024 at 7:12 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>
> Current PMU code deosn't check whether PMU fixed counter number is
> larger than pre-defined fixed events. If so, it would cause memory
> access out of range.
>
> So add assert to warn this invalid case.
>
> Reviewed-by: Mingwei Zhang <mizhang@google.com>
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> ---
>  x86/pmu.c | 10 ++++++++--
>  1 file changed, 8 insertions(+), 2 deletions(-)
>
> diff --git a/x86/pmu.c b/x86/pmu.c
> index b4de2680..3e0bf3a2 100644
> --- a/x86/pmu.c
> +++ b/x86/pmu.c
> @@ -113,8 +113,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
>                 for (i = 0; i < gp_events_size; i++)
>                         if (gp_events[i].unit_sel == (cnt->config & 0xffff))
>                                 return &gp_events[i];
> -       } else
> -               return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
> +       } else {
> +               unsigned int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
> +
> +               assert(idx < ARRAY_SIZE(fixed_events));

Won't this assertion result in a failure on bare metal, for CPUs
supporting fixed counter 3?

> +               return &fixed_events[idx];
> +       }
>
>         return (void*)0;
>  }
> @@ -740,6 +744,8 @@ int main(int ac, char **av)
>         printf("Fixed counters:      %d\n", pmu.nr_fixed_counters);
>         printf("Fixed counter width: %d\n", pmu.fixed_counter_width);
>
> +       assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
> +

And this one as well?

>         apic_write(APIC_LVTPC, PMI_VECTOR);
>
>         check_counters();
> --
> 2.40.1
>
Mi, Dapeng Aug. 26, 2024, 6:56 a.m. UTC | #2
On 8/23/2024 2:22 AM, Jim Mattson wrote:
> On Tue, Jul 2, 2024 at 7:12 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>> Current PMU code deosn't check whether PMU fixed counter number is
>> larger than pre-defined fixed events. If so, it would cause memory
>> access out of range.
>>
>> So add assert to warn this invalid case.
>>
>> Reviewed-by: Mingwei Zhang <mizhang@google.com>
>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
>> ---
>>  x86/pmu.c | 10 ++++++++--
>>  1 file changed, 8 insertions(+), 2 deletions(-)
>>
>> diff --git a/x86/pmu.c b/x86/pmu.c
>> index b4de2680..3e0bf3a2 100644
>> --- a/x86/pmu.c
>> +++ b/x86/pmu.c
>> @@ -113,8 +113,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
>>                 for (i = 0; i < gp_events_size; i++)
>>                         if (gp_events[i].unit_sel == (cnt->config & 0xffff))
>>                                 return &gp_events[i];
>> -       } else
>> -               return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
>> +       } else {
>> +               unsigned int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
>> +
>> +               assert(idx < ARRAY_SIZE(fixed_events));
> Won't this assertion result in a failure on bare metal, for CPUs
> supporting fixed counter 3?

Yes, this is the intended use. Currently KVM vPMU still doesn't support fixed
counter 3. If it's supported in KVM vPMU one day but the corresponding support
is forgotten in this pmu test, this assert would serve as a reminder.


>
>> +               return &fixed_events[idx];
>> +       }
>>
>>         return (void*)0;
>>  }
>> @@ -740,6 +744,8 @@ int main(int ac, char **av)
>>         printf("Fixed counters:      %d\n", pmu.nr_fixed_counters);
>>         printf("Fixed counter width: %d\n", pmu.fixed_counter_width);
>>
>> +       assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
>> +
> And this one as well?
>
>>         apic_write(APIC_LVTPC, PMI_VECTOR);
>>
>>         check_counters();
>> --
>> 2.40.1
>>
Jim Mattson Aug. 26, 2024, 6:36 p.m. UTC | #3
On Sun, Aug 25, 2024 at 11:56 PM Mi, Dapeng <dapeng1.mi@linux.intel.com> wrote:
>
>
> On 8/23/2024 2:22 AM, Jim Mattson wrote:
> > On Tue, Jul 2, 2024 at 7:12 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
> >> Current PMU code deosn't check whether PMU fixed counter number is
> >> larger than pre-defined fixed events. If so, it would cause memory
> >> access out of range.
> >>
> >> So add assert to warn this invalid case.
> >>
> >> Reviewed-by: Mingwei Zhang <mizhang@google.com>
> >> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> >> ---
> >>  x86/pmu.c | 10 ++++++++--
> >>  1 file changed, 8 insertions(+), 2 deletions(-)
> >>
> >> diff --git a/x86/pmu.c b/x86/pmu.c
> >> index b4de2680..3e0bf3a2 100644
> >> --- a/x86/pmu.c
> >> +++ b/x86/pmu.c
> >> @@ -113,8 +113,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
> >>                 for (i = 0; i < gp_events_size; i++)
> >>                         if (gp_events[i].unit_sel == (cnt->config & 0xffff))
> >>                                 return &gp_events[i];
> >> -       } else
> >> -               return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
> >> +       } else {
> >> +               unsigned int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
> >> +
> >> +               assert(idx < ARRAY_SIZE(fixed_events));
> > Won't this assertion result in a failure on bare metal, for CPUs
> > supporting fixed counter 3?
>
> Yes, this is intended use. Currently KVM vPMU still doesn't support fixed
> counter 3. If it's supported in KVM vPMU one day but forget to add
> corresponding support in this pmu test, this assert would remind this.

These tests are supposed to run (and pass) on bare metal. Hence, they
should not be dependent on a non-architectural quirk of the KVM
implementation.

Perhaps a warning would serve as a reminder?

>
> >
> >> +               return &fixed_events[idx];
> >> +       }
> >>
> >>         return (void*)0;
> >>  }
> >> @@ -740,6 +744,8 @@ int main(int ac, char **av)
> >>         printf("Fixed counters:      %d\n", pmu.nr_fixed_counters);
> >>         printf("Fixed counter width: %d\n", pmu.fixed_counter_width);
> >>
> >> +       assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
> >> +
> > And this one as well?
> >
> >>         apic_write(APIC_LVTPC, PMI_VECTOR);
> >>
> >>         check_counters();
> >> --
> >> 2.40.1
> >>
Mi, Dapeng Aug. 27, 2024, 12:41 a.m. UTC | #4
On 8/27/2024 2:36 AM, Jim Mattson wrote:
> On Sun, Aug 25, 2024 at 11:56 PM Mi, Dapeng <dapeng1.mi@linux.intel.com> wrote:
>>
>> On 8/23/2024 2:22 AM, Jim Mattson wrote:
>>> On Tue, Jul 2, 2024 at 7:12 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>>>> Current PMU code deosn't check whether PMU fixed counter number is
>>>> larger than pre-defined fixed events. If so, it would cause memory
>>>> access out of range.
>>>>
>>>> So add assert to warn this invalid case.
>>>>
>>>> Reviewed-by: Mingwei Zhang <mizhang@google.com>
>>>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
>>>> ---
>>>>  x86/pmu.c | 10 ++++++++--
>>>>  1 file changed, 8 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/x86/pmu.c b/x86/pmu.c
>>>> index b4de2680..3e0bf3a2 100644
>>>> --- a/x86/pmu.c
>>>> +++ b/x86/pmu.c
>>>> @@ -113,8 +113,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
>>>>                 for (i = 0; i < gp_events_size; i++)
>>>>                         if (gp_events[i].unit_sel == (cnt->config & 0xffff))
>>>>                                 return &gp_events[i];
>>>> -       } else
>>>> -               return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
>>>> +       } else {
>>>> +               unsigned int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
>>>> +
>>>> +               assert(idx < ARRAY_SIZE(fixed_events));
>>> Won't this assertion result in a failure on bare metal, for CPUs
>>> supporting fixed counter 3?
>> Yes, this is intended use. Currently KVM vPMU still doesn't support fixed
>> counter 3. If it's supported in KVM vPMU one day but forget to add
>> corresponding support in this pmu test, this assert would remind this.
> These tests are supposed to run (and pass) on bare metal. Hence, they
> should not be dependent on a non-architectural quirk of the KVM
> implementation.
>
> Perhaps a warning would serve as a reminder?

Sounds reasonable. I'll change it to a warning. Thanks.


>
>>>> +               return &fixed_events[idx];
>>>> +       }
>>>>
>>>>         return (void*)0;
>>>>  }
>>>> @@ -740,6 +744,8 @@ int main(int ac, char **av)
>>>>         printf("Fixed counters:      %d\n", pmu.nr_fixed_counters);
>>>>         printf("Fixed counter width: %d\n", pmu.fixed_counter_width);
>>>>
>>>> +       assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
>>>> +
>>> And this one as well?
>>>
>>>>         apic_write(APIC_LVTPC, PMI_VECTOR);
>>>>
>>>>         check_counters();
>>>> --
>>>> 2.40.1
>>>>
diff mbox series

Patch

diff --git a/x86/pmu.c b/x86/pmu.c
index b4de2680..3e0bf3a2 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -113,8 +113,12 @@  static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
 		for (i = 0; i < gp_events_size; i++)
 			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
 				return &gp_events[i];
-	} else
-		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];
+	} else {
+		unsigned int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;
+
+		assert(idx < ARRAY_SIZE(fixed_events));
+		return &fixed_events[idx];
+	}
 
 	return (void*)0;
 }
@@ -740,6 +744,8 @@  int main(int ac, char **av)
 	printf("Fixed counters:      %d\n", pmu.nr_fixed_counters);
 	printf("Fixed counter width: %d\n", pmu.fixed_counter_width);
 
+	assert(pmu.nr_fixed_counters <= ARRAY_SIZE(fixed_events));
+
 	apic_write(APIC_LVTPC, PMI_VECTOR);
 
 	check_counters();