[2/6] perf/x86/rapl: Rename rapl_pmu variables

Message ID 20240610100751.4855-3-Dhananjay.Ugwekar@amd.com (mailing list archive)
State Changes Requested
Delegated to: Mario Limonciello
Series Add per-core RAPL energy counter support for AMD CPUs

Commit Message

Dhananjay Ugwekar June 10, 2024, 10:07 a.m. UTC
Rename struct rapl_pmu variables from "pmu" to "rapl_pmu" to avoid
confusion between variables of the two different structs pmu and
rapl_pmu. struct rapl_pmu also contains a pointer to struct pmu,
which leads to code like pmu->pmu, which is needlessly confusing.
With this change, that becomes the much more readable rapl_pmu->pmu.

Also rename the "pmus" member of struct rapl_pmus, for the same reason.

No functional change.

Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
 arch/x86/events/rapl.c | 104 ++++++++++++++++++++---------------------
 1 file changed, 52 insertions(+), 52 deletions(-)
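
To see the collision this rename resolves, consider a minimal standalone
sketch (stub types for illustration only, not code from rapl.c):

	/* Stub stand-in for the perf core's struct pmu. */
	struct pmu { int type; };

	struct rapl_pmu {
		struct pmu *pmu;	/* back-pointer to the shared struct pmu */
	};

	static struct pmu *get_core_pmu(struct rapl_pmu *pmu)
	{
		return pmu->pmu;	/* old naming: "pmu" is a rapl_pmu here, easy to misread */
	}

	static struct pmu *get_core_pmu_renamed(struct rapl_pmu *rapl_pmu)
	{
		return rapl_pmu->pmu;	/* new naming: reads naturally */
	}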

Comments

Zhang Rui June 11, 2024, 5:43 a.m. UTC | #1
On Mon, 2024-06-10 at 10:07 +0000, Dhananjay Ugwekar wrote:
> Rename struct rapl_pmu variables from "pmu" to "rapl_pmu" to avoid
> confusion between variables of the two different structs pmu and
> rapl_pmu. struct rapl_pmu also contains a pointer to struct pmu,
> which leads to code like pmu->pmu, which is needlessly confusing.
> With this change, that becomes the much more readable rapl_pmu->pmu.
> 
> Also rename the "pmus" member of struct rapl_pmus, for the same reason.
> 

As you are adding a new per-core PMU, can we just rename the current
rapl_pmus to something like rapl_pkg_pmus (as I mentioned in the
previous email, the current RAPL MSRs can be considered package scope
on Intel platforms as well), and name the new one rapl_core_pmus?

IMO, "rapl_pmus" + "rapl_pmus_per_core" is still confusing.

thanks,
rui
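
In other words, the suggestion amounts to something like the following
(a sketch of the proposed names only, not code from this series):

	struct rapl_pmus;				/* as defined in arch/x86/events/rapl.c */
	static struct rapl_pmus *rapl_pkg_pmus;		/* today's package-scope RAPL PMUs */
	static struct rapl_pmus *rapl_core_pmus;	/* the new per-core RAPL PMUs */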

Dhananjay Ugwekar June 11, 2024, 8:33 a.m. UTC | #2
Hello Rui,

On 6/11/2024 11:13 AM, Zhang, Rui wrote:
> As you are adding a new per-core PMU, can we just rename the current
> rapl_pmus to something like rapl_pkg_pmus (as I mentioned in the
> previous email, the current RAPL MSRs can be considered package scope
> on Intel platforms as well), and name the new one rapl_core_pmus?

Sure, this makes sense; I will modify the original "rapl_pmus" variable
name. But please note that the renaming I refer to in this patch is
limited to the local "struct rapl_pmu" variables used inside functions,
not the global variable name you mention.

Regards,
Dhananjay

Patch

diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 73be25e1f4b4..b4e2073a178e 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -120,7 +120,7 @@  struct rapl_pmu {
 struct rapl_pmus {
 	struct pmu		pmu;
 	unsigned int		nr_rapl_pmu;
-	struct rapl_pmu		*pmus[] __counted_by(nr_rapl_pmu);
+	struct rapl_pmu		*rapl_pmu[] __counted_by(nr_rapl_pmu);
 };
 
 enum rapl_unit_quirk {
@@ -164,7 +164,7 @@  static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 	 * The unsigned check also catches the '-1' return value for non
 	 * existent mappings in the topology map.
 	 */
-	return rapl_pmu_idx < rapl_pmus->nr_rapl_pmu ? rapl_pmus->pmus[rapl_pmu_idx] : NULL;
+	return rapl_pmu_idx < rapl_pmus->nr_rapl_pmu ? rapl_pmus->rapl_pmu[rapl_pmu_idx] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -228,34 +228,34 @@  static void rapl_start_hrtimer(struct rapl_pmu *pmu)
 
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
-	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
+	struct rapl_pmu *rapl_pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
 	struct perf_event *event;
 	unsigned long flags;
 
-	if (!pmu->n_active)
+	if (!rapl_pmu->n_active)
 		return HRTIMER_NORESTART;
 
-	raw_spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
 
-	list_for_each_entry(event, &pmu->active_list, active_entry)
+	list_for_each_entry(event, &rapl_pmu->active_list, active_entry)
 		rapl_event_update(event);
 
-	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
 
-	hrtimer_forward_now(hrtimer, pmu->timer_interval);
+	hrtimer_forward_now(hrtimer, rapl_pmu->timer_interval);
 
 	return HRTIMER_RESTART;
 }
 
-static void rapl_hrtimer_init(struct rapl_pmu *pmu)
+static void rapl_hrtimer_init(struct rapl_pmu *rapl_pmu)
 {
-	struct hrtimer *hr = &pmu->hrtimer;
+	struct hrtimer *hr = &rapl_pmu->hrtimer;
 
 	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hr->function = rapl_hrtimer_handle;
 }
 
-static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
+static void __rapl_pmu_event_start(struct rapl_pmu *rapl_pmu,
 				   struct perf_event *event)
 {
 	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -263,39 +263,39 @@  static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
 
 	event->hw.state = 0;
 
-	list_add_tail(&event->active_entry, &pmu->active_list);
+	list_add_tail(&event->active_entry, &rapl_pmu->active_list);
 
 	local64_set(&event->hw.prev_count, rapl_read_counter(event));
 
-	pmu->n_active++;
-	if (pmu->n_active == 1)
-		rapl_start_hrtimer(pmu);
+	rapl_pmu->n_active++;
+	if (rapl_pmu->n_active == 1)
+		rapl_start_hrtimer(rapl_pmu);
 }
 
 static void rapl_pmu_event_start(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = event->pmu_private;
+	struct rapl_pmu *rapl_pmu = event->pmu_private;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&pmu->lock, flags);
-	__rapl_pmu_event_start(pmu, event);
-	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
+	__rapl_pmu_event_start(rapl_pmu, event);
+	raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
 }
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = event->pmu_private;
+	struct rapl_pmu *rapl_pmu = event->pmu_private;
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
 
 	/* mark event as deactivated and stopped */
 	if (!(hwc->state & PERF_HES_STOPPED)) {
-		WARN_ON_ONCE(pmu->n_active <= 0);
-		pmu->n_active--;
-		if (pmu->n_active == 0)
-			hrtimer_cancel(&pmu->hrtimer);
+		WARN_ON_ONCE(rapl_pmu->n_active <= 0);
+		rapl_pmu->n_active--;
+		if (rapl_pmu->n_active == 0)
+			hrtimer_cancel(&rapl_pmu->hrtimer);
 
 		list_del(&event->active_entry);
 
@@ -313,23 +313,23 @@  static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 		hwc->state |= PERF_HES_UPTODATE;
 	}
 
-	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
 }
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = event->pmu_private;
+	struct rapl_pmu *rapl_pmu = event->pmu_private;
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
 
 	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 
 	if (mode & PERF_EF_START)
-		__rapl_pmu_event_start(pmu, event);
+		__rapl_pmu_event_start(rapl_pmu, event);
 
-	raw_spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
 
 	return 0;
 }
@@ -343,7 +343,7 @@  static int rapl_pmu_event_init(struct perf_event *event)
 {
 	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
 	int bit, ret = 0;
-	struct rapl_pmu *pmu;
+	struct rapl_pmu *rapl_pmu;
 
 	/* only look at RAPL events */
 	if (event->attr.type != rapl_pmus->pmu.type)
@@ -373,11 +373,11 @@  static int rapl_pmu_event_init(struct perf_event *event)
 		return -EINVAL;
 
 	/* must be done before validate_group */
-	pmu = cpu_to_rapl_pmu(event->cpu);
-	if (!pmu)
+	rapl_pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!rapl_pmu)
 		return -EINVAL;
-	event->cpu = pmu->cpu;
-	event->pmu_private = pmu;
+	event->cpu = rapl_pmu->cpu;
+	event->pmu_private = rapl_pmu;
 	event->hw.event_base = rapl_msrs[bit].msr;
 	event->hw.config = cfg;
 	event->hw.idx = bit;
@@ -560,22 +560,22 @@  static struct perf_msr amd_rapl_msrs[] = {
 static int rapl_cpu_offline(unsigned int cpu)
 {
 	const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
+	struct rapl_pmu *rapl_pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
 	/* Check if exiting cpu is used for collecting rapl events */
 	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
 		return 0;
 
-	pmu->cpu = -1;
+	rapl_pmu->cpu = -1;
 	/* Find a new cpu to collect rapl events */
 	target = cpumask_any_but(rapl_pmu_cpumask, cpu);
 
 	/* Migrate rapl events to the new target */
 	if (target < nr_cpu_ids) {
 		cpumask_set_cpu(target, &rapl_cpu_mask);
-		pmu->cpu = target;
-		perf_pmu_migrate_context(pmu->pmu, cpu, target);
+		rapl_pmu->cpu = target;
+		perf_pmu_migrate_context(rapl_pmu->pmu, cpu, target);
 	}
 	return 0;
 }
@@ -584,21 +584,21 @@  static int rapl_cpu_online(unsigned int cpu)
 {
 	unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
 	const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
+	struct rapl_pmu *rapl_pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
-	if (!pmu) {
-		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-		if (!pmu)
+	if (!rapl_pmu) {
+		rapl_pmu = kzalloc_node(sizeof(*rapl_pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!rapl_pmu)
 			return -ENOMEM;
 
-		raw_spin_lock_init(&pmu->lock);
-		INIT_LIST_HEAD(&pmu->active_list);
-		pmu->pmu = &rapl_pmus->pmu;
-		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-		rapl_hrtimer_init(pmu);
+		raw_spin_lock_init(&rapl_pmu->lock);
+		INIT_LIST_HEAD(&rapl_pmu->active_list);
+		rapl_pmu->pmu = &rapl_pmus->pmu;
+		rapl_pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(rapl_pmu);
 
-		rapl_pmus->pmus[rapl_pmu_idx] = pmu;
+		rapl_pmus->rapl_pmu[rapl_pmu_idx] = rapl_pmu;
 	}
 
 	/*
@@ -610,7 +610,7 @@  static int rapl_cpu_online(unsigned int cpu)
 		return 0;
 
 	cpumask_set_cpu(cpu, &rapl_cpu_mask);
-	pmu->cpu = cpu;
+	rapl_pmu->cpu = cpu;
 	return 0;
 }
 
@@ -679,7 +679,7 @@  static void cleanup_rapl_pmus(void)
 	int i;
 
 	for (i = 0; i < rapl_pmus->nr_rapl_pmu; i++)
-		kfree(rapl_pmus->pmus[i]);
+		kfree(rapl_pmus->rapl_pmu[i]);
 	kfree(rapl_pmus);
 }
 
@@ -699,7 +699,7 @@  static int __init init_rapl_pmus(void)
 	if (rapl_pmu_is_pkg_scope())
 		nr_rapl_pmu = topology_max_packages();
 
-	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
+	rapl_pmus = kzalloc(struct_size(rapl_pmus, rapl_pmu, nr_rapl_pmu), GFP_KERNEL);
 	if (!rapl_pmus)
 		return -ENOMEM;
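
As a side note on the final hunk: struct_size(rapl_pmus, rapl_pmu,
nr_rapl_pmu) computes the size of the struct plus its __counted_by
flexible array. A rough userspace sketch of the same allocation pattern
(calloc in place of kzalloc; the kernel's struct_size helper additionally
checks for overflow):

	#include <stdlib.h>

	struct item { int v; };

	struct container {
		unsigned int nr;	/* plays the role of nr_rapl_pmu */
		struct item *slots[];	/* flexible array, like rapl_pmus->rapl_pmu[] */
	};

	static struct container *alloc_container(unsigned int n)
	{
		/* Rough equivalent of struct_size(c, slots, n), minus overflow checking. */
		struct container *c = calloc(1, sizeof(*c) + n * sizeof(c->slots[0]));

		if (c)
			c->nr = n;
		return c;
	}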