KVM: x86/pmu: Add a helper to enable bits in FIXED_CTR_CTRL

Message ID 20240608000819.3296176-1-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: x86/pmu: Add a helper to enable bits in FIXED_CTR_CTRL

Commit Message

Sean Christopherson June 8, 2024, 12:08 a.m. UTC
Add a helper, intel_pmu_enable_fixed_counter_bits(), to dedup code that
enables fixed counter bits, i.e. when KVM clears bits in the reserved mask
used to detect invalid MSR_CORE_PERF_FIXED_CTR_CTRL values.

No functional change intended.

Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)


base-commit: b9adc10edd4e14e66db4f7289a88fdbfa45ae7a8
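
For context, the sketch below is a minimal userspace model of what the new
helper does. Each fixed counter owns a 4-bit control field in
MSR_CORE_PERF_FIXED_CTR_CTRL, and "enabling" a bit means clearing it from the
reserved mask so that a guest write setting it is no longer rejected as
invalid. The struct, macro names, and values here are simplified
re-definitions for illustration only; they mirror, but are not, the kernel's
own intel_fixed_bits_by_idx() and INTEL_FIXED_0_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's macros (illustration only):
 * MSR_CORE_PERF_FIXED_CTR_CTRL gives each fixed counter a 4-bit control
 * field, so bit B of counter i lives at bit (i * 4 + B). */
#define FIXED_BITS_STRIDE	4
#define FIXED_0_KERNEL		(1ULL << 0)	/* count while in ring 0 */
#define FIXED_0_USER		(1ULL << 1)	/* count while in ring 3 */
#define FIXED_0_ENABLE_PMI	(1ULL << 3)	/* raise a PMI on overflow */

#define fixed_bits_by_idx(idx, bits)	((uint64_t)(bits) << ((idx) * FIXED_BITS_STRIDE))

struct toy_pmu {
	int	 nr_arch_fixed_counters;
	uint64_t fixed_ctr_ctrl_rsvd;	/* set bit = reserved, guest write rejected */
};

/* Same shape as the new helper: clear (i.e. "enable") the given control
 * bits in the reserved mask for every supported fixed counter. */
static void enable_fixed_counter_bits(struct toy_pmu *pmu, uint64_t bits)
{
	for (int i = 0; i < pmu->nr_arch_fixed_counters; i++)
		pmu->fixed_ctr_ctrl_rsvd &= ~fixed_bits_by_idx(i, bits);
}

int main(void)
{
	struct toy_pmu pmu = {
		.nr_arch_fixed_counters = 3,
		.fixed_ctr_ctrl_rsvd	= ~0ULL,	/* everything reserved to start */
	};

	enable_fixed_counter_bits(&pmu, FIXED_0_KERNEL | FIXED_0_USER | FIXED_0_ENABLE_PMI);

	/* Low 12 bits are now 0b0100_0100_0100: only the AnyThread bit of each
	 * field stays reserved, so the mask prints as 0xfffffffffffff444. */
	printf("fixed_ctr_ctrl_rsvd = %#llx\n",
	       (unsigned long long)pmu.fixed_ctr_ctrl_rsvd);
	return 0;
}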

Comments

Mi, Dapeng June 11, 2024, 1:45 a.m. UTC | #1
On 6/8/2024 8:08 AM, Sean Christopherson wrote:
> Add a helper, intel_pmu_enable_fixed_counter_bits(), to dedup code that
> enables fixed counter bits, i.e. when KVM clears bits in the reserved mask
> used to detect invalid MSR_CORE_PERF_FIXED_CTR_CTRL values.
>
> No functional change intended.
>
> Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/kvm/vmx/pmu_intel.c | 22 ++++++++++++----------
>  1 file changed, 12 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index e01c87981927..fb5cbd6cbeff 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -448,6 +448,14 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
>  	return eventsel;
>  }
>  
> +static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
> +{
> +	int i;
> +
> +	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
> +		pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
> +}
> +
>  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> @@ -457,7 +465,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>  	union cpuid10_edx edx;
>  	u64 perf_capabilities;
>  	u64 counter_rsvd;
> -	int i;
>  
>  	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
>  
> @@ -501,12 +508,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>  			((u64)1 << edx.split.bit_width_fixed) - 1;
>  	}
>  
> -	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
> -		pmu->fixed_ctr_ctrl_rsvd &=
> -			 ~intel_fixed_bits_by_idx(i,
> -						  INTEL_FIXED_0_KERNEL |
> -						  INTEL_FIXED_0_USER |
> -						  INTEL_FIXED_0_ENABLE_PMI);
> +	intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
> +						 INTEL_FIXED_0_USER |
> +						 INTEL_FIXED_0_ENABLE_PMI);
>  
>  	counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
>  		(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
> @@ -551,10 +555,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>  		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
>  			pmu->pebs_enable_rsvd = counter_rsvd;
>  			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
> -			for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
> -				pmu->fixed_ctr_ctrl_rsvd &=
> -					~intel_fixed_bits_by_idx(i, ICL_FIXED_0_ADAPTIVE);
>  			pmu->pebs_data_cfg_rsvd = ~0xff00000full;
> +			intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
>  		} else {
>  			pmu->pebs_enable_rsvd =
>  				~((1ull << pmu->nr_arch_gp_counters) - 1);
>
> base-commit: b9adc10edd4e14e66db4f7289a88fdbfa45ae7a8

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>

Sean Christopherson June 12, 2024, 1:18 a.m. UTC | #2
On Fri, 07 Jun 2024 17:08:19 -0700, Sean Christopherson wrote:
> Add a helper, intel_pmu_enable_fixed_counter_bits(), to dedup code that
> enables fixed counter bits, i.e. when KVM clears bits in the reserved mask
> used to detect invalid MSR_CORE_PERF_FIXED_CTR_CTRL values.
> 
> No functional change intended.
> 
> 
> [...]

Applied to kvm-x86 pmu, thanks!

[1/1] KVM: x86/pmu: Add a helper to enable bits in FIXED_CTR_CTRL
      https://github.com/kvm-x86/linux/commit/3b65a692a5c7

--
https://github.com/kvm-x86/linux/tree/next

Patch

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index e01c87981927..fb5cbd6cbeff 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -448,6 +448,14 @@  static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
 	return eventsel;
 }
 
+static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
+{
+	int i;
+
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+		pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
+}
+
 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -457,7 +465,6 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	union cpuid10_edx edx;
 	u64 perf_capabilities;
 	u64 counter_rsvd;
-	int i;
 
 	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
 
@@ -501,12 +508,9 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			((u64)1 << edx.split.bit_width_fixed) - 1;
 	}
 
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-		pmu->fixed_ctr_ctrl_rsvd &=
-			 ~intel_fixed_bits_by_idx(i,
-						  INTEL_FIXED_0_KERNEL |
-						  INTEL_FIXED_0_USER |
-						  INTEL_FIXED_0_ENABLE_PMI);
+	intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
+						 INTEL_FIXED_0_USER |
+						 INTEL_FIXED_0_ENABLE_PMI);
 
 	counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
@@ -551,10 +555,8 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
 			pmu->pebs_enable_rsvd = counter_rsvd;
 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
-			for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-				pmu->fixed_ctr_ctrl_rsvd &=
-					~intel_fixed_bits_by_idx(i, ICL_FIXED_0_ADAPTIVE);
 			pmu->pebs_data_cfg_rsvd = ~0xff00000full;
+			intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
 		} else {
 			pmu->pebs_enable_rsvd =
 				~((1ull << pmu->nr_arch_gp_counters) - 1);