
[6/9] KVM: x86/pmu: Add Intel PMU supported fixed counters mask

Message ID 20230901072809.640175-7-xiong.y.zhang@intel.com (mailing list archive)
State New, archived
Series Upgrade vPMU version to 5

Commit Message

Zhang, Xiong Y Sept. 1, 2023, 7:28 a.m. UTC
From: Like Xu <likexu@tencent.com>

Per the Intel SDM, fixed-function performance counter 'i' is supported if:

	FxCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i);

If pmu.version >= 5, ECX is the bit mask of supported fixed counters.
If 1 < pmu.version < 5, EDX[4:0] is the number of contiguous
fixed-function performance counters starting from 0.

This means that KVM user space can use EDX to limit the number of fixed
counters starting from 0 and, at the same time, use ECX to enable a
subset of the remaining KVM-supported fixed counters. For example, with
pmu.version = 5, ECX = 0x5 and EDX[4:0] = 1, FxCtr[0] and FxCtr[2] are
supported while FxCtr[1] is not.
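
For illustration only (a hypothetical user-space sketch, not part of
this patch), the check above can be written as follows, using the
example values ECX = 0x5 and EDX[4:0] = 1:

	#include <stdbool.h>
	#include <stdio.h>

	/* FxCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i) */
	static bool fxctr_is_supported(unsigned int i, unsigned int ecx,
				       unsigned int edx)
	{
		unsigned int nr_contiguous = edx & 0x1f;	/* EDX[4:0] */

		return (ecx & (1u << i)) || nr_contiguous > i;
	}

	int main(void)
	{
		unsigned int ecx = 0x5, edx = 0x1;

		for (unsigned int i = 0; i < 3; i++)
			printf("FxCtr[%u]: %ssupported\n", i,
			       fxctr_is_supported(i, ecx, edx) ? "" : "not ");
		/* FxCtr[0]: supported, FxCtr[1]: not supported,
		 * FxCtr[2]: supported */
		return 0;
	}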

Add the fixed counter bit mask to all_valid_pmc_idx and use it to
perform the semantic checks.

Since the supported fixed counters may be non-contiguous,
nr_arch_fixed_counters can no longer be used to enumerate them. Use
for_each_set_bit_from() to enumerate the fixed counters instead, and
delete nr_arch_fixed_counters.
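
As a rough user-space analogue (for illustration only; the patch itself
uses the kernel's for_each_set_bit_from() over all_valid_pmc_idx, and
the names below are local to this example), walking a possibly sparse
set of fixed counters looks like:

	#include <stdio.h>

	/* Fixed counters start at bit 32 of the PMC index space. */
	#define PMC_IDX_FIXED	32
	#define MAX_FIXED	3	/* width chosen for this example */

	int main(void)
	{
		/* Example: GP counters 0-3 plus fixed counters 0 and 2. */
		unsigned long long valid_pmc_idx = 0xfULL |
			(1ULL << (PMC_IDX_FIXED + 0)) |
			(1ULL << (PMC_IDX_FIXED + 2));

		for (int s = PMC_IDX_FIXED; s < PMC_IDX_FIXED + MAX_FIXED; s++) {
			if (!(valid_pmc_idx & (1ULL << s)))
				continue;	/* fixed counter not supported */
			printf("fixed counter %d is valid\n", s - PMC_IDX_FIXED);
		}
		return 0;
	}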

Signed-off-by: Like Xu <likexu@tencent.com>
Signed-off-by: Xiong Zhang <xiong.y.zhang@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/pmu.h              | 12 +++++-
 arch/x86/kvm/svm/pmu.c          |  1 -
 arch/x86/kvm/vmx/pmu_intel.c    | 69 ++++++++++++++++++++-------------
 4 files changed, 52 insertions(+), 31 deletions(-)

Comments

Mi, Dapeng Sept. 6, 2023, 10:08 a.m. UTC | #1
On 9/1/2023 3:28 PM, Xiong Zhang wrote:
> From: Like Xu <likexu@tencent.com>
>
> [...]
>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9f57aa33798b..ceba4f89dec5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -515,7 +515,6 @@  struct kvm_pmc {
 struct kvm_pmu {
 	u8 version;
 	unsigned nr_arch_gp_counters;
-	unsigned nr_arch_fixed_counters;
 	unsigned available_event_types;
 	u64 fixed_ctr_ctrl;
 	u64 fixed_ctr_ctrl_mask;
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 7d9ba301c090..4bab4819ea6c 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -125,14 +125,22 @@  static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
 	return NULL;
 }
 
+static inline bool fixed_ctr_is_supported(struct kvm_pmu *pmu, unsigned int idx)
+{
+	return test_bit(INTEL_PMC_IDX_FIXED + idx, pmu->all_valid_pmc_idx);
+}
+
 /* returns fixed PMC with the specified MSR */
 static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
 {
 	int base = MSR_CORE_PERF_FIXED_CTR0;
 
-	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
+	if (msr >= base && msr < base + KVM_PMC_MAX_FIXED) {
 		u32 index = array_index_nospec(msr - base,
-					       pmu->nr_arch_fixed_counters);
+					       KVM_PMC_MAX_FIXED);
+
+		if (!fixed_ctr_is_supported(pmu, index))
+			return NULL;
 
 		return &pmu->fixed_counters[index];
 	}
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index cef5a3d0abd0..d0a12e739989 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -213,7 +213,6 @@  static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
 	/* not applicable to AMD; but clean them to prevent any fall out */
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
-	pmu->nr_arch_fixed_counters = 0;
 	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
 }
 
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 46363ac82a79..ce6d06ec562c 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -72,10 +72,12 @@  static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
 	struct kvm_pmc *pmc;
 	u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
-	int i;
+	int s = INTEL_PMC_IDX_FIXED;
 
 	pmu->fixed_ctr_ctrl = data;
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+	for_each_set_bit_from(s, pmu->all_valid_pmc_idx,
+			      INTEL_PMC_IDX_FIXED + INTEL_PMC_MAX_FIXED) {
+		int i = s - INTEL_PMC_IDX_FIXED;
 		u8 new_ctrl = fixed_ctrl_field(data, i);
 		u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);
 
@@ -132,7 +134,7 @@  static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 
 	idx &= ~(3u << 30);
 
-	return fixed ? idx < pmu->nr_arch_fixed_counters
+	return fixed ? fixed_ctr_is_supported(pmu, idx)
 		     : idx < pmu->nr_arch_gp_counters;
 }
 
@@ -144,16 +146,17 @@  static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	struct kvm_pmc *counters;
 	unsigned int num_counters;
 
+	if (!intel_is_valid_rdpmc_ecx(vcpu, idx))
+		return NULL;
+
 	idx &= ~(3u << 30);
 	if (fixed) {
 		counters = pmu->fixed_counters;
-		num_counters = pmu->nr_arch_fixed_counters;
+		num_counters = KVM_PMC_MAX_FIXED;
 	} else {
 		counters = pmu->gp_counters;
 		num_counters = pmu->nr_arch_gp_counters;
 	}
-	if (idx >= num_counters)
-		return NULL;
 	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
 	return &counters[array_index_nospec(idx, num_counters)];
 }
@@ -352,6 +355,7 @@  static u64 intel_pmu_global_inuse_emulation(struct kvm_pmu *pmu)
 {
 	u64 data = 0;
 	int i;
+	int s = INTEL_PMC_IDX_FIXED;
 
 	for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
 		struct kvm_pmc *pmc = &pmu->gp_counters[i];
@@ -371,7 +375,10 @@  static u64 intel_pmu_global_inuse_emulation(struct kvm_pmu *pmu)
 			data |= MSR_CORE_PERF_GLOBAL_INUSE_PMI;
 	}
 
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+	for_each_set_bit_from(s, pmu->all_valid_pmc_idx,
+			      INTEL_PMC_IDX_FIXED + INTEL_PMC_MAX_FIXED) {
+		i = s - INTEL_PMC_IDX_FIXED;
+
 		/*
 		 * IA32_PERF_GLOBAL_INUSE.FCi_InUse[bit (i + 32)]: This bit
 		 * reflects the logical state of
@@ -379,7 +386,7 @@  static u64 intel_pmu_global_inuse_emulation(struct kvm_pmu *pmu)
 		 */
 		if (pmu->fixed_ctr_ctrl &
 		    intel_fixed_bits_by_idx(i, INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER))
-			data |= 1ULL << (i + INTEL_PMC_IDX_FIXED);
+			data |= 1ULL << s;
 		/*
 		 * IA32_PERF_GLOBAL_INUSE.PMI_InUse[bit 63]: This bit is set if
 		 * IA32_FIXED_CTR_CTRL.ENi_PMI, i = 0, 1, 2 is set.
@@ -565,12 +572,14 @@  static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
 {
-	int i;
+	int s = INTEL_PMC_IDX_FIXED;
 
 	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);
 
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-		int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
+	for_each_set_bit_from(s, pmu->all_valid_pmc_idx,
+			      INTEL_PMC_IDX_FIXED + INTEL_PMC_MAX_FIXED) {
+		int index = array_index_nospec(s - INTEL_PMC_IDX_FIXED,
+					       KVM_PMC_MAX_FIXED);
 		struct kvm_pmc *pmc = &pmu->fixed_counters[index];
 		u32 event = fixed_pmc_events[index];
 
@@ -591,7 +600,6 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	int i;
 
 	pmu->nr_arch_gp_counters = 0;
-	pmu->nr_arch_fixed_counters = 0;
 	pmu->counter_bitmask[KVM_PMC_GP] = 0;
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
 	pmu->version = 0;
@@ -633,11 +641,22 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->available_event_types = ~entry->ebx &
 					((1ull << eax.split.mask_length) - 1);
 
-	if (pmu->version == 1) {
-		pmu->nr_arch_fixed_counters = 0;
-	} else {
-		pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
-						    kvm_pmu_cap.num_counters_fixed);
+	counter_mask = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
+	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
+
+	if (pmu->version > 1) {
+		for (i = 0; i < kvm_pmu_cap.num_counters_fixed; i++) {
+			/*
+			 * FxCtr[i]_is_supported :=
+			 *	CPUID.0xA.ECX[i] || EDX[4:0] > i
+			 */
+			if (!(entry->ecx & BIT_ULL(i) || edx.split.num_counters_fixed > i))
+				continue;
+
+			set_bit(INTEL_PMC_IDX_FIXED + i, pmu->all_valid_pmc_idx);
+			pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
+			counter_mask &= ~BIT_ULL(INTEL_PMC_IDX_FIXED + i);
+		}
 		edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
 						  kvm_pmu_cap.bit_width_fixed);
 		pmu->counter_bitmask[KVM_PMC_FIXED] =
@@ -645,10 +664,6 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		setup_fixed_pmc_eventsel(pmu);
 	}
 
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
-	counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
-		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
 	pmu->global_ctrl_mask = counter_mask;
 
 	/*
@@ -674,11 +689,6 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
 	}
 
-	bitmap_set(pmu->all_valid_pmc_idx,
-		0, pmu->nr_arch_gp_counters);
-	bitmap_set(pmu->all_valid_pmc_idx,
-		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
-
 	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
 	if (cpuid_model_is_consistent(vcpu) &&
 	    (perf_capabilities & PMU_CAP_LBR_FMT))
@@ -691,9 +701,14 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
 	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
 		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
+			int s = INTEL_PMC_IDX_FIXED;
+			int e = INTEL_PMC_IDX_FIXED + INTEL_PMC_MAX_FIXED;
+
 			pmu->pebs_enable_mask = counter_mask;
 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
-			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+
+			for_each_set_bit_from(s, pmu->all_valid_pmc_idx, e) {
+				i = s - INTEL_PMC_IDX_FIXED;
 				pmu->fixed_ctr_ctrl_mask &=
 					~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
 			}