
[RFC,10/22] x86/PMUv2: limit number of fixed perf counters to 3

Message ID 0355493e5b5771e663381b163efb21d210f56c42.1698261255.git.edwin.torok@cloud.com (mailing list archive)
State New, archived
Series vPMU bugfixes and support for PMUv5

Commit Message

Edwin Torok Oct. 25, 2023, 7:29 p.m. UTC
From: Edwin Török <edvin.torok@citrix.com>

There are only 3 architectural fixed-function counters defined;
however, Icelake introduces a 4th.
We therefore need to report the number of fixed counters implemented
in CPUID correctly for Icelake, so define a macro to ensure we are
consistent about which counter is last.
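
For reference, the count being limited here is reported by CPUID leaf
0xA: EDX bits 4:0 give the number of fixed-function counters when the
architectural PMU version is >= 2.  A minimal sketch of that lookup,
assuming Xen's cpuid_edx() helper; the in-tree
core2_get_fixed_pmc_count() may read it differently:

    /* Illustrative only, not part of this patch. */
    static unsigned int fixed_pmc_count_sketch(void)
    {
        /* CPUID.0xA:EDX[4:0] = number of fixed-function counters. */
        return cpuid_edx(0xa) & 0x1f;
    }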

Note: simply adding MSR_CORE_PERF_FIXED_CTR3 is not enough: Icelake
also defines MSR_PERF_METRICS, and there are ordering constraints on
restoring that MSR, as well as atomicity constraints on
IA32_PERF_GLOBAL_CTRL, so this is not implemented yet.
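
With the MSR_CORE_PERF_FIXED_CTRn sentinel added below (0x30b), the
derived limit works out, purely for illustration, as:

    PERF_FIXED_CTR_MAX = 0x30b - 0x309 + 1 = 3

i.e. fixed_pmc_cnt stays clamped to the 3 architectural counters until
FIXED_CTR3/PERF_METRICS support is added.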

Backport: 4.13+

Signed-off-by: Edwin Török <edvin.torok@citrix.com>
---
 xen/arch/x86/cpu/vpmu_intel.c        | 6 ++++++
 xen/arch/x86/hvm/vmx/vmx.c           | 4 ++--
 xen/arch/x86/include/asm/msr-index.h | 4 ++--
 xen/arch/x86/pv/emul-priv-op.c       | 4 ++--
 4 files changed, 12 insertions(+), 6 deletions(-)

Patch

diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index ef8d69a0d6..4c0776cee7 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -926,6 +926,12 @@  const struct arch_vpmu_ops *__init core2_vpmu_init(void)
     }
 
     fixed_pmc_cnt = core2_get_fixed_pmc_count();
+#define PERF_FIXED_CTR_MAX (MSR_CORE_PERF_FIXED_CTRn - MSR_CORE_PERF_FIXED_CTR0 + 1)
+    if ( fixed_pmc_cnt > PERF_FIXED_CTR_MAX )
+    {
+        printk(XENLOG_INFO "VPMU: limiting fixed perf counters to %d\n", PERF_FIXED_CTR_MAX);
+        fixed_pmc_cnt = PERF_FIXED_CTR_MAX;
+    }
 
     if ( cpu_has_pdcm )
     {
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 7d51addf7a..1510e980dd 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3362,7 +3362,7 @@  static int cf_check vmx_msr_read_intercept(
         /* FALLTHROUGH */
     case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR_LAST:
     case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL_LAST:
-    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTRn:
     case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
     case MSR_IA32_PEBS_ENABLE:
     case MSR_IA32_DS_AREA:
@@ -3680,7 +3680,7 @@  static int cf_check vmx_msr_write_intercept(
 
     case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR_LAST:
     case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL_LAST:
-    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTRn:
     case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
     case MSR_IA32_PEBS_ENABLE:
     case MSR_IA32_DS_AREA:
diff --git a/xen/arch/x86/include/asm/msr-index.h b/xen/arch/x86/include/asm/msr-index.h
index 011a926e0e..8a881a8a6f 100644
--- a/xen/arch/x86/include/asm/msr-index.h
+++ b/xen/arch/x86/include/asm/msr-index.h
@@ -674,8 +674,8 @@ 
 
 /* Intel Core-based CPU performance counters */
 #define MSR_CORE_PERF_FIXED_CTR0	0x00000309
-#define MSR_CORE_PERF_FIXED_CTR1	0x0000030a
-#define MSR_CORE_PERF_FIXED_CTR2	0x0000030b
+#define MSR_CORE_PERF_FIXED_CTRn	0x0000030b
+
 #define MSR_CORE_PERF_FIXED_CTR_CTRL	0x0000038d
 #define MSR_CORE_PERF_GLOBAL_STATUS	0x0000038e
 #define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 301a70f5ea..a8472fc779 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -967,7 +967,7 @@  static int cf_check read_msr(
 
     case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR_LAST:
     case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL_LAST:
-    case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
+    case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTRn:
     case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL:
         if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
         {
@@ -1147,7 +1147,7 @@  static int cf_check write_msr(
 
     case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR_LAST:
     case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL_LAST:
-    case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
+    case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTRn:
     case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL:
         if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
         {