
x86: amend 'n' debug-key output with SMI count

Message ID 50978ef4-9f11-4c70-952b-94107616f265@suse.com (mailing list archive)
State Superseded
Series x86: amend 'n' debug-key output with SMI count

Commit Message

Jan Beulich Jan. 24, 2024, 3:27 p.m. UTC
... if available only, of course.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

Comments

Andrew Cooper Jan. 24, 2024, 4:24 p.m. UTC | #1
On 24/01/2024 3:27 pm, Jan Beulich wrote:
> ... if available only, of course.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/cpu/common.c
> +++ b/xen/arch/x86/cpu/common.c
> @@ -406,9 +406,15 @@ void __init early_cpu_init(bool verbose)
>  		paddr_bits -= (ebx >> 6) & 0x3f;
>  	}
>  
> -	if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
> +	if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON))) {
> +		uint64_t smi_count;
> +
>  		park_offline_cpus = opt_mce;
>  
> +		if (!verbose && !rdmsr_safe(MSR_SMI_COUNT, smi_count))
> +			setup_force_cpu_cap(X86_FEATURE_SMI_COUNT);
> +	}
> +

I know you're re-using an existing condition, but I think it's more
likely that it's Intel-only than common to VIA and Shanghai.
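
Concretely, an Intel-only probe could look something like the sketch
below (illustrative only, not a proposed revision of the patch; the
existing park_offline_cpus logic would stay as it is):

	if (c->x86_vendor & X86_VENDOR_INTEL) {
		uint64_t smi_count;

		/* Probe once: the MSR either reads back or faults with #GP. */
		if (!verbose && !rdmsr_safe(MSR_SMI_COUNT, smi_count))
			setup_force_cpu_cap(X86_FEATURE_SMI_COUNT);
	}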

Also, why is this gated on verbose?

(I think I can see why this is a rhetorical question, and I expect you
can guess what the feedback will be.)

>  	initialize_cpu_data(0);
>  }
>  
> --- a/xen/arch/x86/include/asm/cpufeatures.h
> +++ b/xen/arch/x86/include/asm/cpufeatures.h
> @@ -24,7 +24,7 @@ XEN_CPUFEATURE(APERFMPERF,        X86_SY
>  XEN_CPUFEATURE(MFENCE_RDTSC,      X86_SYNTH( 9)) /* MFENCE synchronizes RDTSC */
>  XEN_CPUFEATURE(XEN_SMEP,          X86_SYNTH(10)) /* SMEP gets used by Xen itself */
>  XEN_CPUFEATURE(XEN_SMAP,          X86_SYNTH(11)) /* SMAP gets used by Xen itself */
> -/* Bit 12 unused. */
> +XEN_CPUFEATURE(SMI_COUNT,         X86_SYNTH(12)) /* MSR_SMI_COUNT exists */
>  XEN_CPUFEATURE(IND_THUNK_LFENCE,  X86_SYNTH(13)) /* Use IND_THUNK_LFENCE */
>  XEN_CPUFEATURE(IND_THUNK_JMP,     X86_SYNTH(14)) /* Use IND_THUNK_JMP */
>  XEN_CPUFEATURE(SC_NO_BRANCH_HARDEN, X86_SYNTH(15)) /* (Disable) Conditional branch hardening */
> --- a/xen/arch/x86/include/asm/msr-index.h
> +++ b/xen/arch/x86/include/asm/msr-index.h
> @@ -28,6 +28,8 @@
>  #define  TEST_CTRL_SPLITLOCK_DETECT         (_AC(1, ULL) << 29)
>  #define  TEST_CTRL_SPLITLOCK_DISABLE        (_AC(1, ULL) << 31)
>  
> +#define MSR_SMI_COUNT                       0x00000034
> +
>  #define MSR_INTEL_CORE_THREAD_COUNT         0x00000035
>  #define  MSR_CTC_THREAD_MASK                0x0000ffff
>  #define  MSR_CTC_CORE_MASK                  _AC(0xffff0000, U)
> --- a/xen/arch/x86/nmi.c
> +++ b/xen/arch/x86/nmi.c
> @@ -589,9 +589,20 @@ static void cf_check do_nmi_stats(unsign
>      unsigned int cpu;
>      bool pend, mask;
>  
> -    printk("CPU\tNMI\n");
> +    printk("CPU\tNMI%s\n", boot_cpu_has(X86_FEATURE_SMI_COUNT) ? "\tSMI" : "");
>      for_each_online_cpu ( cpu )
> -        printk("%3u\t%3u\n", cpu, per_cpu(nmi_count, cpu));
> +    {
> +        printk("%3u\t%3u", cpu, per_cpu(nmi_count, cpu));
> +        if ( boot_cpu_has(X86_FEATURE_SMI_COUNT) )
> +        {
> +            unsigned int smi_count, dummy;
> +
> +            rdmsr(MSR_SMI_COUNT, smi_count, dummy);
> +            printk("\t%3u\n", smi_count);

This reads MSR_SMI_COUNT repeatedly on the same CPU.

You'll need to IPI all CPUs to dump the count into a per-cpu variable.

~Andrew
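
For illustration, a minimal sketch of the IPI-based collection described
above, assuming Xen's smp_call_function() and per-cpu infrastructure; the
smi_count_snapshot variable, the read_smi_count() callback and the exact
placement inside do_nmi_stats() are assumptions, not part of the posted
patch:

/* Hypothetical per-CPU snapshot of MSR_SMI_COUNT (illustrative only). */
static DEFINE_PER_CPU(unsigned int, smi_count_snapshot);

/* IPI callback: latch the local SMI count (only the low 32 bits hold it). */
static void cf_check read_smi_count(void *unused)
{
    unsigned int lo, hi;

    rdmsr(MSR_SMI_COUNT, lo, hi);
    this_cpu(smi_count_snapshot) = lo;
}

/* Then, in do_nmi_stats(), before printing the table: */
    if ( boot_cpu_has(X86_FEATURE_SMI_COUNT) )
    {
        /* Have each remote CPU dump its count, then read the local one. */
        smp_call_function(read_smi_count, NULL, 1);
        read_smi_count(NULL);
    }

    printk("CPU\tNMI%s\n", boot_cpu_has(X86_FEATURE_SMI_COUNT) ? "\tSMI" : "");
    for_each_online_cpu ( cpu )
    {
        printk("%3u\t%3u", cpu, per_cpu(nmi_count, cpu));
        if ( boot_cpu_has(X86_FEATURE_SMI_COUNT) )
            printk("\t%3u", per_cpu(smi_count_snapshot, cpu));
        printk("\n");
    }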
Jan Beulich Jan. 25, 2024, 7:59 a.m. UTC | #2
On 24.01.2024 17:24, Andrew Cooper wrote:
> On 24/01/2024 3:27 pm, Jan Beulich wrote:
>> ... if available only, of course.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>
>> --- a/xen/arch/x86/cpu/common.c
>> +++ b/xen/arch/x86/cpu/common.c
>> @@ -406,9 +406,15 @@ void __init early_cpu_init(bool verbose)
>>  		paddr_bits -= (ebx >> 6) & 0x3f;
>>  	}
>>  
>> -	if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
>> +	if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON))) {
>> +		uint64_t smi_count;
>> +
>>  		park_offline_cpus = opt_mce;
>>  
>> +		if (!verbose && !rdmsr_safe(MSR_SMI_COUNT, smi_count))
>> +			setup_force_cpu_cap(X86_FEATURE_SMI_COUNT);
>> +	}
>> +
> 
> I know you're re-using an existing condition, but I think it's more
> likely that it's Intel-only than common to VIA and Shanghai.

Then again, when re-using the condition I questioned how likely it is
that people actually use Xen on CPUs of these two vendors, given that
the respective code is only bit-rotting.

> Also, why is this gated on verbose?
> 
> (I think I can see why this is a rhetorical question, and I expect you
> can guess what the feedback will be.)

Hmm, no, I don't think I can guess that. The reason is simple: In
case the MSR doesn't exist, I'd like to avoid the respective (debug)
log message, emitted while recovering from the #GP, appearing twice.
(Which imo eliminates the only guess I might otherwise have: Don't
add complexity [the extra part of the condition] when it's not
needed.)

>> --- a/xen/arch/x86/nmi.c
>> +++ b/xen/arch/x86/nmi.c
>> @@ -589,9 +589,20 @@ static void cf_check do_nmi_stats(unsign
>>      unsigned int cpu;
>>      bool pend, mask;
>>  
>> -    printk("CPU\tNMI\n");
>> +    printk("CPU\tNMI%s\n", boot_cpu_has(X86_FEATURE_SMI_COUNT) ? "\tSMI" : "");
>>      for_each_online_cpu ( cpu )
>> -        printk("%3u\t%3u\n", cpu, per_cpu(nmi_count, cpu));
>> +    {
>> +        printk("%3u\t%3u", cpu, per_cpu(nmi_count, cpu));
>> +        if ( boot_cpu_has(X86_FEATURE_SMI_COUNT) )
>> +        {
>> +            unsigned int smi_count, dummy;
>> +
>> +            rdmsr(MSR_SMI_COUNT, smi_count, dummy);
>> +            printk("\t%3u\n", smi_count);
> 
> This reads MSR_SMI_COUNT repeatedly on the same CPU.
> 
> You'll need to IPI all CPUs to dump the count into a per-cpu variable.

Oh, how embarrassing.

Jan

Patch

--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -406,9 +406,15 @@  void __init early_cpu_init(bool verbose)
 		paddr_bits -= (ebx >> 6) & 0x3f;
 	}
 
-	if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+	if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON))) {
+		uint64_t smi_count;
+
 		park_offline_cpus = opt_mce;
 
+		if (!verbose && !rdmsr_safe(MSR_SMI_COUNT, smi_count))
+			setup_force_cpu_cap(X86_FEATURE_SMI_COUNT);
+	}
+
 	initialize_cpu_data(0);
 }
 
--- a/xen/arch/x86/include/asm/cpufeatures.h
+++ b/xen/arch/x86/include/asm/cpufeatures.h
@@ -24,7 +24,7 @@  XEN_CPUFEATURE(APERFMPERF,        X86_SY
 XEN_CPUFEATURE(MFENCE_RDTSC,      X86_SYNTH( 9)) /* MFENCE synchronizes RDTSC */
 XEN_CPUFEATURE(XEN_SMEP,          X86_SYNTH(10)) /* SMEP gets used by Xen itself */
 XEN_CPUFEATURE(XEN_SMAP,          X86_SYNTH(11)) /* SMAP gets used by Xen itself */
-/* Bit 12 unused. */
+XEN_CPUFEATURE(SMI_COUNT,         X86_SYNTH(12)) /* MSR_SMI_COUNT exists */
 XEN_CPUFEATURE(IND_THUNK_LFENCE,  X86_SYNTH(13)) /* Use IND_THUNK_LFENCE */
 XEN_CPUFEATURE(IND_THUNK_JMP,     X86_SYNTH(14)) /* Use IND_THUNK_JMP */
 XEN_CPUFEATURE(SC_NO_BRANCH_HARDEN, X86_SYNTH(15)) /* (Disable) Conditional branch hardening */
--- a/xen/arch/x86/include/asm/msr-index.h
+++ b/xen/arch/x86/include/asm/msr-index.h
@@ -28,6 +28,8 @@ 
 #define  TEST_CTRL_SPLITLOCK_DETECT         (_AC(1, ULL) << 29)
 #define  TEST_CTRL_SPLITLOCK_DISABLE        (_AC(1, ULL) << 31)
 
+#define MSR_SMI_COUNT                       0x00000034
+
 #define MSR_INTEL_CORE_THREAD_COUNT         0x00000035
 #define  MSR_CTC_THREAD_MASK                0x0000ffff
 #define  MSR_CTC_CORE_MASK                  _AC(0xffff0000, U)
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -589,9 +589,20 @@  static void cf_check do_nmi_stats(unsign
     unsigned int cpu;
     bool pend, mask;
 
-    printk("CPU\tNMI\n");
+    printk("CPU\tNMI%s\n", boot_cpu_has(X86_FEATURE_SMI_COUNT) ? "\tSMI" : "");
     for_each_online_cpu ( cpu )
-        printk("%3u\t%3u\n", cpu, per_cpu(nmi_count, cpu));
+    {
+        printk("%3u\t%3u", cpu, per_cpu(nmi_count, cpu));
+        if ( boot_cpu_has(X86_FEATURE_SMI_COUNT) )
+        {
+            unsigned int smi_count, dummy;
+
+            rdmsr(MSR_SMI_COUNT, smi_count, dummy);
+            printk("\t%3u\n", smi_count);
+        }
+        else
+            printk("\n");
+    }
 
     if ( !hardware_domain || !(v = domain_vcpu(hardware_domain, 0)) )
         return;