
x86/CPU: convert vendor hook invocations to altcall

Message ID ece45cf6-51c8-413e-9963-21144fe64f2a@suse.com (mailing list archive)
State Superseded
Series x86/CPU: convert vendor hook invocations to altcall

Commit Message

Jan Beulich Jan. 16, 2024, 8:41 a.m. UTC
While not performance critical, these hook invocations still want
converting: this way all pre-filled struct cpu_dev instances can become
__initconst_cf_clobber, thus allowing a further 8 ENDBR instructions to
be eliminated during the 2nd phase of alternatives patching (besides
moving previously resident data to .init.*).
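
To make the mechanism concrete: once altcall patching has converted every
call site into a direct call, the 2nd phase walks the .init.*.cf_clobber
sections, treats their contents as function pointers, and overwrites the
ENDBR64 at each target, so those functions cease to be valid indirect-branch
targets. A condensed sketch of that scan follows; the section-bound symbols,
loop structure, and use of add_nops() are illustrative assumptions, not
Xen's literal code:

extern void *const __initdata_cf_clobber_start[], __initdata_cf_clobber_end[];

/* Sketch only: clobber the ENDBR64 at the target of every function
 * pointer recorded in the cf_clobber sections. */
static void __init clobber_cf_targets(void)
{
    void *const *ptr;

    for ( ptr = __initdata_cf_clobber_start;
          ptr < __initdata_cf_clobber_end; ++ptr )
    {
        if ( *ptr && is_endbr64(*ptr) )
            add_nops(*ptr, ENDBR64_LEN); /* ENDBR64 is 4 bytes long */
    }
}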

Since all use sites need touching anyway, take the opportunity and also
address a Misra C:2012 Rule 5.5 violation: Rename the this_cpu static
variable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
With LTO it may turn out necessary to tag more than just "default_cpu"
as __used.

Comments

Jan Beulich Jan. 16, 2024, 8:55 a.m. UTC | #1
On 16.01.2024 09:41, Jan Beulich wrote:
> While not performance critical, these hook invocations still want
> converting: this way all pre-filled struct cpu_dev instances can become
> __initconst_cf_clobber, thus allowing a further 8 ENDBR instructions to
> be eliminated during the 2nd phase of alternatives patching (besides
> moving previously resident data to .init.*).
> 
> Since all use sites need touching anyway, take the opportunity and also
> address a Misra C:2012 Rule 5.5 violation: Rename the this_cpu static
> variable.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> With LTO it may turn out necessary to tag more than just "default_cpu"
> as __used.

Yet then ...

> --- a/xen/arch/x86/cpu/common.c
> +++ b/xen/arch/x86/cpu/common.c
> @@ -113,10 +113,10 @@ static void cf_check default_init(struct
>  	__clear_bit(X86_FEATURE_SEP, c->x86_capability);
>  }
>  
> -static const struct cpu_dev default_cpu = {
> +static const struct cpu_dev __initconst_cf_clobber default_cpu = {

... I sent a stale patch, where __used was still missing here.

Considering the intention behind __initconst_cf_clobber, I wonder
whether it wouldn't be better to integrate __used into that macro, so
that we become independent of whatever optimizations the tool chain
may be doing.

Jan
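
Such an integration might look as follows; __used forces the object to be
emitted even when LTO finds no remaining references, so the cf_clobber scan
is guaranteed to see its function pointers. The current definition of the
macro is assumed here, not quoted:

/* Hypothetical; xen/include/xen/init.h or similar: */
#define __initconst_cf_clobber \
    __used __section(".init.rodata.cf_clobber")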

Patch

--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -1302,7 +1302,7 @@  static void cf_check init_amd(struct cpu
 	amd_log_freq(c);
 }
 
-const struct cpu_dev amd_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber amd_cpu_dev = {
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
 };
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -54,6 +54,6 @@  static void cf_check init_centaur(struct
 		init_c3(c);
 }
 
-const struct cpu_dev centaur_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber centaur_cpu_dev = {
 	.c_init		= init_centaur,
 };
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -113,10 +113,10 @@  static void cf_check default_init(struct
 	__clear_bit(X86_FEATURE_SEP, c->x86_capability);
 }
 
-static const struct cpu_dev default_cpu = {
+static const struct cpu_dev __initconst_cf_clobber default_cpu = {
 	.c_init	= default_init,
 };
-static const struct cpu_dev *this_cpu = &default_cpu;
+static struct cpu_dev __ro_after_init actual_cpu;
 
 static DEFINE_PER_CPU(uint64_t, msr_misc_features);
 void (* __read_mostly ctxt_switch_masking)(const struct vcpu *next);
@@ -336,12 +336,13 @@  void __init early_cpu_init(bool verbose)
 
 	c->x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
 	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:	  this_cpu = &intel_cpu_dev;    break;
-	case X86_VENDOR_AMD:	  this_cpu = &amd_cpu_dev;      break;
-	case X86_VENDOR_CENTAUR:  this_cpu = &centaur_cpu_dev;  break;
-	case X86_VENDOR_SHANGHAI: this_cpu = &shanghai_cpu_dev; break;
-	case X86_VENDOR_HYGON:    this_cpu = &hygon_cpu_dev;    break;
+	case X86_VENDOR_INTEL:	  actual_cpu = intel_cpu_dev;    break;
+	case X86_VENDOR_AMD:	  actual_cpu = amd_cpu_dev;      break;
+	case X86_VENDOR_CENTAUR:  actual_cpu = centaur_cpu_dev;  break;
+	case X86_VENDOR_SHANGHAI: actual_cpu = shanghai_cpu_dev; break;
+	case X86_VENDOR_HYGON:    actual_cpu = hygon_cpu_dev;    break;
 	default:
+		actual_cpu = default_cpu;
 		if (!verbose)
 			break;
 		printk(XENLOG_ERR
@@ -448,8 +449,8 @@  static void generic_identify(struct cpui
 	if (c->extended_cpuid_level >= 0x80000021)
 		c->x86_capability[FEATURESET_e21a] = cpuid_eax(0x80000021);
 
-	if (this_cpu->c_early_init)
-		this_cpu->c_early_init(c);
+	if (actual_cpu.c_early_init)
+		alternative_vcall(actual_cpu.c_early_init, c);
 
 	/* c_early_init() may have adjusted cpuid levels/features.  Reread. */
 	c->cpuid_level = cpuid_eax(0);
@@ -546,9 +547,8 @@  void identify_cpu(struct cpuinfo_x86 *c)
 	 * At the end of this section, c->x86_capability better
 	 * indicate the features this CPU genuinely supports!
 	 */
-	if (this_cpu->c_init)
-		this_cpu->c_init(c);
-
+	if (actual_cpu.c_init)
+		alternative_vcall(actual_cpu.c_init, c);
 
 	/*
 	 * The vendor-specific functions might have changed features.  Now
--- a/xen/arch/x86/cpu/hygon.c
+++ b/xen/arch/x86/cpu/hygon.c
@@ -87,7 +87,7 @@  static void cf_check init_hygon(struct c
 	amd_log_freq(c);
 }
 
-const struct cpu_dev hygon_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber hygon_cpu_dev = {
 	.c_early_init	= early_init_amd,
 	.c_init		= init_hygon,
 };
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -577,7 +577,7 @@  static void cf_check init_intel(struct c
 		setup_clear_cpu_cap(X86_FEATURE_CLWB);
 }
 
-const struct cpu_dev intel_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber intel_cpu_dev = {
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
 };
--- a/xen/arch/x86/cpu/shanghai.c
+++ b/xen/arch/x86/cpu/shanghai.c
@@ -15,6 +15,6 @@  static void cf_check init_shanghai(struc
     init_intel_cacheinfo(c);
 }
 
-const struct cpu_dev shanghai_cpu_dev = {
+const struct cpu_dev __initconst_cf_clobber shanghai_cpu_dev = {
     .c_init     = init_shanghai,
 };
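
For readers unfamiliar with altcall: alternative_vcall() emits an indirect
call through the given function-pointer object and records the site in an
alternatives table. During patching, the pointer is read once and the call
is rewritten into a direct CALL rel32; afterwards the callee's ENDBR64 is
no longer needed as an indirect-branch target, which is what makes the
clobbering sketched earlier safe. An illustrative sketch of that rewrite
follows; the struct layout, text_poke(), and the 5-byte CALL encoding
detail are assumptions for the sketch, not Xen's actual implementation:

/* Illustrative only; the real logic lives in apply_alternatives(). */
struct alt_call_site {
    uint8_t *callsite;      /* address of the indirect call instruction */
    void *const *func_slot; /* pointer object the call goes through */
};

static void __init devirtualise(const struct alt_call_site *site)
{
    const uint8_t *target = *site->func_slot;
    uint8_t buf[5] = { 0xe8 }; /* CALL rel32 opcode */
    int32_t disp = target - (site->callsite + sizeof(buf));

    memcpy(buf + 1, &disp, sizeof(disp));
    text_poke(site->callsite, buf, sizeof(buf));
}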