@@ -214,8 +214,8 @@ static enum microcode_match_result cf_check compare_patch(
return compare_header(new, old);
}
-static int cf_check apply_microcode(const struct microcode_patch *patch,
- unsigned int flags)
+static int cf_check amd_ucode_load(const struct microcode_patch *patch,
+ unsigned int flags)
{
int hw_err;
unsigned int cpu = smp_processor_id();
@@ -446,7 +446,7 @@ static struct microcode_patch *cf_check amd_ucode_parse(
static const struct microcode_ops __initconst_cf_clobber amd_ucode_ops = {
.parse = amd_ucode_parse,
.collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode,
+ .load = amd_ucode_load,
.compare_patch = compare_patch,
};
@@ -229,6 +229,12 @@ static const struct microcode_patch *ucode_parse(const char *buf, size_t len)
return alternative_call(ucode_ops.parse, buf, len, false);
}
+/* Load a ucode blob. Returns -errno. */
+static int ucode_load(const struct microcode_patch *patch, unsigned int flags)
+{
+ return alternative_call(ucode_ops.load, patch, flags);
+}
+
static DEFINE_SPINLOCK(microcode_mutex);
DEFINE_PER_CPU(struct cpu_signature, cpu_sig);
@@ -333,11 +339,10 @@ static int microcode_update_cpu(const struct microcode_patch *patch,
    spin_lock(&microcode_mutex);
if ( patch )
- err = alternative_call(ucode_ops.apply_microcode, patch, flags);
+ err = ucode_load(patch, flags);
else if ( microcode_cache )
{
- err = alternative_call(ucode_ops.apply_microcode, microcode_cache,
- flags);
+ err = ucode_load(microcode_cache, flags);
if ( err == -EIO )
{
microcode_free_patch(microcode_cache);
@@ -388,7 +393,7 @@ static int primary_thread_work(const struct microcode_patch *patch,
if ( !wait_for_state(LOADING_ENTER) )
return -EBUSY;
- ret = alternative_call(ucode_ops.apply_microcode, patch, flags);
+ ret = ucode_load(patch, flags);
if ( !ret )
atomic_inc(&cpu_updated);
atomic_inc(&cpu_out);
@@ -502,7 +507,7 @@ static int control_thread_fn(const struct microcode_patch *patch,
goto out;
/* Control thread loads ucode first while others are in NMI handler. */
- ret = alternative_call(ucode_ops.apply_microcode, patch, flags);
+ ret = ucode_load(patch, flags);
if ( !ret )
atomic_inc(&cpu_updated);
atomic_inc(&cpu_out);
@@ -731,7 +736,7 @@ int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
if ( flags & ~XENPF_UCODE_FORCE )
return -EINVAL;
- if ( !ucode_ops.apply_microcode )
+ if ( !ucode_ops.load )
return -EINVAL;
buffer = xmalloc_flex_struct(struct ucode_buf, buffer, len);
@@ -783,7 +788,7 @@ int microcode_update_one(void)
if ( ucode_ops.collect_cpu_info )
alternative_vcall(ucode_ops.collect_cpu_info);
- if ( !ucode_ops.apply_microcode )
+ if ( !ucode_ops.load )
return -EOPNOTSUPP;
return microcode_update_cpu(NULL, 0);
@@ -821,7 +826,7 @@ int __init microcode_init_cache(struct boot_info *bi)
{
int rc = 0;
- if ( !ucode_ops.apply_microcode )
+ if ( !ucode_ops.load )
return -ENODEV;
if ( ucode_scan )
@@ -907,11 +912,11 @@ int __init early_microcode_init(struct boot_info *bi)
*
* Take the hint in either case and ignore the microcode interface.
*/
- if ( !ucode_ops.apply_microcode || this_cpu(cpu_sig).rev == ~0 )
+ if ( !ucode_ops.load || this_cpu(cpu_sig).rev == ~0 )
{
printk(XENLOG_INFO "Microcode loading disabled due to: %s\n",
- ucode_ops.apply_microcode ? "rev = ~0" : "HW toggle");
- ucode_ops.apply_microcode = NULL;
+ ucode_ops.load ? "rev = ~0" : "HW toggle");
+ ucode_ops.load = NULL;
return -ENODEV;
}
@@ -287,8 +287,8 @@ static enum microcode_match_result cf_check compare_patch(
return compare_revisions(old->rev, new->rev);
}
-static int cf_check apply_microcode(const struct microcode_patch *patch,
- unsigned int flags)
+static int cf_check intel_ucode_load(const struct microcode_patch *patch,
+ unsigned int flags)
{
uint64_t msr_content;
unsigned int cpu = smp_processor_id();
@@ -408,7 +408,7 @@ static bool __init can_load_microcode(void)
static const struct microcode_ops __initconst_cf_clobber intel_ucode_ops = {
.parse = intel_ucode_parse,
.collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode,
+ .load = intel_ucode_load,
.compare_patch = compare_patch,
};
@@ -417,5 +417,5 @@ void __init ucode_probe_intel(struct microcode_ops *ops)
*ops = intel_ucode_ops;
if ( !can_load_microcode() )
- ops->apply_microcode = NULL;
+ ops->load = NULL;
}
@@ -50,8 +50,7 @@ struct microcode_ops {
* Attempt to load the provided patch into the CPU. Returns an error if
* anything didn't go as expected.
*/
- int (*apply_microcode)(const struct microcode_patch *patch,
- unsigned int flags);
+ int (*load)(const struct microcode_patch *patch, unsigned int flags);
/*
* Given two patches, are they both applicable to the current CPU, and is
@@ -68,8 +67,8 @@ struct microcode_ops {
* - Loading available
*
* These are encoded by (not) filling in ops->collect_cpu_info (i.e. no
- * support available) and (not) ops->apply_microcode (i.e. read only).
- * Otherwise, all hooks must be filled in.
+ * support available) and (not) ops->load (i.e. read only). Otherwise, all
+ * hooks must be filled in.
*/
#ifdef CONFIG_AMD
void ucode_probe_amd(struct microcode_ops *ops);