@@ -422,7 +422,7 @@ static struct microcode_patch *cf_check cpu_request_microcode(
return patch;
}
-const struct microcode_ops amd_ucode_ops = {
+const struct microcode_ops __initconst_cf_clobber amd_ucode_ops = {
.cpu_request_microcode = cpu_request_microcode,
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode,
@@ -21,6 +21,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <xen/alternative-call.h>
#include <xen/cpu.h>
#include <xen/earlycpio.h>
#include <xen/err.h>
@@ -214,7 +215,7 @@ void __init microcode_grab_module(
microcode_scan_module(module_map, mbi);
}
-static const struct microcode_ops __read_mostly *microcode_ops;
+static struct microcode_ops __ro_after_init ucode_ops;
static DEFINE_SPINLOCK(microcode_mutex);
@@ -241,9 +242,9 @@ static const struct microcode_patch *nmi_patch = ZERO_BLOCK_PTR;
*/
static struct microcode_patch *parse_blob(const char *buf, size_t len)
{
- microcode_ops->collect_cpu_info();
+ alternative_vcall(ucode_ops.collect_cpu_info);
- return microcode_ops->cpu_request_microcode(buf, len);
+ return alternative_call(ucode_ops.cpu_request_microcode, buf, len);
}
static void microcode_free_patch(struct microcode_patch *patch)
@@ -258,8 +259,8 @@ static bool microcode_update_cache(struct microcode_patch *patch)
if ( !microcode_cache )
microcode_cache = patch;
- else if ( microcode_ops->compare_patch(patch,
- microcode_cache) == NEW_UCODE )
+ else if ( alternative_call(ucode_ops.compare_patch,
+ patch, microcode_cache) == NEW_UCODE )
{
microcode_free_patch(microcode_cache);
microcode_cache = patch;
@@ -311,14 +312,14 @@ static int microcode_update_cpu(const struct microcode_patch *patch)
{
int err;
- microcode_ops->collect_cpu_info();
+ alternative_vcall(ucode_ops.collect_cpu_info);
spin_lock(&microcode_mutex);
if ( patch )
- err = microcode_ops->apply_microcode(patch);
+ err = alternative_call(ucode_ops.apply_microcode, patch);
else if ( microcode_cache )
{
- err = microcode_ops->apply_microcode(microcode_cache);
+ err = alternative_call(ucode_ops.apply_microcode, microcode_cache);
if ( err == -EIO )
{
microcode_free_patch(microcode_cache);
@@ -368,7 +369,7 @@ static int primary_thread_work(const struct microcode_patch *patch)
if ( !wait_for_state(LOADING_ENTER) )
return -EBUSY;
- ret = microcode_ops->apply_microcode(patch);
+ ret = alternative_call(ucode_ops.apply_microcode, patch);
if ( !ret )
atomic_inc(&cpu_updated);
atomic_inc(&cpu_out);
@@ -481,7 +482,7 @@ static int control_thread_fn(const struct microcode_patch *patch)
}
/* Control thread loads ucode first while others are in NMI handler. */
- ret = microcode_ops->apply_microcode(patch);
+ ret = alternative_call(ucode_ops.apply_microcode, patch);
if ( !ret )
atomic_inc(&cpu_updated);
atomic_inc(&cpu_out);
@@ -610,7 +611,8 @@ static long cf_check microcode_update_helper(void *data)
*/
spin_lock(&microcode_mutex);
if ( microcode_cache &&
- microcode_ops->compare_patch(patch, microcode_cache) != NEW_UCODE )
+ alternative_call(ucode_ops.compare_patch,
+ patch, microcode_cache) != NEW_UCODE )
{
spin_unlock(&microcode_mutex);
printk(XENLOG_WARNING "microcode: couldn't find any newer revision "
@@ -678,7 +680,7 @@ int microcode_update(XEN_GUEST_HANDLE(const_void) buf, unsigned long len)
if ( len != (uint32_t)len )
return -E2BIG;
- if ( microcode_ops == NULL )
+ if ( !ucode_ops.apply_microcode )
return -EINVAL;
buffer = xmalloc_flex_struct(struct ucode_buf, buffer, len);
@@ -722,10 +724,10 @@ __initcall(microcode_init);
/* Load a cached update to current cpu */
int microcode_update_one(void)
{
- if ( !microcode_ops )
+ if ( !ucode_ops.apply_microcode )
return -EOPNOTSUPP;
- microcode_ops->collect_cpu_info();
+ alternative_vcall(ucode_ops.collect_cpu_info);
return microcode_update_cpu(NULL);
}
@@ -780,22 +782,22 @@ int __init early_microcode_init(void)
{
case X86_VENDOR_AMD:
if ( c->x86 >= 0x10 )
- microcode_ops = &amd_ucode_ops;
+ ucode_ops = amd_ucode_ops;
break;
case X86_VENDOR_INTEL:
if ( c->x86 >= 6 )
- microcode_ops = &intel_ucode_ops;
+ ucode_ops = intel_ucode_ops;
break;
}
- if ( !microcode_ops )
+ if ( !ucode_ops.apply_microcode )
{
printk(XENLOG_WARNING "Microcode loading not available\n");
return -ENODEV;
}
- microcode_ops->collect_cpu_info();
+ alternative_vcall(ucode_ops.collect_cpu_info);
if ( ucode_mod.mod_end || ucode_blob.size )
rc = early_microcode_update_cpu();
@@ -376,7 +376,7 @@ static struct microcode_patch *cf_check cpu_request_microcode(
return patch;
}
-const struct microcode_ops intel_ucode_ops = {
+const struct microcode_ops __initconst_cf_clobber intel_ucode_ops = {
.cpu_request_microcode = cpu_request_microcode,
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode,
Microcode loading is not a fastpath, but there are control flow integrity hardening benefits from using altcall, because it allows us to clobber the endbr64 instructions on all function pointer targets. Convert the existing microcode_ops pointer into an __ro_after_init structure, and move {amd,intel}_ucode_ops into __initconst_cf_clobber. Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> --- CC: Jan Beulich <JBeulich@suse.com> CC: Roger Pau Monné <roger.pau@citrix.com> CC: Wei Liu <wl@xen.org> v2: * Adjust commit message. * Use __initconst_cf_clobber and __ro_after_init. --- xen/arch/x86/cpu/microcode/amd.c | 2 +- xen/arch/x86/cpu/microcode/core.c | 38 ++++++++++++++++++++------------------ xen/arch/x86/cpu/microcode/intel.c | 2 +- 3 files changed, 22 insertions(+), 20 deletions(-)