@@ -76,6 +76,7 @@ bool emul_test_init(void)
unsigned long sp;
x86_cpu_policy_fill_native(&cp);
+ x86_cpu_policy_bound_max_leaves(&cp);
/*
* The emulator doesn't use these instructions, so can always emulate
@@ -88,6 +89,8 @@ bool emul_test_init(void)
cp.feat.wrmsrns = true;
cp.extd.clzero = true;
+ x86_cpu_policy_shrink_max_leaves(&cp);
+
if ( cpu_has_xsave )
{
unsigned int tmp, ebx;
@@ -352,28 +352,19 @@ void calculate_raw_cpu_policy(void)
static void __init calculate_host_policy(void)
{
struct cpu_policy *p = &host_cpu_policy;
- unsigned int max_extd_leaf;
*p = raw_cpu_policy;
- p->basic.max_leaf =
- min_t(uint32_t, p->basic.max_leaf, ARRAY_SIZE(p->basic.raw) - 1);
- p->feat.max_subleaf =
- min_t(uint32_t, p->feat.max_subleaf, ARRAY_SIZE(p->feat.raw) - 1);
-
- max_extd_leaf = p->extd.max_leaf;
-
/*
* For AMD/Hygon hardware before Zen3, we unilaterally modify LFENCE to be
* dispatch serialising for Spectre mitigations. Extend max_extd_leaf
* beyond what hardware supports, to include the feature leaf containing
* this information.
*/
- if ( cpu_has_lfence_dispatch )
- max_extd_leaf = max(max_extd_leaf, 0x80000021U);
+ if ( cpu_has_lfence_dispatch && p->extd.max_leaf < 0x80000021U )
+ p->extd.max_leaf = 0x80000021U;
- p->extd.max_leaf = 0x80000000U | min_t(uint32_t, max_extd_leaf & 0xffff,
- ARRAY_SIZE(p->extd.raw) - 1);
+ x86_cpu_policy_bound_max_leaves(p);
x86_cpu_featureset_to_policy(boot_cpu_data.x86_capability, p);
recalculate_xstate(p);
@@ -439,6 +439,12 @@ void x86_cpu_policy_fill_native(struct c
void x86_cpu_policy_clear_out_of_range_leaves(struct cpu_policy *p);
/**
+ * Bound max leaf/subleaf values according to the capacity of the respective
+ * arrays in struct cpu_policy.
+ */
+void x86_cpu_policy_bound_max_leaves(struct cpu_policy *p);
+
+/**
* Shrink max leaf/subleaf values such that the last respective valid entry
* isn't all blank. While permitted by the spec, such extraneous leaves may
* provide undue "hints" to guests.
@@ -291,6 +291,16 @@ void x86_cpu_policy_clear_out_of_range_l
ARRAY_SIZE(p->extd.raw) - 1);
}
+void __init x86_cpu_policy_bound_max_leaves(struct cpu_policy *p)
+{
+ p->basic.max_leaf =
+ min_t(uint32_t, p->basic.max_leaf, ARRAY_SIZE(p->basic.raw) - 1);
+ p->feat.max_subleaf =
+ min_t(uint32_t, p->feat.max_subleaf, ARRAY_SIZE(p->feat.raw) - 1);
+ p->extd.max_leaf = 0x80000000U | min_t(uint32_t, p->extd.max_leaf & 0xffff,
+ ARRAY_SIZE(p->extd.raw) - 1);
+}
+
void x86_cpu_policy_shrink_max_leaves(struct cpu_policy *p)
{
unsigned int i;
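[Editor's illustration, not part of the patch: a standalone sketch of what the
extended-range clamping in x86_cpu_policy_bound_max_leaves() amounts to.  The
capacity of 8 leaves is made up; the real code uses ARRAY_SIZE(p->extd.raw),
and the basic/feat fields are clamped the same way, just without the
0x80000000 base.]

#include <stdint.h>
#include <stdio.h>

#define EXTD_CAPACITY 8u /* hypothetical stand-in for ARRAY_SIZE(p->extd.raw) */

static uint32_t bound_extd_max_leaf(uint32_t max_leaf)
{
    uint32_t idx = max_leaf & 0xffff;   /* index within the 0x8000xxxx range */

    if ( idx > EXTD_CAPACITY - 1 )
        idx = EXTD_CAPACITY - 1;        /* clamp to the last array slot */

    return 0x80000000u | idx;           /* re-attach the extended-range base */
}

int main(void)
{
    /* 0x80000021 reported by hardware, capacity 8 -> clamped to 0x80000007. */
    printf("%#x\n", (unsigned int)bound_extd_max_leaf(0x80000021u));

    /* 0x80000004 fits within the capacity and is left as-is. */
    printf("%#x\n", (unsigned int)bound_extd_max_leaf(0x80000004u));

    return 0;
}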
@@ -5,6 +5,7 @@
#include <xen/bitops.h>
#include <xen/guest_access.h>
+#include <xen/init.h>
#include <xen/kernel.h>
#include <xen/lib.h>
#include <xen/nospec.h>
@@ -28,6 +29,11 @@
#include <xen-tools/common-macros.h>
+#define __init
+#define __initdata
+#define __initconst
+#define __initconstrel
+
static inline bool test_bit(unsigned int bit, const void *vaddr)
{
const char *addr = vaddr;
Break out this logic from calculate_host_policy() to also use it in the
x86 emulator harness, where subsequently we'll want to avoid open-coding
AMX maximum palette bounding as well as AVX10 sub-feature pruning.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v4: Mark x86_cpu_policy_bound_max_leaves() __init. Add U suffixes. Re-base.
v2: New.
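[Editor's usage note, not part of the series: the intended call sequence for a
library consumer such as the emulator test harness is fill, bound, adjust,
shrink, as the first hunk above shows.  In the sketch below, build_policy()
and the header path are assumed scaffolding; only the three
x86_cpu_policy_*() calls are taken from the hunks.]

#include <xen/lib/x86/cpu-policy.h>

static void build_policy(struct cpu_policy *p)
{
    x86_cpu_policy_fill_native(p);      /* Raw CPUID/MSR data from hardware. */

    /* Clamp max_leaf/max_subleaf to the capacity of the arrays in *p. */
    x86_cpu_policy_bound_max_leaves(p);

    /* ... adjust individual feature bits, as emul_test_init() does ... */

    /* Drop trailing all-blank leaves before exposing the policy. */
    x86_cpu_policy_shrink_max_leaves(p);
}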