@@ -831,6 +831,9 @@ int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
uint32_t xc_get_cpu_featureset_size(void);
+/* Returns the APIC ID of the `cpu`-th CPU according to `policy` */
+uint32_t xc_cpu_to_apicid(const xc_cpu_policy_t *policy, unsigned int cpu);
+
enum xc_static_cpu_featuremask {
XC_FEATUREMASK_KNOWN,
XC_FEATUREMASK_SPECIAL,
@@ -725,8 +725,16 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
p->policy.basic.htt = test_bit(X86_FEATURE_HTT, host_featureset);
p->policy.extd.cmp_legacy = test_bit(X86_FEATURE_CMP_LEGACY, host_featureset);
}
- else
+ else if ( restore )
{
+ /*
+ * Reconstruct the topology exposed on Xen <= 4.13. It makes very little
+ * sense, but it's what those guests saw so it's set in stone now.
+ *
+ * Guests from Xen 4.14 onwards carry their own CPUID leaves in the
+ * migration stream so they don't need special treatment.
+ */
+
/*
* Topology for HVM guests is entirely controlled by Xen. For now, we
* hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
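For illustration, a minimal standalone sketch of the legacy mapping referenced in the comments above: every vCPU gets an even APIC ID, so each one looks like its own non-SMT core with the odd "sibling" IDs left unused. This is plain C for the reader's benefit, not part of the change itself.

#include <stdio.h>

/*
 * Pre-4.14 scheme: APIC_ID = vcpu_id * 2.  Every ID is even, giving the
 * illusion of one thread per core with the SMT sibling slots unused.
 */
int main(void)
{
    for (unsigned int vcpu_id = 0; vcpu_id < 4; vcpu_id++)
        printf("vcpu %u -> APIC ID %u\n", vcpu_id, vcpu_id * 2);

    return 0;
}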
@@ -782,6 +790,20 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
break;
}
}
+ else
+ {
+ /* TODO: Expose the ability to choose a custom topology for HVM/PVH */
+ unsigned int threads_per_core = 1;
+ unsigned int cores_per_pkg = di.max_vcpu_id + 1;
+
+ rc = x86_topo_from_parts(&p->policy, threads_per_core, cores_per_pkg);
+ if ( rc )
+ {
+ ERROR("Failed to generate topology: rc=%d t/c=%u c/p=%u",
+ rc, threads_per_core, cores_per_pkg);
+ goto out;
+ }
+ }
nr_leaves = ARRAY_SIZE(p->leaves);
rc = x86_cpuid_copy_to_buffer(&p->policy, p->leaves, &nr_leaves);
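To make the chosen parameters concrete, here is a rough self-contained sketch of how a 1-thread-per-core package can be laid out in x2APIC ID terms. The id_width() helper and the 6-core example value are purely illustrative; the actual ID assignment is whatever libx86's x86_topo_from_parts()/x86_x2apic_id_from_vcpu_id() compute and may differ in detail (e.g. ID spacing).

#include <stdio.h>

/* Smallest power-of-two width able to hold n distinct IDs. */
static unsigned int id_width(unsigned int n)
{
    unsigned int w = 0;

    while ((1u << w) < n)
        w++;

    return w;
}

int main(void)
{
    /* Mirrors the shape used above: no SMT, one core per vCPU. */
    unsigned int threads_per_core = 1, cores_per_pkg = 6;
    unsigned int thread_bits = id_width(threads_per_core);
    unsigned int core_bits = id_width(cores_per_pkg);

    /* With a zero-width thread field, the x2APIC ID is just the core number. */
    for (unsigned int vcpu = 0; vcpu < cores_per_pkg; vcpu++)
        printf("vcpu %u -> x2APIC ID %u\n", vcpu, vcpu << thread_bits);

    /* A second package, if there were one, would start at this bit. */
    printf("package shift: %u bits\n", thread_bits + core_bits);

    return 0;
}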
@@ -1028,3 +1050,8 @@ bool xc_cpu_policy_is_compatible(xc_interface *xch, xc_cpu_policy_t *host,
return false;
}
+
+uint32_t xc_cpu_to_apicid(const xc_cpu_policy_t *policy, unsigned int cpu)
+{
+ return x86_x2apic_id_from_vcpu_id(&policy->policy, cpu);
+}
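A hypothetical standalone caller of the new helper, mirroring what libxl__build_hvm does below: obtain the domain's policy once, then derive each vCPU's APIC ID from it. The default domain ID, the fixed vCPU count and the bare-bones error handling are illustrative only; xc_interface_open()/xc_interface_close() are the usual libxc entry points.

#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>
#include <xenguest.h>

int main(int argc, char **argv)
{
    uint32_t domid = argc > 1 ? atoi(argv[1]) : 0;
    unsigned int nr_vcpus = 4;   /* illustrative; query the domain for real use */
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    struct xc_cpu_policy *policy = xc_cpu_policy_init();
    int rc = 1;

    if (!xch || !policy)
        goto out;

    if (xc_cpu_policy_get_domain(xch, domid, policy))
        goto out;

    for (unsigned int cpu = 0; cpu < nr_vcpus; cpu++)
        printf("vcpu %u -> APIC ID %u\n", cpu, xc_cpu_to_apicid(policy, cpu));

    rc = 0;

 out:
    if (policy)
        xc_cpu_policy_destroy(policy);
    if (xch)
        xc_interface_close(xch);

    return rc;
}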
@@ -1063,6 +1063,9 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
libxl_domain_build_info *const info = &d_config->b_info;
struct xc_dom_image *dom = NULL;
bool device_model = info->type == LIBXL_DOMAIN_TYPE_HVM ? true : false;
+#if defined(__i386__) || defined(__x86_64__)
+ struct xc_cpu_policy *policy = NULL;
+#endif
xc_dom_loginit(ctx->xch);
@@ -1083,8 +1086,22 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
dom->container_type = XC_DOM_HVM_CONTAINER;
#if defined(__i386__) || defined(__x86_64__)
+ policy = xc_cpu_policy_init();
+ if (!policy) {
+ LOGE(ERROR, "xc_cpu_policy_init failed d%u", domid);
+ rc = ERROR_NOMEM;
+ goto out;
+ }
+
+ rc = xc_cpu_policy_get_domain(ctx->xch, domid, policy);
+ if (rc != 0) {
+ LOGE(ERROR, "xc_cpu_policy_get_domain failed d%u", domid);
+ rc = ERROR_FAIL;
+ goto out;
+ }
+
for ( uint32_t i = 0; i < info->max_vcpus; i++ )
- dom->cpu_to_apicid[i] = 2 * i; /* TODO: Replace by topo calculation */
+ dom->cpu_to_apicid[i] = xc_cpu_to_apicid(policy, i);
#endif
/* The params from the configuration file are in Mb, which are then
@@ -1214,6 +1231,9 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
out:
assert(rc != 0);
if (dom != NULL) xc_dom_release(dom);
+#if defined(__i386__) || defined(__x86_64__)
+ xc_cpu_policy_destroy(policy);
+#endif
return rc;
}
@@ -266,9 +266,6 @@ static void recalculate_misc(struct cpu_policy *p)
p->basic.raw[0x8] = EMPTY_LEAF;
- /* TODO: Rework topology logic. */
- memset(p->topo.raw, 0, sizeof(p->topo.raw));
-
p->basic.raw[0xc] = EMPTY_LEAF;
p->extd.e1d &= ~CPUID_COMMON_1D_FEATURES;
@@ -616,6 +613,9 @@ static void __init calculate_pv_max_policy(void)
recalculate_xstate(p);
p->extd.raw[0xa] = EMPTY_LEAF; /* No SVM for PV guests. */
+
+ /* Wipe host topology. To be populated by the toolstack. */
+ memset(p->topo.raw, 0, sizeof(p->topo.raw));
}
static void __init calculate_pv_def_policy(void)
@@ -779,6 +779,9 @@ static void __init calculate_hvm_max_policy(void)
/* It's always possible to emulate CPUID faulting for HVM guests */
p->platform_info.cpuid_faulting = true;
+
+ /* Wipe host topology. To be populated by the toolstack. */
+ memset(p->topo.raw, 0, sizeof(p->topo.raw));
}
static void __init calculate_hvm_def_policy(void)
Expose sensible topologies in leaf 0xb. At the moment it synthesises
non-HT systems, in line with the previous code's intent.

Leaf 0xb in the host policy is no longer zapped and the guest {max,def}
policies have their topology leaves zapped instead. The intent is for
the toolstack to populate them. There's no current use for the topology
information in the host policy, but it does no harm.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
v6:
  * Assign accurate APIC IDs to xc_dom_image->cpu_to_apicid
    * New field in v6. Allows ACPI generation to be correct on PVH too.
---
 tools/include/xenguest.h        |  3 +++
 tools/libs/guest/xg_cpuid_x86.c | 29 ++++++++++++++++++++++++++++-
 tools/libs/light/libxl_dom.c    | 22 +++++++++++++++++++++-
 xen/arch/x86/cpu-policy.c       |  9 ++++++---
 4 files changed, 58 insertions(+), 5 deletions(-)
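As a quick way to check the end result from inside a guest, the sketch below dumps leaf 0xb the way a guest kernel would see it. It assumes an x86 build with GCC/Clang's <cpuid.h> and simply prints whatever the virtual CPU reports; the exact shifts and counts depend on the policy the toolstack installed.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    for (unsigned int subleaf = 0; ; subleaf++)
    {
        unsigned int eax, ebx, ecx, edx;

        __cpuid_count(0xb, subleaf, eax, ebx, ecx, edx);

        /* An invalid subleaf reports zero logical processors at its level. */
        if (!(ebx & 0xffff))
            break;

        printf("subleaf %u: next-level shift %u, logical CPUs %u, "
               "level type %u, x2APIC ID %#x\n",
               subleaf, eax & 0x1f, ebx & 0xffff, (ecx >> 8) & 0xff, edx);
    }

    return 0;
}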