@@ -989,6 +989,7 @@ const static void *hvm_get_save_record(const void *ctx, unsigned int type,
static int vcpu_hvm(struct xc_dom_image *dom)
{
+ /* Initialises the BSP */
struct {
struct hvm_save_descriptor header_d;
HVM_SAVE_TYPE(HEADER) header;
@@ -997,6 +998,18 @@ static int vcpu_hvm(struct xc_dom_image *dom)
struct hvm_save_descriptor end_d;
HVM_SAVE_TYPE(END) end;
} bsp_ctx;
+ /* Initialises APICs and MTRRs of every vCPU */
+ struct {
+ struct hvm_save_descriptor header_d;
+ HVM_SAVE_TYPE(HEADER) header;
+ struct hvm_save_descriptor mtrr_d;
+ HVM_SAVE_TYPE(MTRR) mtrr;
+ struct hvm_save_descriptor end_d;
+ HVM_SAVE_TYPE(END) end;
+ } vcpu_ctx;
+ /* Context from full_ctx */
+ const HVM_SAVE_TYPE(MTRR) *mtrr_record;
+ /* Raw context as taken from Xen */
uint8_t *full_ctx = NULL;
int rc;
@@ -1083,51 +1096,42 @@ static int vcpu_hvm(struct xc_dom_image *dom)
bsp_ctx.end_d.instance = 0;
bsp_ctx.end_d.length = HVM_SAVE_LENGTH(END);
- /* TODO: maybe this should be a firmware option instead? */
- if ( !dom->device_model )
+ /* TODO: maybe setting MTRRs should be a firmware option instead? */
+ mtrr_record = hvm_get_save_record(full_ctx, HVM_SAVE_CODE(MTRR), 0);
+
+    if ( !mtrr_record )
{
- struct {
- struct hvm_save_descriptor header_d;
- HVM_SAVE_TYPE(HEADER) header;
- struct hvm_save_descriptor mtrr_d;
- HVM_SAVE_TYPE(MTRR) mtrr;
- struct hvm_save_descriptor end_d;
- HVM_SAVE_TYPE(END) end;
- } mtrr = {
- .header_d = bsp_ctx.header_d,
- .header = bsp_ctx.header,
- .mtrr_d.typecode = HVM_SAVE_CODE(MTRR),
- .mtrr_d.length = HVM_SAVE_LENGTH(MTRR),
- .end_d = bsp_ctx.end_d,
- .end = bsp_ctx.end,
- };
- const HVM_SAVE_TYPE(MTRR) *mtrr_record =
- hvm_get_save_record(full_ctx, HVM_SAVE_CODE(MTRR), 0);
- unsigned int i;
-
- if ( !mtrr_record )
- {
- xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
- "%s: unable to get MTRR save record", __func__);
- goto out;
- }
+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+ "%s: unable to get MTRR save record", __func__);
+ goto out;
+ }
- memcpy(&mtrr.mtrr, mtrr_record, sizeof(mtrr.mtrr));
+ vcpu_ctx.header_d = bsp_ctx.header_d;
+ vcpu_ctx.header = bsp_ctx.header;
+ vcpu_ctx.mtrr_d.typecode = HVM_SAVE_CODE(MTRR);
+ vcpu_ctx.mtrr_d.length = HVM_SAVE_LENGTH(MTRR);
+ vcpu_ctx.mtrr = *mtrr_record;
+ vcpu_ctx.end_d = bsp_ctx.end_d;
+ vcpu_ctx.end = bsp_ctx.end;
- /*
- * Enable MTRR, set default type to WB.
- * TODO: add MMIO areas as UC when passthrough is supported.
- */
- mtrr.mtrr.msr_mtrr_def_type = MTRR_TYPE_WRBACK | MTRR_DEF_TYPE_ENABLE;
+ /*
+ * Enable MTRR, set default type to WB.
+ * TODO: add MMIO areas as UC when passthrough is supported in PVH
+ */
+ if ( !dom->device_model )
+ vcpu_ctx.mtrr.msr_mtrr_def_type = MTRR_TYPE_WRBACK | MTRR_DEF_TYPE_ENABLE;
+
+ for ( unsigned int i = 0; i < dom->max_vcpus; i++ )
+ {
+ vcpu_ctx.mtrr_d.instance = i;
- for ( i = 0; i < dom->max_vcpus; i++ )
+ rc = xc_domain_hvm_setcontext(dom->xch, dom->guest_domid,
+ (uint8_t *)&vcpu_ctx, sizeof(vcpu_ctx));
+ if ( rc != 0 )
{
- mtrr.mtrr_d.instance = i;
- rc = xc_domain_hvm_setcontext(dom->xch, dom->guest_domid,
- (uint8_t *)&mtrr, sizeof(mtrr));
- if ( rc != 0 )
- xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
- "%s: SETHVMCONTEXT failed (rc=%d)", __func__, rc);
+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+ "%s: SETHVMCONTEXT failed (rc=%d)", __func__, rc);
+ goto out;
}
}
Currently used by PVH to set MTRR, will be used by a later patch to set
APIC state. Unconditionally send the hypercall, and gate overriding the
MTRR so it remains functionally equivalent.

While at it, add a missing "goto out" to what was the error condition
in the loop.

In principle this patch shouldn't affect functionality. An extra record
(the MTRR) is sent to the hypervisor per vCPU on HVM, but these records
are identical to those retrieved in the first place so there's no
expected functional change.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
v7:
  * Unchanged
---
 tools/libs/guest/xg_dom_x86.c | 84 ++++++++++++++++++-----------------
 1 file changed, 44 insertions(+), 40 deletions(-)