Message ID | 20170705031225.916-1-haozhong.zhang@intel.com (mailing list archive) |
---|---|
State | New, archived |
>>> On 05.07.17 at 05:12, <haozhong.zhang@intel.com> wrote:
> @@ -878,6 +879,8 @@ long arch_do_domctl(
>          }
>          else
>          {
> +            static const unsigned long vmce_off = offsetof(typeof(*evc), vmce);

I'm unconvinced the static is useful (and not potentially harmful) here.
I'm also not convinced this really needs to be unsigned long (unsigned
int is generally more efficient to deal with).

> @@ -917,9 +920,21 @@ long arch_do_domctl(
>                           offsetof(struct xen_domctl_ext_vcpucontext,
>                                    vmce.caps));
>              BUILD_BUG_ON(sizeof(evc->mcg_cap) != sizeof(evc->vmce.caps));
> -            if ( evc->size >= offsetof(typeof(*evc), vmce) +
> -                              sizeof(evc->vmce) )
> +            if ( evc->size >= vmce_off + sizeof(evc->vmce) )
>                  ret = vmce_restore_vcpu(v, &evc->vmce);
> +            else if ( evc->size >= vmce_off +
> +                                   offsetof(typeof(evc->vmce), mcg_ext_ctl) )
> +            {
> +                /*
> +                 * If migrating from old Xen that uses a smaller 'struct
> +                 * hvm_vmce_vcpu', only restore the components in the
> +                 * old version.
> +                 */
> +                struct hvm_vmce_vcpu vmce = { 0 };

Just { } would suffice.

> +                memcpy(&vmce, &evc->vmce, evc->size - vmce_off);
> +                ret = vmce_restore_vcpu(v, &vmce);
> +            }
>              else if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
>                                     sizeof(evc->mcg_cap) )
>              {

So you really have two choices here: Either mirror/clone what's already
there, or (preferable imo) carefully generalize the existing code. But
please don't use yet another slightly different model.

Jan
On 07/05/17 04:36 -0600, Jan Beulich wrote:
> >>> On 05.07.17 at 05:12, <haozhong.zhang@intel.com> wrote:
> > @@ -878,6 +879,8 @@ long arch_do_domctl(
[..]
> > @@ -917,9 +920,21 @@ long arch_do_domctl(
> >                           offsetof(struct xen_domctl_ext_vcpucontext,
> >                                    vmce.caps));
> >              BUILD_BUG_ON(sizeof(evc->mcg_cap) != sizeof(evc->vmce.caps));
> > -            if ( evc->size >= offsetof(typeof(*evc), vmce) +
> > -                              sizeof(evc->vmce) )
> > +            if ( evc->size >= vmce_off + sizeof(evc->vmce) )
> >                  ret = vmce_restore_vcpu(v, &evc->vmce);
> > +            else if ( evc->size >= vmce_off +
> > +                                   offsetof(typeof(evc->vmce), mcg_ext_ctl) )
> > +            {
> > +                /*
> > +                 * If migrating from old Xen that uses a smaller 'struct
> > +                 * hvm_vmce_vcpu', only restore the components in the
> > +                 * old version.
> > +                 */
> > +                struct hvm_vmce_vcpu vmce = { 0 };
>
> Just { } would suffice.
>
> > +                memcpy(&vmce, &evc->vmce, evc->size - vmce_off);
> > +                ret = vmce_restore_vcpu(v, &vmce);
> > +            }
> >              else if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
> >                                     sizeof(evc->mcg_cap) )
> >              {
>
> So you really have two choices here: Either mirror/clone what's
> already there, or (preferable imo) carefully generalize the existing
> code. But please don't use yet another slightly different model.
>

How about generalizing the above two else cases into

  else if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
                         sizeof(evc->mcg_cap) )
  {
      struct hvm_vmce_vcpu vmce = { };

      memcpy(&vmce, &evc->vmce,
             evc->size - offsetof(typeof(*evc), mcg_cap));
      ret = vmce_restore_vcpu(v, &vmce);
  }

However, I'm not sure whether it's safe to blindly copy what is passed
in 'evc->vmce'. Before this patch, the else-if branch only copied
mcg_cap, and only when evc->size covered at least mcg_cap, which
suggests the answer is no?

Haozhong
>>> On 06.07.17 at 04:03, <haozhong.zhang@intel.com> wrote:
> How about generalizing the above two else cases into
>
>   else if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
>                          sizeof(evc->mcg_cap) )
>   {
>       struct hvm_vmce_vcpu vmce = { };
>
>       memcpy(&vmce, &evc->vmce,
>              evc->size - offsetof(typeof(*evc), mcg_cap));
>       ret = vmce_restore_vcpu(v, &vmce);
>   }

Fundamentally yes, but as you say ...

> However, I'm not sure whether it's safe to blindly copy what is passed
> in 'evc->vmce'. Before this patch, the else-if branch only copied
> mcg_cap, and only when evc->size covered at least mcg_cap, which
> suggests the answer is no?

... I think we ought to at least avoid copying partial fields. Hence I
think the generalization would involve introduction of a table of
sizes / boundaries at which the copied range may end.

Jan
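(For reference, a minimal sketch of the size/boundary table Jan describes, assuming the `vmce_off` variable and the `evc`/`v`/`ret` context from the patch below; the array name `vmce_bounds` is purely illustrative, and this is not the code that was eventually committed:)

    /*
     * Sketch only: truncate the incoming size down to the nearest field
     * boundary of struct hvm_vmce_vcpu that an older sender could have
     * produced, so no partially transferred field is ever consumed.
     */
    const unsigned int vmce_bounds[] = {
        sizeof(struct hvm_vmce_vcpu),                   /* current, full layout */
        offsetof(struct hvm_vmce_vcpu, mcg_ext_ctl),    /* pre-LMCE layout */
        offsetof(struct hvm_vmce_vcpu, mci_ctl2_bank0), /* mcg_cap/caps only */
    };
    struct hvm_vmce_vcpu vmce = { };
    unsigned int i;

    ret = -EINVAL;
    for ( i = 0; i < ARRAY_SIZE(vmce_bounds); i++ )
        if ( evc->size >= vmce_off + vmce_bounds[i] )
        {
            /* Copy only whole fields; everything beyond stays zeroed. */
            memcpy(&vmce, &evc->vmce, vmce_bounds[i]);
            ret = vmce_restore_vcpu(v, &vmce);
            break;
        }

Rounding the copy down to a known boundary keeps one code path for all source versions while still never reading a truncated uint64_t field.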
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 1356f611ab..060e2d0582 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -91,6 +91,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
     v->arch.vmce.mcg_cap = ctxt->caps;
     v->arch.vmce.bank[0].mci_ctl2 = ctxt->mci_ctl2_bank0;
     v->arch.vmce.bank[1].mci_ctl2 = ctxt->mci_ctl2_bank1;
+    v->arch.vmce.mcg_ext_ctl = ctxt->mcg_ext_ctl;
 
     return 0;
 }
@@ -200,6 +201,26 @@ int vmce_rdmsr(uint32_t msr, uint64_t *val)
         mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_CTL %#"PRIx64"\n", cur, *val);
         break;
 
+    case MSR_IA32_MCG_EXT_CTL:
+        /*
+         * If MCG_LMCE_P is present in guest MSR_IA32_MCG_CAP, the LMCE and LOCK
+         * bits are always set in guest MSR_IA32_FEATURE_CONTROL by Xen, so it
+         * does not need to check them here.
+         */
+        if ( cur->arch.vmce.mcg_cap & MCG_LMCE_P )
+        {
+            *val = cur->arch.vmce.mcg_ext_ctl;
+            mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_EXT_CTL %#"PRIx64"\n",
+                       cur, *val);
+        }
+        else
+        {
+            ret = -1;
+            mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_EXT_CTL, not supported\n",
+                       cur);
+        }
+        break;
+
     default:
         ret = mce_bank_msr(cur, msr) ? bank_mce_rdmsr(cur, msr, val) : 0;
         break;
@@ -309,6 +330,16 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
         mce_printk(MCE_VERBOSE, "MCE: %pv: MCG_CAP is r/o\n", cur);
         break;
 
+    case MSR_IA32_MCG_EXT_CTL:
+        if ( (cur->arch.vmce.mcg_cap & MCG_LMCE_P) &&
+             !(val & ~MCG_EXT_CTL_LMCE_EN) )
+            cur->arch.vmce.mcg_ext_ctl = val;
+        else
+            ret = -1;
+        mce_printk(MCE_VERBOSE, "MCE: %pv: wr MCG_EXT_CTL %"PRIx64"%s\n",
+                   cur, val, (ret == -1) ? ", not supported" : "");
+        break;
+
     default:
         ret = mce_bank_msr(cur, msr) ? bank_mce_wrmsr(cur, msr, val) : 0;
         break;
@@ -327,7 +358,8 @@ static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         struct hvm_vmce_vcpu ctxt = {
             .caps = v->arch.vmce.mcg_cap,
             .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
-            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2
+            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2,
+            .mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl,
         };
 
         err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index f40e989fd8..4ae9fc6451 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -871,6 +871,7 @@ long arch_do_domctl(
             evc->vmce.caps = v->arch.vmce.mcg_cap;
             evc->vmce.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
             evc->vmce.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
+            evc->vmce.mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
 
             ret = 0;
             vcpu_unpause(v);
@@ -878,6 +879,8 @@ long arch_do_domctl(
         }
         else
         {
+            static const unsigned long vmce_off = offsetof(typeof(*evc), vmce);
+
             if ( d == currd ) /* no domain_pause() */
                 break;
             ret = -EINVAL;
@@ -917,9 +920,21 @@ long arch_do_domctl(
                          offsetof(struct xen_domctl_ext_vcpucontext,
                                   vmce.caps));
             BUILD_BUG_ON(sizeof(evc->mcg_cap) != sizeof(evc->vmce.caps));
-            if ( evc->size >= offsetof(typeof(*evc), vmce) +
-                              sizeof(evc->vmce) )
+            if ( evc->size >= vmce_off + sizeof(evc->vmce) )
                 ret = vmce_restore_vcpu(v, &evc->vmce);
+            else if ( evc->size >= vmce_off +
+                                   offsetof(typeof(evc->vmce), mcg_ext_ctl) )
+            {
+                /*
+                 * If migrating from old Xen that uses a smaller 'struct
+                 * hvm_vmce_vcpu', only restore the components in the
+                 * old version.
+                 */
+                struct hvm_vmce_vcpu vmce = { 0 };
+
+                memcpy(&vmce, &evc->vmce, evc->size - vmce_off);
+                ret = vmce_restore_vcpu(v, &vmce);
+            }
             else if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
                                    sizeof(evc->mcg_cap) )
             {
diff --git a/xen/include/asm-x86/mce.h b/xen/include/asm-x86/mce.h
index 56ad1f92dd..35f9962638 100644
--- a/xen/include/asm-x86/mce.h
+++ b/xen/include/asm-x86/mce.h
@@ -27,6 +27,7 @@ struct vmce_bank {
 struct vmce {
     uint64_t mcg_cap;
     uint64_t mcg_status;
+    uint64_t mcg_ext_ctl;
     spinlock_t lock;
     struct vmce_bank bank[GUEST_MC_BANK_NUM];
 };
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 816973b9c2..fd7bf3fb38 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -610,6 +610,7 @@ struct hvm_vmce_vcpu {
     uint64_t caps;
     uint64_t mci_ctl2_bank0;
     uint64_t mci_ctl2_bank1;
+    uint64_t mcg_ext_ctl;
 };
 
 DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);
If MCG_LMCE_P is present in guest MSR_IA32_MCG_CAP, then allow guest
to read/write MSR_IA32_MCG_EXT_CTL.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
Changes in v6:
 * Remove Jan's R-b.
 * Handle hvm_vmce_vcpu::mcg_ext_ctl in XEN_DOMCTL_{set,get}_ext_vcpucontext.
---
 xen/arch/x86/cpu/mcheck/vmce.c         | 34 +++++++++++++++++++++++++++++++++-
 xen/arch/x86/domctl.c                  | 19 +++++++++++++++++--
 xen/include/asm-x86/mce.h              |  1 +
 xen/include/public/arch-x86/hvm/save.h |  1 +
 4 files changed, 52 insertions(+), 3 deletions(-)
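(For context, a rough sketch of how a guest kernel might use the MSR behaviour this patch provides, assuming hypothetical rdmsr()/wrmsr() helpers and the architectural bit positions, MCG_LMCE_P being bit 27 of IA32_MCG_CAP and LMCE_EN bit 0 of IA32_MCG_EXT_CTL; this is illustrative only, not part of the patch:)

    #include <stdint.h>

    #define MSR_IA32_MCG_CAP      0x00000179
    #define MSR_IA32_MCG_EXT_CTL  0x000004d0
    #define MCG_LMCE_P            (1ULL << 27)  /* LMCE capability reported */
    #define MCG_EXT_CTL_LMCE_EN   (1ULL << 0)   /* opt in to local MCEs */

    /* Hypothetical guest helpers; any kernel provides equivalents. */
    extern uint64_t rdmsr(uint32_t msr);
    extern void wrmsr(uint32_t msr, uint64_t val);

    /* Opt in to LMCE only if the (virtual) MCG_CAP advertises it. */
    static void enable_lmce_if_available(void)
    {
        uint64_t cap = rdmsr(MSR_IA32_MCG_CAP);

        if ( cap & MCG_LMCE_P )
            /* Only LMCE_EN may be set; other bits are rejected by the handler above. */
            wrmsr(MSR_IA32_MCG_EXT_CTL, MCG_EXT_CTL_LMCE_EN);
    }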