--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -107,6 +107,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
struct segment_register cs, ds, ss, es, tr;
const char *errstr;
int rc;
+ unsigned long valid;
if ( ctx->pad != 0 )
return -EINVAL;
@@ -264,10 +265,12 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
if ( v->arch.hvm.guest_efer & EFER_LME )
v->arch.hvm.guest_efer |= EFER_LMA;
- if ( v->arch.hvm.guest_cr[4] & ~hvm_cr4_guest_valid_bits(d) )
+ valid = hvm_cr4_guest_valid_bits(d);
+ if ( v->arch.hvm.guest_cr[4] & ~valid )
{
- gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
- v->arch.hvm.guest_cr[4]);
+ gprintk(XENLOG_ERR, "Bad CR4 value: %#lx (valid: %#lx, rejected: %#lx)\n",
+ v->arch.hvm.guest_cr[4], valid,
+ v->arch.hvm.guest_cr[4] & ~valid);
return -EINVAL;
}
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -991,6 +991,7 @@ static int cf_check hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
struct hvm_hw_cpu ctxt;
struct segment_register seg;
const char *errstr;
+ unsigned long valid;
/* Which vcpu is this? */
if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
@@ -1016,10 +1017,11 @@ static int cf_check hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
return -EINVAL;
}
- if ( ctxt.cr4 & ~hvm_cr4_guest_valid_bits(d) )
+ valid = hvm_cr4_guest_valid_bits(d);
+ if ( ctxt.cr4 & ~valid )
{
- printk(XENLOG_G_ERR "HVM%d restore: bad CR4 %#" PRIx64 "\n",
- d->domain_id, ctxt.cr4);
+ printk(XENLOG_G_ERR "HVM%d restore: bad CR4 %#lx (valid: %#lx, rejected: %#lx)\n",
+ d->domain_id, ctxt.cr4, valid, ctxt.cr4 & ~valid);
return -EINVAL;
}
@@ -2456,13 +2458,13 @@ int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer)
int hvm_set_cr4(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
- unsigned long old_cr;
+ unsigned long old_cr, valid = hvm_cr4_guest_valid_bits(v->domain);
- if ( value & ~hvm_cr4_guest_valid_bits(v->domain) )
+ if ( value & ~valid )
{
HVM_DBG_LOG(DBG_LEVEL_1,
- "Guest attempts to set reserved bit in CR4: %lx",
- value);
+ "Guest attempts to set reserved bit in CR4: %#lx (valid: %#lx, rejected %#lx)",
+ value, valid, value & ~valid);
return X86EMUL_EXCEPTION;
}
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -124,8 +124,8 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
valid = hvm_cr4_guest_valid_bits(v->domain);
if ( cr4 & ~valid )
- PRINTF("CR4: invalid bits are set (%#"PRIx64", valid: %#"PRIx64")\n",
- cr4, valid);
+ PRINTF("CR4: invalid value: %#lx (valid: %#lx, rejected: %#lx)\n",
+ cr4, valid, cr4 & ~valid);
if ( vmcb_get_dr6(vmcb) >> 32 )
PRINTF("DR6: bits [63:32] are not zero (%#"PRIx64")\n",
Some of the current users of hvm_cr4_guest_valid_bits() don't print the
valid mask in case of error, and thus the resulting error messages are
not as helpful as they could be.

Amend callers to always print the value of hvm_cr4_guest_valid_bits()
together with the rejected bits in the checked value.

Also take the opportunity to adjust all the users to use the same print
formatter.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v1:
 - Use %#lx formatter uniformly.
 - Print rejected bits together with the valid mask.
---
 xen/arch/x86/hvm/domain.c       |  9 ++++++---
 xen/arch/x86/hvm/hvm.c          | 16 +++++++++-------
 xen/arch/x86/hvm/svm/svmdebug.c |  4 ++--
 3 files changed, 17 insertions(+), 12 deletions(-)
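
For readers without the Xen tree at hand, below is a minimal standalone
sketch of the check-and-report pattern the patch converges on: compute the
valid mask once, then log the offending value, the valid mask and the
rejected bits together.  get_cr4_valid_bits() is a hypothetical stand-in
for hvm_cr4_guest_valid_bits(), and the mask value is made up purely for
illustration.

/*
 * Sketch only: the helper and mask below are illustrative, not the real
 * Xen CR4 policy computation.
 */
#include <stdio.h>

static unsigned long get_cr4_valid_bits(void)
{
    return 0x000407fbUL;            /* made-up mask for the example */
}

static int check_cr4(unsigned long cr4)
{
    unsigned long valid = get_cr4_valid_bits();

    if ( cr4 & ~valid )
    {
        /* Report value, mask and rejected bits in one message. */
        fprintf(stderr, "Bad CR4 value: %#lx (valid: %#lx, rejected: %#lx)\n",
                cr4, valid, cr4 & ~valid);
        return -1;
    }

    return 0;
}

int main(void)
{
    /* 0x2000 lies outside the made-up mask, so it gets rejected. */
    return check_cr4(0x2000UL) ? 1 : 0;
}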