@@ -1191,7 +1191,7 @@ static void __init calibrate_APIC_clock(void)
* Setup the APIC counter to maximum. There is no way the lapic
* can underflow in the 100ms detection time frame.
*/
- __setup_APIC_LVTT(0xffffffff);
+ __setup_APIC_LVTT(0xffffffffU);

bus_freq = calibrate_apic_timer();
if ( !bus_freq )
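
Worth noting for the whole series of changes below: most of these suffixes do not change the constants' types. An unsuffixed hexadecimal constant that does not fit in int already takes an unsigned type under C's rules (C99 6.4.4.1), so the suffix mostly documents what the compiler was doing anyway. A minimal standalone sketch (not part of the patch; assumes an LP64 target for the 64-bit case):

    #include <stdio.h>

    #define TYPE_NAME(x) _Generic((x),               \
        int: "int",                                  \
        unsigned int: "unsigned int",                 \
        long: "long",                                 \
        unsigned long: "unsigned long",               \
        long long: "long long",                       \
        unsigned long long: "unsigned long long")

    int main(void)
    {
        /* Exceeds INT_MAX, so already unsigned int without the suffix. */
        printf("0xffffffff         : %s\n", TYPE_NAME(0xffffffff));
        printf("0xffffffffU        : %s\n", TYPE_NAME(0xffffffffU));

        /* Exceeds LONG_MAX on LP64, so already unsigned long unsuffixed;
         * the .rax = 0xfedcba9876543210UL change further down is the same
         * story. */
        printf("0xfedcba9876543210 : %s\n", TYPE_NAME(0xfedcba9876543210));
        return 0;
    }
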
@@ -321,7 +321,7 @@ static void recalculate_misc(struct cpu_policy *p)
p->extd.vendor_edx = p->basic.vendor_edx;

p->extd.raw_fms = p->basic.raw_fms;
- p->extd.raw[0x1].b &= 0xff00ffff;
+ p->extd.raw[0x1].b &= 0xff00ffffU;
p->extd.e1d |= p->basic._1d & CPUID_COMMON_1D_FEATURES;

p->extd.raw[0x8].a &= 0x0000ffff; /* GuestMaxPhysAddr hidden. */
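
As an aside, the 0xff00ffff mask in this hunk clears bits 16-23 of the stored EBX value while preserving the rest. A quick standalone illustration with an arbitrary input:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t b = 0xdeadbeefU;    /* arbitrary example value */

        b &= 0xff00ffffU;            /* clear bits 16-23 */
        printf("%#010x\n", b);       /* prints 0xde00beef */
        return 0;
    }
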
@@ -378,10 +378,10 @@ static void __init calculate_host_policy(void)
* this information.
*/
if ( cpu_has_lfence_dispatch )
- max_extd_leaf = max(max_extd_leaf, 0x80000021);
+ max_extd_leaf = max(max_extd_leaf, 0x80000021U);

- p->extd.max_leaf = 0x80000000 | min_t(uint32_t, max_extd_leaf & 0xffff,
- ARRAY_SIZE(p->extd.raw) - 1);
+ p->extd.max_leaf = 0x80000000U | min_t(uint32_t, max_extd_leaf & 0xffff,
+ ARRAY_SIZE(p->extd.raw) - 1);

x86_cpu_featureset_to_policy(boot_cpu_data.x86_capability, p);
recalculate_xstate(p);
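
The expression being retyped here follows a pattern used in several places: mask the leaf index down with & 0xffff, clamp it to the number of leaves p->extd.raw can back, then re-add the 0x80000000 base. A standalone sketch of that arithmetic, with the array size a made-up stand-in for ARRAY_SIZE(p->extd.raw):

    #include <stdint.h>
    #include <stdio.h>

    #define RAW_SLOTS 0x22U /* assumed stand-in for ARRAY_SIZE(p->extd.raw) */

    static uint32_t clamp_extd_leaf(uint32_t max_extd_leaf)
    {
        uint32_t idx = max_extd_leaf & 0xffffU;    /* 0x80000021 -> 0x21 */

        if ( idx > RAW_SLOTS - 1 )
            idx = RAW_SLOTS - 1;

        return 0x80000000U | idx;                  /* back to leaf space */
    }

    int main(void)
    {
        printf("%#x\n", clamp_extd_leaf(0x80000021U)); /* 0x80000021 */
        printf("%#x\n", clamp_extd_leaf(0x8000ffffU)); /* 0x80000021, clamped */
        return 0;
    }
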
@@ -793,11 +793,11 @@ void recalculate_cpuid_policy(struct domain *d)

p->basic.max_leaf = min(p->basic.max_leaf, max->basic.max_leaf);
p->feat.max_subleaf = min(p->feat.max_subleaf, max->feat.max_subleaf);
- p->extd.max_leaf = 0x80000000 | min(p->extd.max_leaf & 0xffff,
- ((p->x86_vendor & (X86_VENDOR_AMD |
- X86_VENDOR_HYGON))
- ? CPUID_GUEST_NR_EXTD_AMD
- : CPUID_GUEST_NR_EXTD_INTEL) - 1);
+ p->extd.max_leaf = 0x80000000U | min(p->extd.max_leaf & 0xffff,
+ ((p->x86_vendor & (X86_VENDOR_AMD |
+ X86_VENDOR_HYGON))
+ ? CPUID_GUEST_NR_EXTD_AMD
+ : CPUID_GUEST_NR_EXTD_INTEL) - 1);

x86_cpu_policy_to_featureset(p, fs);
x86_cpu_policy_to_featureset(max, max_fs);
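
The vendor test above relies on Xen's X86_VENDOR_* constants being single-bit flags, so membership in {AMD, Hygon} is a single AND against their OR. A sketch with illustrative flag values, not Xen's actual definitions:

    #include <stdio.h>

    /* Illustrative values only; Xen defines its own X86_VENDOR_* bits. */
    #define X86_VENDOR_INTEL (1U << 0)
    #define X86_VENDOR_AMD   (1U << 1)
    #define X86_VENDOR_HYGON (1U << 2)

    static const char *extd_limit(unsigned int vendor)
    {
        return (vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON))
               ? "CPUID_GUEST_NR_EXTD_AMD"
               : "CPUID_GUEST_NR_EXTD_INTEL";
    }

    int main(void)
    {
        printf("AMD   -> %s\n", extd_limit(X86_VENDOR_AMD));
        printf("Intel -> %s\n", extd_limit(X86_VENDOR_INTEL));
        return 0;
    }
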
@@ -93,7 +93,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
}
break;

- case 0x40000000 ... 0x400000ff:
+ case 0x40000000U ... 0x400000ffU:
if ( is_viridian_domain(d) )
return cpuid_viridian_leaves(v, leaf, subleaf, res);

@@ -103,10 +103,10 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
* Intel reserve up until 0x4fffffff for hypervisor use. AMD reserve
* only until 0x400000ff, but we already use double that.
*/
- case 0x40000100 ... 0x400001ff:
+ case 0x40000100U ... 0x400001ffU:
return cpuid_hypervisor_leaves(v, leaf, subleaf, res);

- case 0x80000000 ... 0x80000000 + CPUID_GUEST_NR_EXTD - 1:
+ case 0x80000000U ... 0x80000000U + CPUID_GUEST_NR_EXTD - 1:
ASSERT((p->extd.max_leaf & 0xffff) < ARRAY_SIZE(p->extd.raw));
if ( (leaf & 0xffff) > min_t(uint32_t, p->extd.max_leaf & 0xffff,
ARRAY_SIZE(p->extd.raw) - 1) )
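
The `case lo ... hi:` form is the GNU case-range extension (GCC/Clang); with both bounds suffixed, the entire range is matched in unsigned arithmetic. A compressed sketch of the dispatch shape, with placeholder strings standing in for the real handlers:

    #include <stdint.h>
    #include <stdio.h>

    static const char *classify_leaf(uint32_t leaf)
    {
        switch ( leaf )
        {
        case 0x40000000U ... 0x400000ffU: return "first hypervisor range";
        case 0x40000100U ... 0x400001ffU: return "second hypervisor range";
        default:                          return "something else";
        }
    }

    int main(void)
    {
        printf("%s\n", classify_leaf(0x40000042U)); /* first hypervisor range */
        printf("%s\n", classify_leaf(0x40000100U)); /* second hypervisor range */
        return 0;
    }
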
@@ -352,7 +352,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
}
break;

- case 0x80000001:
+ case 0x80000001U:
/* SYSCALL is hidden outside of long mode on Intel. */
if ( p->x86_vendor == X86_VENDOR_INTEL &&
is_hvm_domain(d) && !hvm_long_mode_active(v) )
@@ -141,7 +141,7 @@ static int __init cf_check stub_selftest(void)
.rax = 0x0123456789abcdef,
.res.fields.trapnr = X86_EXC_GP },
{ .opc = { endbr64, 0x02, 0x04, 0x04, 0xc3 }, /* add (%rsp,%rax),%al */
- .rax = 0xfedcba9876543210,
+ .rax = 0xfedcba9876543210UL,
.res.fields.trapnr = X86_EXC_SS },
{ .opc = { endbr64, 0xcc, 0xc3, 0xc3, 0xc3 }, /* int3 */
.res.fields.trapnr = X86_EXC_BP },
@@ -12,7 +12,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
* possible #PF at (NULL + a little) which has security implications in the
* context of PV guests.
*/
-#define INVALID_PERCPU_AREA (0x8000000000000000L - (long)__per_cpu_start)
+#define INVALID_PERCPU_AREA (0x8000000000000000UL - (unsigned long)__per_cpu_start)
#define PERCPU_ORDER get_order_from_bytes(__per_cpu_data_end - __per_cpu_start)

void __init percpu_init_areas(void)
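
Two things are going on in the INVALID_PERCPU_AREA change. On LP64, 0x8000000000000000 does not fit in signed long, so even with the old L suffix the constant was already unsigned long; the UL suffix and matching cast simply write the real types down. The value itself is picked so that adding it to any per-cpu symbol's address lands just past 0x8000000000000000, a non-canonical x86-64 address that faults outright instead of landing near NULL. A standalone sketch with hypothetical link-time addresses:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical link-time addresses standing in for Xen's symbols. */
        unsigned long per_cpu_start = 0xffff82d040400000UL;
        unsigned long some_per_cpu_var = per_cpu_start + 0x123UL;

        unsigned long invalid_area = 0x8000000000000000UL - per_cpu_start;

        /* Resolving a per-cpu variable adds the cpu's offset to the symbol
         * address; against the poison offset the result is non-canonical. */
        printf("%#lx\n", invalid_area + some_per_cpu_var); /* 0x8000000000000123 */
        return 0;
    }
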
@@ -191,7 +191,7 @@ static struct feat_node *feat_l2_cat;
static struct feat_node *feat_mba;

/* Common functions */
-#define cat_default_val(len) (0xffffffff >> (32 - (len)))
+#define cat_default_val(len) (0xffffffffU >> (32 - (len)))

/*
* get_cdp_data - get DATA COS register value from input COS ID.
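
cat_default_val() builds an all-ones capacity bitmask of the given length: shifting 32 set bits right by (32 - len) leaves exactly len low bits set. (len must be in 1..32; len = 0 would shift by 32, which is undefined behaviour.) A worked example:

    #include <stdio.h>

    #define cat_default_val(len) (0xffffffffU >> (32 - (len)))

    int main(void)
    {
        printf("%#x\n", cat_default_val(11)); /* 0x7ff   - 11 bits set */
        printf("%#x\n", cat_default_val(20)); /* 0xfffff - 20 bits set */
        return 0;
    }
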
@@ -406,10 +406,10 @@ static void __init print_details(enum ind_thunk thunk)
cpuid_count(7, 0, &max, &tmp, &tmp, &_7d0);
if ( max >= 2 )
cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2);
- if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 )
- cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp);
- if ( boot_cpu_data.extended_cpuid_level >= 0x80000021 )
- cpuid(0x80000021, &e21a, &tmp, &tmp, &tmp);
+ if ( boot_cpu_data.extended_cpuid_level >= 0x80000008U )
+ cpuid(0x80000008U, &tmp, &e8b, &tmp, &tmp);
+ if ( boot_cpu_data.extended_cpuid_level >= 0x80000021U )
+ cpuid(0x80000021U, &e21a, &tmp, &tmp, &tmp);

if ( cpu_has_arch_caps )
rdmsrl(MSR_ARCH_CAPABILITIES, caps);
@@ -1612,8 +1612,8 @@ void __init init_speculation_mitigations(void)
* TODO: Adjust cpu_has_svm_spec_ctrl to be usable earlier on boot.
*/
if ( opt_msr_sc_hvm &&
- (boot_cpu_data.extended_cpuid_level >= 0x8000000a) &&
- (cpuid_edx(0x8000000a) & (1u << SVM_FEATURE_SPEC_CTRL)) )
+ (boot_cpu_data.extended_cpuid_level >= 0x8000000aU) &&
+ (cpuid_edx(0x8000000aU) & (1u << SVM_FEATURE_SPEC_CTRL)) )
setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
}

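Both of the last two hunks use the same guard pattern: check the maximum extended leaf before querying one, then test individual feature bits in the result. A userspace sketch of that pattern via GCC's <cpuid.h>; the bit index for SVM_FEATURE_SPEC_CTRL is an assumption here, not taken from Xen's headers:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, max_extd = 0;

        /* Leaf 0x80000000 EAX reports the highest extended leaf available. */
        if ( __get_cpuid(0x80000000U, &eax, &ebx, &ecx, &edx) )
            max_extd = eax;

        if ( max_extd >= 0x8000000aU &&
             __get_cpuid(0x8000000aU, &eax, &ebx, &ecx, &edx) &&
             (edx & (1U << 20)) )   /* assumed SVM_FEATURE_SPEC_CTRL bit */
            printf("SVM SPEC_CTRL support present\n");
        else
            printf("leaf out of range or feature absent\n");

        return 0;
    }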