@@ -234,22 +234,53 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
                        ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                        0 /* Invalid value */)

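+/*
+ * Return the value for CPUID[4].EAX[25:14] ("maximum number of
+ * addressable IDs for logical processors sharing this cache"): the
+ * number of APIC IDs spanned by the topology level at which the cache
+ * is shared, minus 1. For example, with 2 threads per core,
+ * apicid_core_offset() is 1, so a core-shared cache yields
+ * (1 << 1) - 1 = 1.
+ */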
+static uint32_t max_processor_ids_for_cache(X86CPUTopoInfo *topo_info,
+                                            enum CPUTopoLevel share_level)
+{
+    uint32_t num_ids = 0;
+
+    switch (share_level) {
+    case CPU_TOPO_LEVEL_CORE:
+        num_ids = 1 << apicid_core_offset(topo_info);
+        break;
+    case CPU_TOPO_LEVEL_DIE:
+        num_ids = 1 << apicid_die_offset(topo_info);
+        break;
+    case CPU_TOPO_LEVEL_PACKAGE:
+        num_ids = 1 << apicid_pkg_offset(topo_info);
+        break;
+    default:
+        /*
+         * There is currently no use case for the SMT or MODULE levels,
+         * so assert directly to facilitate debugging.
+         */
+        g_assert_not_reached();
+    }
+
+    return num_ids - 1;
+}
+
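+/*
+ * Return the value for CPUID[4].EAX[31:26] ("maximum number of
+ * addressable IDs for processor cores in the physical package"): the
+ * number of core APIC IDs addressable within one package, minus 1.
+ */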
+static uint32_t max_core_ids_in_package(X86CPUTopoInfo *topo_info)
+{
+    uint32_t num_cores = 1 << (apicid_pkg_offset(topo_info) -
+                               apicid_core_offset(topo_info));
+    return num_cores - 1;
+}
+
 /* Encode cache info for CPUID[4] */
 static void encode_cache_cpuid4(CPUCacheInfo *cache,
-                                int num_apic_ids, int num_cores,
+                                X86CPUTopoInfo *topo_info,
                                 uint32_t *eax, uint32_t *ebx,
                                 uint32_t *ecx, uint32_t *edx)
 {
     assert(cache->size == cache->line_size * cache->associativity *
            cache->partitions * cache->sets);

-    assert(num_apic_ids > 0);
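+    /*
+     * CPUID[4].EAX[31:26] holds the maximum addressable core IDs in the
+     * physical package and EAX[25:14] the maximum addressable IDs of
+     * logical processors sharing this cache; both fields encode a count
+     * minus 1. The sharing field is now derived from the cache's
+     * share_level instead of caller-supplied counts.
+     */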
     *eax = CACHE_TYPE(cache->type) |
            CACHE_LEVEL(cache->level) |
            (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
-           ((num_cores - 1) << 26) |
-           ((num_apic_ids - 1) << 14);
+           (max_core_ids_in_package(topo_info) << 26) |
+           (max_processor_ids_for_cache(topo_info, cache->share_level) << 14);

     assert(cache->line_size > 0);
     assert(cache->partitions > 0);
@@ -6258,56 +6289,41 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                 int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);

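+                /*
+                 * The host cache's addressable-ID fields need not match
+                 * the guest topology; override EAX[31:26] and EAX[25:14]
+                 * with values computed from topo_info.
+                 */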
                 if (cores_per_pkg > 1) {
-                    int addressable_cores_offset =
-                        apicid_pkg_offset(&topo_info) -
-                        apicid_core_offset(&topo_info);
-
                     *eax &= ~0xFC000000;
-                    *eax |= ((1 << addressable_cores_offset) - 1) << 26;
+                    *eax |= max_core_ids_in_package(&topo_info) << 26;
                 }
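+                /*
+                 * If the host cache is shared by more vCPUs than one
+                 * guest package contains, clamp the sharing field to the
+                 * package level.
+                 */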
                 if (host_vcpus_per_cache > cpus_per_pkg) {
-                    int pkg_offset = apicid_pkg_offset(&topo_info);
-
                     *eax &= ~0x3FFC000;
-                    *eax |= ((1 << pkg_offset) - 1) << 14;
+                    *eax |=
+                        max_processor_ids_for_cache(&topo_info,
+                                                    CPU_TOPO_LEVEL_PACKAGE) << 14;
                 }
             }
         } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
             *eax = *ebx = *ecx = *edx = 0;
         } else {
             *eax = 0;
-            int addressable_cores_offset = apicid_pkg_offset(&topo_info) -
-                                           apicid_core_offset(&topo_info);
-            int core_offset, die_offset;

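+            /*
+             * encode_cache_cpuid4() now derives EAX[25:14] from each
+             * cache's share_level, so no per-level APIC ID offsets are
+             * needed here.
+             */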
             switch (count) {
             case 0: /* L1 dcache info */
-                core_offset = apicid_core_offset(&topo_info);
                 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
-                                    (1 << core_offset),
-                                    (1 << addressable_cores_offset),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
                 break;
             case 1: /* L1 icache info */
-                core_offset = apicid_core_offset(&topo_info);
                 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
-                                    (1 << core_offset),
-                                    (1 << addressable_cores_offset),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
                 break;
             case 2: /* L2 cache info */
-                core_offset = apicid_core_offset(&topo_info);
                 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
-                                    (1 << core_offset),
-                                    (1 << addressable_cores_offset),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
                 break;
             case 3: /* L3 cache info */
-                die_offset = apicid_die_offset(&topo_info);
                 if (cpu->enable_l3_cache) {
                     encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
-                                        (1 << die_offset),
-                                        (1 << addressable_cores_offset),
+                                        &topo_info,
                                         eax, ebx, ecx, edx);
                     break;
                 }