@@ -1256,7 +1256,7 @@ int get_cpu_id(u32 acpi_id)
for ( i = 0; i < nr_cpu_ids; i++ )
{
- if ( apic_id == x86_cpu_to_apicid[i] )
+ if ( apic_id == cpu_physical_id(i) )
return i;
}
@@ -1362,7 +1362,7 @@ long set_cx_pminfo(uint32_t acpi_id, struct xen_processor_power *power)
if ( !cpu_online(cpu_id) )
{
- uint32_t apic_id = x86_cpu_to_apicid[cpu_id];
+ uint32_t apic_id = cpu_physical_id(cpu_id);
/*
* If we've just learned of more available C states, wake the CPU if
@@ -91,7 +91,7 @@ unsigned int acpi_get_processor_id(unsigned int cpu)
{
unsigned int acpiid, apicid;
- if ((apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID)
+ if ((apicid = cpu_physical_id(cpu)) == BAD_APICID)
return INVALID_ACPIID;
for (acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++)
@@ -950,7 +950,7 @@ __next:
*/
if (boot_cpu_physical_apicid == -1U)
boot_cpu_physical_apicid = get_apic_id();
- x86_cpu_to_apicid[0] = get_apic_id();
+ cpu_physical_id(0) = get_apic_id();
ioapic_init();
}
@@ -1202,8 +1202,8 @@ static void __init ivt_idle_state_table_update(void)
unsigned int cpu, max_apicid = boot_cpu_physical_apicid;
for_each_present_cpu(cpu)
- if (max_apicid < x86_cpu_to_apicid[cpu])
- max_apicid = x86_cpu_to_apicid[cpu];
+ if (max_apicid < cpu_physical_id(cpu))
+ max_apicid = cpu_physical_id(cpu);
switch (apicid_to_socket(max_apicid)) {
case 0: case 1:
/* 1 and 2 socket systems use default ivt_cstates */
@@ -1615,7 +1615,7 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
cpu_id.phys_id =
- (uint64_t)x86_cpu_to_apicid[v->vcpu_id] |
+ (uint64_t)cpu_physical_id(v->vcpu_id) |
((uint64_t)acpi_get_processor_id(v->vcpu_id) << 32);
rc = -EFAULT;
@@ -187,7 +187,7 @@ static int MP_processor_info_x(struct mpc_config_processor *m,
" Processor with apicid %i ignored\n", apicid);
return cpu;
}
- x86_cpu_to_apicid[cpu] = apicid;
+ cpu_physical_id(cpu) = apicid;
cpumask_set_cpu(cpu, &cpu_present_map);
}
@@ -822,12 +822,12 @@ void mp_unregister_lapic(uint32_t apic_id, uint32_t cpu)
if (!cpu || (apic_id == boot_cpu_physical_apicid))
return;
- if (x86_cpu_to_apicid[cpu] != apic_id)
+ if (cpu_physical_id(cpu) != apic_id)
return;
physid_clear(apic_id, phys_cpu_present_map);
- x86_cpu_to_apicid[cpu] = BAD_APICID;
+ cpu_physical_id(cpu) = BAD_APICID;
cpumask_clear_cpu(cpu, &cpu_present_map);
}
@@ -70,7 +70,7 @@ void __init init_cpu_to_node(void)
for ( i = 0; i < nr_cpu_ids; i++ )
{
- u32 apicid = x86_cpu_to_apicid[i];
+ u32 apicid = cpu_physical_id(i);
if ( apicid == BAD_APICID )
continue;
node = apicid < MAX_LOCAL_APIC ? apicid_to_node[apicid] : NUMA_NO_NODE;
@@ -588,7 +588,7 @@ ret_t do_platform_op(
}
else
{
- g_info->apic_id = x86_cpu_to_apicid[g_info->xen_cpuid];
+ g_info->apic_id = cpu_physical_id(g_info->xen_cpuid);
g_info->acpi_id = acpi_get_processor_id(g_info->xen_cpuid);
ASSERT(g_info->apic_id != BAD_APICID);
g_info->flags = 0;
@@ -319,7 +319,7 @@ static void __init init_idle_domain(void)
void srat_detect_node(int cpu)
{
nodeid_t node;
- u32 apicid = x86_cpu_to_apicid[cpu];
+ u32 apicid = cpu_physical_id(cpu);
node = apicid < MAX_LOCAL_APIC ? apicid_to_node[apicid] : NUMA_NO_NODE;
if ( node == NUMA_NO_NODE )
@@ -346,7 +346,7 @@ static void __init normalise_cpu_order(void)
for_each_present_cpu ( i )
{
- apicid = x86_cpu_to_apicid[i];
+ apicid = cpu_physical_id(i);
min_diff = min_cpu = ~0u;
/*
@@ -357,12 +357,12 @@ static void __init normalise_cpu_order(void)
j < nr_cpu_ids;
j = cpumask_next(j, &cpu_present_map) )
{
- diff = x86_cpu_to_apicid[j] ^ apicid;
+ diff = cpu_physical_id(j) ^ apicid;
while ( diff & (diff-1) )
diff &= diff-1;
if ( (diff < min_diff) ||
((diff == min_diff) &&
- (x86_cpu_to_apicid[j] < x86_cpu_to_apicid[min_cpu])) )
+ (cpu_physical_id(j) < cpu_physical_id(min_cpu))) )
{
min_diff = diff;
min_cpu = j;
@@ -378,9 +378,9 @@ static void __init normalise_cpu_order(void)
/* Switch the best-matching CPU with the next CPU in logical order. */
j = cpumask_next(i, &cpu_present_map);
- apicid = x86_cpu_to_apicid[min_cpu];
- x86_cpu_to_apicid[min_cpu] = x86_cpu_to_apicid[j];
- x86_cpu_to_apicid[j] = apicid;
+ apicid = cpu_physical_id(min_cpu);
+ cpu_physical_id(min_cpu) = cpu_physical_id(j);
+ cpu_physical_id(j) = apicid;
}
}
@@ -1154,7 +1154,7 @@ void __init smp_prepare_cpus(void)
print_cpu_info(0);
boot_cpu_physical_apicid = get_apic_id();
- x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
+ cpu_physical_id(0) = boot_cpu_physical_apicid;
stack_base[0] = (void *)((unsigned long)stack_start & ~(STACK_SIZE - 1));
@@ -1374,7 +1374,7 @@ int __cpu_up(unsigned int cpu)
{
int apicid, ret;
- if ( (apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID )
+ if ( (apicid = cpu_physical_id(cpu)) == BAD_APICID )
return -ENODEV;
if ( (!x2apic_enabled && apicid >= APIC_ALL_CPUS) ||
@@ -589,7 +589,7 @@ static bool __init check_smt_enabled(void)
* has a non-zero thread id component indicates that SMT is active.
*/
for_each_present_cpu ( cpu )
- if ( x86_cpu_to_apicid[cpu] & (boot_cpu_data.x86_num_siblings - 1) )
+ if ( cpu_physical_id(cpu) & (boot_cpu_data.x86_num_siblings - 1) )
return true;
return false;
@@ -58,7 +58,7 @@ static long cf_check smt_up_down_helper(void *data)
for_each_present_cpu ( cpu )
{
/* Skip primary siblings (those whose thread id is 0). */
- if ( !(x86_cpu_to_apicid[cpu] & sibling_mask) )
+ if ( !(cpu_physical_id(cpu) & sibling_mask) )
continue;
if ( !up && core_parking_remove(cpu) )
This is done in preparation for moving data from x86_cpu_to_apicid[] elsewhere. Signed-off-by: Krystian Hebel <krystian.hebel@3mdeb.com> --- xen/arch/x86/acpi/cpu_idle.c | 4 ++-- xen/arch/x86/acpi/lib.c | 2 +- xen/arch/x86/apic.c | 2 +- xen/arch/x86/cpu/mwait-idle.c | 4 ++-- xen/arch/x86/domain.c | 2 +- xen/arch/x86/mpparse.c | 6 +++--- xen/arch/x86/numa.c | 2 +- xen/arch/x86/platform_hypercall.c | 2 +- xen/arch/x86/setup.c | 14 +++++++------- xen/arch/x86/smpboot.c | 4 ++-- xen/arch/x86/spec_ctrl.c | 2 +- xen/arch/x86/sysctl.c | 2 +- 12 files changed, 23 insertions(+), 23 deletions(-)