@@ -24,7 +24,6 @@ static u32 *x86_cpu_to_logical_apicid __read_mostly;
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU_READ_MOSTLY(struct cluster_mask *, cluster_masks);
-static struct cluster_mask *cluster_hotplug_mask;
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
@@ -106,54 +105,71 @@ static u32 x2apic_calc_apicid(unsigned int cpu)
static void init_x2apic_ldr(void)
{
struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
- u32 cluster, apicid = apic_read(APIC_LDR);
- unsigned int cpu;
- x86_cpu_to_logical_apicid[smp_processor_id()] = apicid;
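+	/*
+	 * The cluster mask is allocated in x2apic_prepare_cpu() before this
+	 * CPU is brought up, so by the time init_x2apic_ldr() runs on the
+	 * CPU it must already be set up.
+	 */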
+ BUG_ON(!cmsk);
- if (cmsk)
- goto update;
-
- cluster = apicid >> 16;
- for_each_online_cpu(cpu) {
- cmsk = per_cpu(cluster_masks, cpu);
- /* Matching cluster found. Link and update it. */
- if (cmsk && cmsk->clusterid == cluster)
- goto update;
- }
- cmsk = cluster_hotplug_mask;
- cmsk->clusterid = cluster;
- cluster_hotplug_mask = NULL;
-update:
- this_cpu_write(cluster_masks, cmsk);
cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}
-static int alloc_clustermask(unsigned int cpu, int node)
+static int alloc_clustermask(unsigned int cpu, u32 cluster, int node)
{
+ struct cluster_mask *cmsk = NULL;
+ unsigned int cpu_i;
+ u32 apicid;
+
if (per_cpu(cluster_masks, cpu))
return 0;
- /*
- * If a hotplug spare mask exists, check whether it's on the right
- * node. If not, free it and allocate a new one.
+
+	/*
+	 * For the hotplug case, reuse the cluster mask of a sibling CPU in
+	 * the same cluster, if one has already been set up, instead of
+	 * always allocating a new one.
+	 */
+ for_each_present_cpu(cpu_i) {
+ apicid = apic->cpu_present_to_apicid(cpu_i);
+ if (apicid != BAD_APICID && apicid >> 4 == cluster) {
+ cmsk = per_cpu(cluster_masks, cpu_i);
+ if (cmsk)
+ break;
+ }
+ }
+	if (!cmsk) {
+		cmsk = kzalloc_node(sizeof(*cmsk), GFP_KERNEL, node);
+		if (!cmsk)
+			return -ENOMEM;
+	}
+
+ cmsk->node = node;
+ cmsk->clusterid = cluster;
+
+ per_cpu(cluster_masks, cpu) = cmsk;
+
+ /*
+ * As an optimisation during boot, set the cluster_mask for *all*
+ * present CPUs at once, to prevent *each* of them having to iterate
+ * over the others to find the existing cluster_mask.
*/
- if (cluster_hotplug_mask) {
- if (cluster_hotplug_mask->node == node)
- return 0;
- kfree(cluster_hotplug_mask);
+	if (system_state < SYSTEM_RUNNING) {
+		for_each_present_cpu(cpu_i) {
+			apicid = apic->cpu_present_to_apicid(cpu_i);
+			if (apicid != BAD_APICID && apicid >> 4 == cluster) {
+				struct cluster_mask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i);
+
+				if (*cpu_cmsk)
+					BUG_ON(*cpu_cmsk != cmsk);
+				else
+					*cpu_cmsk = cmsk;
+			}
+		}
}
- cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
- GFP_KERNEL, node);
- if (!cluster_hotplug_mask)
- return -ENOMEM;
- cluster_hotplug_mask->node = node;
return 0;
}
static int x2apic_prepare_cpu(unsigned int cpu)
{
- if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
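+	/*
+	 * The x2APIC logical destination has two parts: the cluster ID in
+	 * bits 31:16 and a per-CPU bit within that cluster in bits 15:0.
+	 * Both are derived from the physical APIC ID: the cluster is
+	 * APIC ID >> 4 and the bit position is the low four bits.
+	 */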
+ u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
+ u32 cluster = phys_apicid >> 4;
+ u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf));
+
+ x86_cpu_to_logical_apicid[cpu] = logical_apicid;
+
+ if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0)
return -ENOMEM;
if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
return -ENOMEM;