@@ -214,7 +214,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
*/
#ifdef CONFIG_SMP
if (cpuid) {
- cpu_set(cpuid, cpu_present_map);
+ set_cpu_present(cpuid, true);
cpu_up(cpuid);
}
#endif
@@ -113,7 +113,7 @@ halt_processor(void)
{
/* REVISIT : redirect I/O Interrupts to another CPU? */
/* REVISIT : does PM *know* this CPU isn't available? */
- cpu_clear(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), false);
local_irq_disable();
for (;;)
;
@@ -296,13 +296,14 @@ smp_cpu_init(int cpunum)
mb();
/* Well, support 2.4 linux scheme as well. */
- if (cpu_test_and_set(cpunum, cpu_online_map))
+ if (cpu_isset(cpunum, cpu_online_map))
{
extern void machine_halt(void); /* arch/parisc.../process.c */
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
machine_halt();
}
+ set_cpu_online(cpunum, true);
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
@@ -424,8 +425,8 @@ void __init smp_prepare_boot_cpu(void)
/* Setup BSP mappings */
printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
- cpu_set(bootstrap_processor, cpu_online_map);
- cpu_set(bootstrap_processor, cpu_present_map);
+ set_cpu_online(bootstrap_processor, true);
+ set_cpu_present(bootstrap_processor, true);
}
@@ -436,8 +437,7 @@ void __init smp_prepare_boot_cpu(void)
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- cpus_clear(cpu_present_map);
- cpu_set(0, cpu_present_map);
+ init_cpu_present(cpumask_of(0));
parisc_max_cpus = max_cpus;
if (!max_cpus)
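
For reference, a minimal user-space model of the accessor semantics these hunks adopt (set_cpu_present()/set_cpu_online() are the real kernel names, but the single-word bitmap below is a simplified stand-in for the kernel's NR_CPUS-sized cpumask, so this sketch covers at most 64 CPUs):

/* Toy model of the set_cpu_present()/set_cpu_online() accessors:
 * each helper sets or clears one bit in a global mask instead of
 * letting callers poke cpu_*_map directly. Not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long cpu_present_bits, cpu_online_bits;

static void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpu_present_bits |= 1UL << cpu;
	else
		cpu_present_bits &= ~(1UL << cpu);
}

static void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpu_online_bits |= 1UL << cpu;
	else
		cpu_online_bits &= ~(1UL << cpu);
}

int main(void)
{
	set_cpu_present(0, true);  /* boot CPU, as in smp_prepare_boot_cpu() */
	set_cpu_online(0, true);
	set_cpu_present(1, true);  /* secondary found by processor_probe() */
	printf("present=%#lx online=%#lx\n", cpu_present_bits, cpu_online_bits);
	return 0;
}

The point of the conversion is exactly this encapsulation: once all writers go through the helpers, the masks themselves can change representation without touching every architecture.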

commit bd071e1a371d31db243edc4714ff9e8d1ea1309e
Author: Rusty Russell <rusty@rustcorp.com.au>
Date:   Mon Mar 16 14:19:37 2009 +1030

    cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits.: parisc

    Impact: cleanup, futureproof

    In fact, all cpumask ops will only be valid (in general) for bit
    numbers < nr_cpu_ids.  So use that instead of NR_CPUS in various
    places.

    This is always safe: no cpu number can be >= nr_cpu_ids, and
    nr_cpu_ids is initialized to NR_CPUS at boot.

    Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
    Signed-off-by: Mike Travis <travis@sgi.com>
    Acked-by: Ingo Molnar <mingo@elte.hu>

@@ -311,12 +311,12 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
- while ((next_cpu < NR_CPUS) &&
+ while ((next_cpu < nr_cpu_ids) &&
(!per_cpu(cpu_data, next_cpu).txn_addr ||
!cpu_online(next_cpu)))
next_cpu++;
- if (next_cpu >= NR_CPUS)
+ if (next_cpu >= nr_cpu_ids)
next_cpu = 0; /* nothing else, assign monarch */
return txn_affinity_addr(virt_irq, next_cpu);
@@ -100,8 +100,8 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
struct cpuinfo_parisc *p;
#ifdef CONFIG_SMP
- if (num_online_cpus() >= NR_CPUS) {
- printk(KERN_INFO "num_online_cpus() >= NR_CPUS\n");
+ if (num_online_cpus() >= nr_cpu_ids) {
+ printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
return 1;
}
#else
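
To make the bound change concrete, here is a user-space sketch of the same round-robin scan txn_alloc_addr() performs, wrapping to CPU 0 (the monarch) once the scan passes nr_cpu_ids; nr_cpu_ids and the per-CPU usability test are stand-in data here, not the kernel's:

/* Sketch of the bounded round-robin scan in txn_alloc_addr():
 * advance past CPUs that are offline or lack a txn_addr, and wrap
 * to CPU 0 when the scan runs off the end of the possible ids. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 64                              /* compile-time maximum */
static unsigned int nr_cpu_ids = 4;             /* runtime count <= NR_CPUS */
static bool cpu_usable[NR_CPUS] = { true, false, true, true };

static unsigned int next_txn_cpu(unsigned int next_cpu)
{
	next_cpu++;                             /* assign to the "next" CPU */
	while (next_cpu < nr_cpu_ids && !cpu_usable[next_cpu])
		next_cpu++;
	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;                   /* nothing else, assign monarch */
	return next_cpu;
}

int main(void)
{
	unsigned int cpu = 0;

	for (int i = 0; i < 5; i++) {
		cpu = next_txn_cpu(cpu);
		printf("interrupt target: CPU%u\n", cpu);
	}
	return 0;
}

Scanning to nr_cpu_ids instead of NR_CPUS skips the (possibly large) tail of CPU ids that can never exist on the running system, which is the futureproofing the commit message refers to.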
commit 91887a362984324e254473e92820758c8e658f78
Author: Rusty Russell <rusty@rustcorp.com.au>
Date:   Mon Mar 16 14:19:37 2009 +1030

    cpumask: arch_send_call_function_ipi_mask: parisc

    We're weaning the core code off handing cpumasks around on-stack.

    This introduces arch_send_call_function_ipi_mask(), and by defining
    it, the old arch_send_call_function_ipi is defined by the core code.

    We also take the chance to change send_IPI_mask() and use the new
    for_each_cpu() iterator.

    Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

@@ -29,7 +29,8 @@ extern void smp_send_reschedule(int cpu);
extern void smp_send_all_nop(void);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#endif /* !ASSEMBLY */
@@ -214,11 +214,11 @@ ipi_send(int cpu, enum ipi_message_type op)
}
static void
-send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
+send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
int cpu;
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu(cpu, mask)
ipi_send(cpu, op);
}
@@ -257,7 +257,7 @@ smp_send_all_nop(void)
send_IPI_allbutself(IPI_NOP);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_IPI_mask(mask, IPI_CALL_FUNC);
}
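
A quick user-space model of the new send_IPI_mask() shape: the mask travels by const pointer rather than by value, and the iterator visits only set bits (struct cpumask and for_each_cpu below are simplified 64-bit stand-ins for the kernel versions):

/* Toy model of send_IPI_mask(): walk a cpumask passed by pointer
 * and "send" one IPI per set bit. Passing a pointer avoids copying
 * a NR_CPUS-sized mask through the call chain. Not kernel code. */
#include <stdio.h>

struct cpumask { unsigned long bits; };         /* 64-CPU toy mask */

#define for_each_cpu(cpu, mask) \
	for ((cpu) = 0; (cpu) < 64; (cpu)++) \
		if ((mask)->bits & (1UL << (cpu)))

enum ipi_message_type { IPI_NOP, IPI_CALL_FUNC };

static void ipi_send(int cpu, enum ipi_message_type op)
{
	printf("IPI %d -> CPU%d\n", op, cpu);
}

static void send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)                 /* one IPI per set bit */
		ipi_send(cpu, op);
}

int main(void)
{
	struct cpumask mask = { .bits = 0x5 };  /* CPUs 0 and 2 */

	send_IPI_mask(&mask, IPI_CALL_FUNC);
	return 0;
}

On-stack cpumask_t values scale with NR_CPUS (a 4096-CPU build makes each one half a kilobyte), which is why the core code moved these paths to const struct cpumask * parameters.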