@@ -14,6 +14,12 @@ struct tag;
struct meminfo;
struct sys_timer;
struct pt_regs;
+struct smp_operations;
+#ifdef CONFIG_SMP
+#define smp_ops(ops) (&(ops))
+#else
+#define smp_ops(ops) ((struct smp_operations *)NULL)
+#endif
struct machine_desc {
unsigned int nr; /* architecture number */
@@ -35,6 +41,7 @@ struct machine_desc {
unsigned char reserve_lp1 :1; /* never has lp1 */
unsigned char reserve_lp2 :1; /* never has lp2 */
char restart_mode; /* default restart mode */
+ struct smp_operations *smp; /* SMP operations */
void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
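As a usage sketch (not part of the patch): a board file would point the new
.smp field at its operations with the smp_ops() helper, which degrades to a
NULL pointer on !CONFIG_SMP builds. The machine name and foo_smp_ops below
are hypothetical.

	/* hypothetical board file */
	extern struct smp_operations foo_smp_ops;

	MACHINE_START(FOO, "Hypothetical Foo board")
		/* ... other machine_desc fields ... */
		.smp		= smp_ops(foo_smp_ops),
	MACHINE_END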
@@ -93,4 +93,37 @@ extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+struct smp_operations {
+#ifdef CONFIG_SMP
+ /*
+ * Set up the set of possible CPUs (via set_cpu_possible)
+ */
+ void (*smp_init_cpus)(void);
+ /*
+ * Initialize cpu_possible map, and enable coherency
+ */
+ void (*smp_prepare_cpus)(unsigned int max_cpus);
+
+ /*
+ * Perform platform-specific initialisation of the specified CPU.
+ */
+ void (*smp_secondary_init)(unsigned int cpu);
+ /*
+ * Boot a secondary CPU, and assign it the specified idle task.
+ * This also gives us the initial stack to use for this CPU.
+ */
+ int (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_kill)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+ int (*cpu_disable)(unsigned int cpu);
+#endif
+#endif
+};
+
+/*
+ * Set the platform-specific SMP operations
+ */
+extern void smp_set_ops(struct smp_operations *);
+
#endif /* ifndef __ASM_ARM_SMP_H */
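A minimal sketch of how a platform might fill in the new structure, assuming
CONFIG_SMP=y; every foo_* name is hypothetical and the callback bodies are
stubs. Only struct smp_operations and smp_set_ops() come from this patch.

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <asm/smp.h>

	struct task_struct;

	/* Hypothetical callbacks: real ones would call set_cpu_possible(),
	 * enable coherency, and kick the secondary out of its holding pen. */
	static void __init foo_smp_init_cpus(void) { }
	static void __init foo_smp_prepare_cpus(unsigned int max_cpus) { }
	static void __cpuinit foo_secondary_init(unsigned int cpu) { }
	static int __cpuinit foo_boot_secondary(unsigned int cpu,
						struct task_struct *idle)
	{
		return -ENOSYS;		/* stub: no boot mechanism here */
	}

	/* smp_set_ops() copies the structure, so __initdata is fine. */
	struct smp_operations foo_smp_ops __initdata = {
		.smp_init_cpus		= foo_smp_init_cpus,
		.smp_prepare_cpus	= foo_smp_prepare_cpus,
		.smp_secondary_init	= foo_secondary_init,
		.smp_boot_secondary	= foo_boot_secondary,
	};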
@@ -977,8 +977,10 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
#ifdef CONFIG_SMP
- if (is_smp())
+ if (is_smp()) {
+ smp_set_ops(mdesc->smp);
smp_init_cpus();
+ }
#endif
reserve_crashkernel();
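Note the ordering: the operations must be registered before smp_init_cpus()
runs, since that call immediately dispatches through smp_ops. smp_set_ops()
ignores a NULL pointer, so machines that do not fill in .smp keep their
existing behaviour via their own strong definitions of the weak wrappers.
A platform could equally register its operations directly; a hypothetical
fragment reusing foo_smp_ops from the sketch above:

	if (is_smp())
		smp_set_ops(&foo_smp_ops);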
@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
-#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
@@ -27,6 +26,7 @@
#include <linux/completion.h>
#include <linux/atomic.h>
+#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -42,6 +42,7 @@
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
+#include <asm/mach/arch.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
@@ -60,6 +61,14 @@ enum ipi_msg_type {
static DECLARE_COMPLETION(cpu_running);
+static struct smp_operations smp_ops;
+
+void __init smp_set_ops(struct smp_operations *ops)
+{
+ if (ops)
+ smp_ops = *ops;
+}
+
int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -121,9 +130,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
return ret;
}
+/* Platform-specific SMP hooks, dispatched through smp_ops */
+void __attribute__((weak)) __init smp_init_cpus(void)
+{
+ if (smp_ops.smp_init_cpus)
+ smp_ops.smp_init_cpus();
+}
+
+void __attribute__((weak)) __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+ if (smp_ops.smp_prepare_cpus)
+ smp_ops.smp_prepare_cpus(max_cpus);
+}
+
+void __attribute__((weak)) __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ if (smp_ops.smp_secondary_init)
+ smp_ops.smp_secondary_init(cpu);
+}
+
+int __attribute__((weak)) __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ if (smp_ops.smp_boot_secondary)
+ return smp_ops.smp_boot_secondary(cpu, idle);
+ return -ENOSYS;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);
+int __attribute__((weak)) platform_cpu_kill(unsigned int cpu)
+{
+ if (smp_ops.cpu_kill)
+ return smp_ops.cpu_kill(cpu);
+ return 1;
+}
+
+void __attribute__((weak)) platform_cpu_die(unsigned int cpu)
+{
+ if (smp_ops.cpu_die)
+ smp_ops.cpu_die(cpu);
+}
+
+int __attribute__((weak)) platform_cpu_disable(unsigned int cpu)
+{
+ if (smp_ops.cpu_disable)
+ return smp_ops.cpu_disable(cpu);
+
+ /*
+ * By default, allow disabling all CPUs except the first one,
+ * since this is special on a lot of platforms, e.g. because
+ * of clock tick interrupts.
+ */
+ return cpu == 0 ? -EPERM : 0;
+}
/*
* __cpu_disable runs on the processor to be shutdown.
*/
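With these defaults in place, a platform converted to smp_operations only has
to provide the hotplug hooks it actually implements: cpu_kill falls back to
reporting success and cpu_disable still refuses to offline CPU 0. A
hypothetical sketch of the two common hooks (foo_* names and the idle loop
are illustrative; cpu_do_idle() is the generic ARM idle helper):

	#include <asm/proc-fns.h>

	#ifdef CONFIG_HOTPLUG_CPU
	static int foo_cpu_kill(unsigned int cpu)
	{
		return 1;	/* nothing to wait for: report success */
	}

	static void foo_cpu_die(unsigned int cpu)
	{
		/* park the dying CPU in a low-power loop until it is reset */
		while (1)
			cpu_do_idle();
	}
	#endif

These would be wired up through the .cpu_kill and .cpu_die members of the
platform's struct smp_operations.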