--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -527,8 +527,6 @@ static void *
 csched_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     struct csched_pcpu *spc;
-    struct csched_private *prv = CSCHED_PRIV(ops);
-    unsigned long flags;
 
     /* Allocate per-PCPU info */
     spc = xzalloc(struct csched_pcpu);
@@ -541,6 +539,19 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
         return ERR_PTR(-ENOMEM);
     }
 
+    return spc;
+}
+
+static void
+csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
+{
+    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched_pcpu * const spc = pdata;
+    unsigned long flags;
+
+    /* cpu data needs to be allocated, but STILL uninitialized */
+    ASSERT(spc && spc->runq.next == NULL && spc->runq.prev == NULL);
+
     spin_lock_irqsave(&prv->lock, flags);
 
     /* Initialize/update system-wide config */
@@ -561,16 +572,12 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
     INIT_LIST_HEAD(&spc->runq);
     spc->runq_sort_last = prv->runq_sort;
     spc->idle_bias = nr_cpu_ids - 1;
-    if ( per_cpu(schedule_data, cpu).sched_priv == NULL )
-        per_cpu(schedule_data, cpu).sched_priv = spc;
 
     /* Start off idling... */
     BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)));
     cpumask_set_cpu(cpu, prv->idlers);
 
     spin_unlock_irqrestore(&prv->lock, flags);
-
-    return spc;
 }
 
 #ifndef NDEBUG
@@ -2054,6 +2061,7 @@ static const struct scheduler sched_credit_def = {
     .alloc_vdata    = csched_alloc_vdata,
     .free_vdata     = csched_free_vdata,
     .alloc_pdata    = csched_alloc_pdata,
+    .init_pdata     = csched_init_pdata,
     .free_pdata     = csched_free_pdata,
     .alloc_domdata  = csched_alloc_domdata,
     .free_domdata   = csched_free_domdata,
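
The credit1 hunks above split the old csched_alloc_pdata() in two: a pure
allocation step, and a new csched_init_pdata() that performs the actual
initialization under prv->lock. Note also that storing the pointer into
per_cpu(schedule_data, cpu).sched_priv is no longer done by the scheduler
itself: the common code is now expected to stash whatever alloc_pdata()
returns and to pass it back via init_pdata() once the CPU is up. A minimal
sketch of that calling convention follows; the function name and exact call
site are assumptions for illustration, not part of this patch:

    /* Hypothetical common-code caller, illustrating the alloc/init split. */
    static int cpu_schedule_up_sketch(const struct scheduler *ops, int cpu)
    {
        void *pdata = NULL;

        if ( ops->alloc_pdata )
        {
            pdata = ops->alloc_pdata(ops, cpu);
            if ( IS_ERR(pdata) )
                return PTR_ERR(pdata);
        }

        /* Common code, not the scheduler, records the per-cpu data... */
        per_cpu(schedule_data, cpu).sched_priv = pdata;

        /* ...and has it initialized once the CPU is actually online. */
        if ( ops->init_pdata )
            ops->init_pdata(ops, pdata, cpu);

        return 0;
    }
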
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1971,7 +1971,8 @@ static void deactivate_runqueue(struct csched2_private *prv, int rqi)
     cpumask_clear_cpu(rqi, &prv->active_queues);
 }
 
-static void init_pcpu(const struct scheduler *ops, int cpu)
+static void
+csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     unsigned rqi;
     unsigned long flags;
@@ -1981,12 +1982,7 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    if ( cpumask_test_cpu(cpu, &prv->initialized) )
-    {
-        printk("%s: Strange, cpu %d already initialized!\n", __func__, cpu);
-        spin_unlock_irqrestore(&prv->lock, flags);
-        return;
-    }
+    ASSERT(!cpumask_test_cpu(cpu, &prv->initialized));
 
     /* Figure out which runqueue to put it in */
     rqi = 0;
@@ -2036,20 +2032,6 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
     return;
 }
 
-static void *
-csched2_alloc_pdata(const struct scheduler *ops, int cpu)
-{
-    /* Check to see if the cpu is online yet */
-    /* Note: cpu 0 doesn't get a STARTING callback */
-    if ( cpu == 0 || cpu_to_socket(cpu) != XEN_INVALID_SOCKET_ID )
-        init_pcpu(ops, cpu);
-    else
-        printk("%s: cpu %d not online yet, deferring initializatgion\n",
-               __func__, cpu);
-
-    return NULL;
-}
-
 static void
 csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
@@ -2061,7 +2043,7 @@ csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    BUG_ON(!cpumask_test_cpu(cpu, &prv->initialized));
+    ASSERT(cpumask_test_cpu(cpu, &prv->initialized));
 
     /* Find the old runqueue and remove this cpu from it */
     rqi = prv->runq_map[cpu];
@@ -2099,49 +2081,6 @@ csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 }
 
 static int
-csched2_cpu_starting(int cpu)
-{
-    struct scheduler *ops;
-
-    /* Hope this is safe from cpupools switching things around. :-) */
-    ops = per_cpu(scheduler, cpu);
-
-    if ( ops->alloc_pdata == csched2_alloc_pdata )
-        init_pcpu(ops, cpu);
-
-    return NOTIFY_DONE;
-}
-
-static int cpu_credit2_callback(
-    struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-    unsigned int cpu = (unsigned long)hcpu;
-    int rc = 0;
-
-    switch ( action )
-    {
-    case CPU_STARTING:
-        csched2_cpu_starting(cpu);
-        break;
-    default:
-        break;
-    }
-
-    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
-}
-
-static struct notifier_block cpu_credit2_nfb = {
-    .notifier_call = cpu_credit2_callback
-};
-
-static int
-csched2_global_init(void)
-{
-    register_cpu_notifier(&cpu_credit2_nfb);
-    return 0;
-}
-
-static int
 csched2_init(struct scheduler *ops)
 {
     int i;
@@ -2219,12 +2158,11 @@ static const struct scheduler sched_credit2_def = {
     .dump_cpu_state = csched2_dump_pcpu,
     .dump_settings  = csched2_dump,
 
-    .global_init    = csched2_global_init,
     .init           = csched2_init,
     .deinit         = csched2_deinit,
     .alloc_vdata    = csched2_alloc_vdata,
     .free_vdata     = csched2_free_vdata,
-    .alloc_pdata    = csched2_alloc_pdata,
+    .init_pdata     = csched2_init_pdata,
     .free_pdata     = csched2_free_pdata,
     .alloc_domdata  = csched2_alloc_domdata,
     .free_domdata   = csched2_free_domdata,
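
With init_pdata guaranteed to run only when the CPU is already up, and hence
its topology information is valid, credit2 can drop the whole CPU_STARTING
notifier apparatus: the "not online yet, deferring initialization" special
case disappears, and the now impossible-by-construction double initialization
becomes an ASSERT() instead of a runtime check. If one wanted to spell the
new contract out in code, a guard along these lines could sit at the top of
csched2_init_pdata() (an illustration, not part of this patch):

    /* Assumed contract: topology info is valid by the time we run. */
    ASSERT(cpu == 0 || cpu_to_socket(cpu) != XEN_INVALID_SOCKET_ID);
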
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -666,8 +666,8 @@ rt_deinit(struct scheduler *ops)
  * Point per_cpu spinlock to the global system lock;
  * All cpu have same global system lock
  */
-static void *
-rt_alloc_pdata(const struct scheduler *ops, int cpu)
+static void
+rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
     spinlock_t *old_lock;
@@ -680,6 +680,12 @@ rt_alloc_pdata(const struct scheduler *ops, int cpu)
 
     /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock_irqrestore(old_lock, flags);
+}
+
+static void *
+rt_alloc_pdata(const struct scheduler *ops, int cpu)
+{
+    struct rt_private *prv = rt_priv(ops);
 
     if ( !alloc_cpumask_var(&_cpumask_scratch[cpu]) )
         return ERR_PTR(-ENOMEM);
@@ -1461,6 +1467,7 @@ static const struct scheduler sched_rtds_def = {
     .deinit         = rt_deinit,
     .alloc_pdata    = rt_alloc_pdata,
     .free_pdata     = rt_free_pdata,
+    .init_pdata     = rt_init_pdata,
     .alloc_domdata  = rt_alloc_domdata,
     .free_domdata   = rt_free_domdata,
     .init_domain    = rt_dom_init,
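
The RTDS change is the same split in miniature: rt_alloc_pdata() keeps only
the allocation of the per-cpu scratch cpumask, while re-pointing the per-cpu
scheduler lock moves into rt_init_pdata(). The idiom there, only partially
visible in the hunk above, deserves a note: the old per-cpu lock must be held
while per_cpu(schedule_data, cpu).schedule_lock is redirected to the global
RTDS lock, and it is the old lock, not the new one, that is released
afterwards. A sketch of the relevant lines, reconstructed from the context
shown and hedged accordingly:

    /* Sketch of rt_init_pdata(); the full body is not in this hunk. */
    old_lock = pcpu_schedule_lock_irqsave(cpu, &flags);

    /* From now on, locking this pcpu takes the scheduler-wide lock. */
    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;

    /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
    spin_unlock_irqrestore(old_lock, flags);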