@@ -352,9 +352,10 @@ DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
static inline void __runq_tickle(struct csched_unit *new)
{
unsigned int cpu = sched_unit_cpu(new->unit);
+ struct sched_resource *sd = get_sched_res(cpu);
struct sched_unit *unit = new->unit;
struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
- struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+ struct csched_private *prv = CSCHED_PRIV(sd->scheduler);
cpumask_t mask, idle_mask, *online;
int balance_step, idlers_empty;
@@ -931,7 +932,8 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
{
struct sched_unit *currunit = current->sched_unit;
struct csched_unit * const svc = CSCHED_UNIT(currunit);
- const struct scheduler *ops = per_cpu(scheduler, cpu);
+ struct sched_resource *sd = get_sched_res(cpu);
+ const struct scheduler *ops = sd->scheduler;
ASSERT( sched_unit_cpu(currunit) == cpu );
ASSERT( svc->sdom != NULL );
@@ -987,8 +989,7 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
* idlers. But, if we are here, it means there is someone running
* on it, and hence the bit must be zero already.
*/
- ASSERT(!cpumask_test_cpu(cpu,
- CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
+ ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(sd->scheduler)->idlers));
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}
}
@@ -1083,6 +1084,7 @@ csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
{
struct csched_unit * const svc = CSCHED_UNIT(unit);
unsigned int cpu = sched_unit_cpu(unit);
+ struct sched_resource *sd = get_sched_res(cpu);
SCHED_STAT_CRANK(unit_sleep);
@@ -1095,7 +1097,7 @@ csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
* But, we are here because unit is going to sleep while running on cpu,
* so the bit must be zero already.
*/
- ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
+ ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(sd->scheduler)->idlers));
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}
else if ( __unit_on_runq(svc) )
@@ -1575,8 +1577,9 @@ static void
csched_tick(void *_cpu)
{
unsigned int cpu = (unsigned long)_cpu;
+ struct sched_resource *sd = get_sched_res(cpu);
struct csched_pcpu *spc = CSCHED_PCPU(cpu);
- struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+ struct csched_private *prv = CSCHED_PRIV(sd->scheduler);
spc->tick++;
@@ -1601,7 +1604,8 @@ csched_tick(void *_cpu)
static struct csched_unit *
csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
{
- const struct csched_private * const prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+ struct sched_resource *sd = get_sched_res(cpu);
+ const struct csched_private * const prv = CSCHED_PRIV(sd->scheduler);
const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
struct csched_unit *speer;
struct list_head *iter;
@@ -3266,8 +3266,9 @@ runq_candidate(struct csched2_runqueue_data *rqd,
unsigned int *skipped)
{
struct list_head *iter, *temp;
+ struct sched_resource *sd = get_sched_res(cpu);
struct csched2_unit *snext = NULL;
- struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
+ struct csched2_private *prv = csched2_priv(sd->scheduler);
bool yield = false, soft_aff_preempt = false;
*skipped = 0;
@@ -69,7 +69,6 @@ static void vcpu_singleshot_timer_fn(void *data);
static void poll_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct scheduler *, scheduler);
DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned int, sched_res_idx);
@@ -188,7 +187,7 @@ static inline struct scheduler *unit_scheduler(const struct sched_unit *unit)
*/
ASSERT(is_idle_domain(d));
- return per_cpu(scheduler, unit->res->processor);
+ return unit->res->scheduler;
}
static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
@@ -1876,8 +1875,8 @@ static bool sched_tasklet_check(unsigned int cpu)
static struct sched_unit *do_schedule(struct sched_unit *prev, s_time_t now,
unsigned int cpu)
{
- struct scheduler *sched = per_cpu(scheduler, cpu);
struct sched_resource *sd = get_sched_res(cpu);
+ struct scheduler *sched = sd->scheduler;
struct sched_unit *next;
/* get policy-specific decision on scheduling... */
@@ -2285,7 +2284,7 @@ static int cpu_schedule_up(unsigned int cpu)
sd->cpus = cpumask_of(cpu);
set_sched_res(cpu, sd);
- per_cpu(scheduler, cpu) = &sched_idle_ops;
+ sd->scheduler = &sched_idle_ops;
spin_lock_init(&sd->_lock);
sd->schedule_lock = &sched_free_cpu_lock;
init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
@@ -2496,7 +2495,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
{
struct vcpu *idle;
void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
- struct scheduler *old_ops = per_cpu(scheduler, cpu);
+ struct scheduler *old_ops = get_sched_res(cpu)->scheduler;
struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
struct cpupool *old_pool = per_cpu(cpupool, cpu);
struct sched_resource *sd = get_sched_res(cpu);
@@ -2560,7 +2559,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
ppriv_old = sd->sched_priv;
new_lock = sched_switch_sched(new_ops, cpu, ppriv, vpriv);
- per_cpu(scheduler, cpu) = new_ops;
+ sd->scheduler = new_ops;
sd->sched_priv = ppriv;
/*
@@ -2660,7 +2659,7 @@ void sched_tick_suspend(void)
struct scheduler *sched;
unsigned int cpu = smp_processor_id();
- sched = per_cpu(scheduler, cpu);
+ sched = get_sched_res(cpu)->scheduler;
sched_do_tick_suspend(sched, cpu);
rcu_idle_enter(cpu);
rcu_idle_timer_start();
@@ -2673,7 +2672,7 @@ void sched_tick_resume(void)
rcu_idle_timer_stop();
rcu_idle_exit(cpu);
- sched = per_cpu(scheduler, cpu);
+ sched = get_sched_res(cpu)->scheduler;
sched_do_tick_resume(sched, cpu);
}
@@ -36,6 +36,7 @@ extern const cpumask_t *sched_res_mask;
* as the rest of the struct. Just have the scheduler point to the
* one it wants (This may be the one right in front of it).*/
struct sched_resource {
+ struct scheduler *scheduler;
spinlock_t *schedule_lock,
_lock;
struct sched_unit *curr; /* current task */
@@ -50,7 +51,6 @@ struct sched_resource {
#define curr_on_cpu(c) (get_sched_res(c)->curr)
-DECLARE_PER_CPU(struct scheduler *, scheduler);
DECLARE_PER_CPU(struct cpupool *, cpupool);
DECLARE_PER_CPU(struct sched_resource *, sched_res);
Having a pointer to the struct scheduler in struct sched_resource, instead of a per-CPU variable, is enough. Signed-off-by: Juergen Gross <jgross@suse.com> --- V1: new patch --- xen/common/sched_credit.c | 18 +++++++++++------- xen/common/sched_credit2.c | 3 ++- xen/common/schedule.c | 15 +++++++-------- xen/include/xen/sched-if.h | 2 +- 4 files changed, 21 insertions(+), 17 deletions(-)