@@ -34,8 +34,6 @@ static cpumask_t cpupool_locked_cpus;
static DEFINE_SPINLOCK(cpupool_lock);
-DEFINE_PER_CPU(struct cpupool *, cpupool);
-
#define cpupool_dprintk(x...) ((void)0)
static void free_cpupool_struct(struct cpupool *c)
@@ -510,7 +508,7 @@ static int cpupool_cpu_add(unsigned int cpu)
* (or unplugging would have failed) and that is the default behavior
* anyway.
*/
- per_cpu(cpupool, cpu) = NULL;
+ get_sched_res(cpu)->cpupool = NULL;
ret = cpupool_assign_cpu_locked(cpupool0, cpu);
spin_unlock(&cpupool_lock);
@@ -1681,7 +1681,7 @@ static struct csched_unit *
csched_load_balance(struct csched_private *prv, int cpu,
struct csched_unit *snext, bool *stolen)
{
- struct cpupool *c = per_cpu(cpupool, cpu);
+ struct cpupool *c = get_sched_res(cpu)->cpupool;
struct csched_unit *speer;
cpumask_t workers;
cpumask_t *online;
@@ -775,7 +775,7 @@ rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
if ( prv->repl_timer.cpu == cpu )
{
- struct cpupool *c = per_cpu(cpupool, cpu);
+ struct cpupool *c = get_sched_res(cpu)->cpupool;
unsigned int new_cpu = cpumask_cycle(cpu, cpupool_online_cpumask(c));
/*
@@ -1098,7 +1098,7 @@ int cpu_disable_scheduler(unsigned int cpu)
cpumask_t online_affinity;
int ret = 0;
- c = per_cpu(cpupool, cpu);
+ c = get_sched_res(cpu)->cpupool;
if ( c == NULL )
return ret;
@@ -1167,7 +1167,7 @@ static int cpu_disable_scheduler_check(unsigned int cpu)
struct cpupool *c;
struct vcpu *v;
- c = per_cpu(cpupool, cpu);
+ c = get_sched_res(cpu)->cpupool;
if ( c == NULL )
return 0;
@@ -2497,8 +2497,8 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
struct scheduler *old_ops = get_sched_res(cpu)->scheduler;
struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
- struct cpupool *old_pool = per_cpu(cpupool, cpu);
struct sched_resource *sd = get_sched_res(cpu);
+ struct cpupool *old_pool = sd->cpupool;
spinlock_t *old_lock, *new_lock;
unsigned long flags;
@@ -2580,7 +2580,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
sched_free_vdata(old_ops, vpriv_old);
sched_free_pdata(old_ops, ppriv_old, cpu);
- per_cpu(cpupool, cpu) = c;
+ get_sched_res(cpu)->cpupool = c;
/* When a cpu is added to a pool, trigger it to go pick up some work */
if ( c != NULL )
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
@@ -37,6 +37,7 @@ extern const cpumask_t *sched_res_mask;
* one it wants (This may be the one right in front of it).*/
struct sched_resource {
struct scheduler *scheduler;
+ struct cpupool *cpupool;
spinlock_t *schedule_lock,
_lock;
struct sched_unit *curr; /* current task */
@@ -51,7 +52,6 @@ struct sched_resource {
#define curr_on_cpu(c) (get_sched_res(c)->curr)
-DECLARE_PER_CPU(struct cpupool *, cpupool);
DECLARE_PER_CPU(struct sched_resource *, sched_res);
static inline struct sched_resource *get_sched_res(unsigned int cpu)
Having a pointer to struct cpupool in struct sched_resource instead of
a per-cpu variable is enough.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V1: new patch
---
 xen/common/cpupool.c       | 4 +---
 xen/common/sched_credit.c  | 2 +-
 xen/common/sched_rt.c      | 2 +-
 xen/common/schedule.c      | 8 ++++----
 xen/include/xen/sched-if.h | 2 +-
 5 files changed, 8 insertions(+), 10 deletions(-)
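For illustration only, a minimal standalone sketch of the access-pattern change this
patch makes: the cpupool back-pointer moves from a per-cpu variable into struct
sched_resource, so every lookup goes through get_sched_res(cpu). The types, the
sched_res_store array, the cpupool_of() helper and main() below are simplified
stand-ins invented for this sketch, not Xen code.

/* Sketch of the before/after access pattern; simplified stand-in types. */
#include <stdio.h>

struct cpupool { int poolid; };

struct sched_resource {
    struct cpupool *cpupool;   /* pool back-pointer now lives here */
};

#define NR_CPUS 4
static struct sched_resource sched_res_store[NR_CPUS];

/* Stand-in for Xen's get_sched_res(); the real one reads a per-cpu pointer. */
static struct sched_resource *get_sched_res(unsigned int cpu)
{
    return &sched_res_store[cpu];
}

/* Hypothetical helper: what callers used to get from per_cpu(cpupool, cpu). */
static struct cpupool *cpupool_of(unsigned int cpu)
{
    return get_sched_res(cpu)->cpupool;
}

int main(void)
{
    struct cpupool pool0 = { .poolid = 0 };

    /* What used to be "per_cpu(cpupool, cpu) = c;" becomes: */
    get_sched_res(1)->cpupool = &pool0;

    /* Readers such as csched_load_balance() or cpu_disable_scheduler() now do: */
    struct cpupool *c = cpupool_of(1);
    printf("cpu 1 is in pool %d\n", c ? c->poolid : -1);

    return 0;
}

The design point is simply that the scheduling resource already exists per CPU and
is looked up on every scheduling path, so keeping the cpupool pointer inside it
removes one per-cpu variable without adding any extra indirection.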