This prepares for making the different schedulers vcpu agnostic.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_arinc653.c |  4 ++--
 xen/common/sched_credit.c   |  6 +++---
 xen/common/sched_credit2.c  | 10 +++++-----
 xen/common/sched_null.c     |  4 ++--
 xen/common/sched_rt.c       |  4 ++--
 xen/common/schedule.c       | 24 ++++++++++++------------
 xen/include/xen/sched.h     |  2 +-
 7 files changed, 27 insertions(+), 27 deletions(-)

--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -53,7 +53,7 @@
* Return a pointer to the ARINC 653-specific scheduler data information
* associated with the given VCPU (vc)
*/
-#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)
+#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_item->priv)
/**
* Return the global scheduler private data given the scheduler ops pointer
@@ -647,7 +647,7 @@ a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_item->priv = vdata;
per_cpu(scheduler, cpu) = new_ops;
per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -83,7 +83,7 @@
((struct csched_private *)((_ops)->sched_data))
#define CSCHED_PCPU(_c) \
((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
-#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
+#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_item->priv)
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq))
@@ -641,7 +641,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
ASSERT(svc && is_idle_vcpu(svc->vcpu));
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_item->priv = vdata;
/*
* We are holding the runqueue lock already (it's been taken in
@@ -1031,7 +1031,7 @@ static void
csched_item_insert(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched_vcpu *svc = vc->sched_priv;
+ struct csched_vcpu *svc = item->priv;
spinlock_t *lock;
BUG_ON( is_idle_vcpu(vc) );
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -572,7 +572,7 @@ static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
static inline struct csched2_vcpu *csched2_vcpu(const struct vcpu *v)
{
- return v->sched_priv;
+ return v->sched_item->priv;
}
static inline struct csched2_dom *csched2_dom(const struct domain *d)
@@ -970,7 +970,7 @@ _runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
static void
runq_assign(const struct scheduler *ops, struct vcpu *vc)
{
- struct csched2_vcpu *svc = vc->sched_priv;
+ struct csched2_vcpu *svc = vc->sched_item->priv;
ASSERT(svc->rqd == NULL);
@@ -997,7 +997,7 @@ _runq_deassign(struct csched2_vcpu *svc)
static void
runq_deassign(const struct scheduler *ops, struct vcpu *vc)
{
- struct csched2_vcpu *svc = vc->sched_priv;
+ struct csched2_vcpu *svc = vc->sched_item->priv;
ASSERT(svc->rqd == c2rqd(ops, vc->processor));
@@ -3108,7 +3108,7 @@ static void
csched2_item_insert(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched2_vcpu *svc = vc->sched_priv;
+ struct csched2_vcpu *svc = item->priv;
struct csched2_dom * const sdom = svc->sdom;
spinlock_t *lock;
@@ -3888,7 +3888,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
ASSERT(!local_irq_is_enabled());
write_lock(&prv->lock);
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_item->priv = vdata;
rqi = init_pdata(prv, pdata, cpu);
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -117,7 +117,7 @@ static inline struct null_private *null_priv(const struct scheduler *ops)
static inline struct null_vcpu *null_vcpu(const struct vcpu *v)
{
- return v->sched_priv;
+ return v->sched_item->priv;
}
static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
@@ -391,7 +391,7 @@ static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
ASSERT(nvc && is_idle_vcpu(nvc->vcpu));
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_item->priv = vdata;
/*
* We are holding the runqueue lock already (it's been taken in
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -235,7 +235,7 @@ static inline struct rt_private *rt_priv(const struct scheduler *ops)
static inline struct rt_vcpu *rt_vcpu(const struct vcpu *vcpu)
{
- return vcpu->sched_priv;
+ return vcpu->sched_item->priv;
}
static inline struct list_head *rt_runq(const struct scheduler *ops)
@@ -761,7 +761,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
dprintk(XENLOG_DEBUG, "RTDS: timer initialized on cpu %u\n", cpu);
}
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_item->priv = vdata;
per_cpu(scheduler, cpu) = new_ops;
per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -269,8 +269,8 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
init_timer(&v->poll_timer, poll_timer_fn,
v, v->processor);
- v->sched_priv = sched_alloc_vdata(dom_scheduler(d), item, d->sched_priv);
- if ( v->sched_priv == NULL )
+ item->priv = sched_alloc_vdata(dom_scheduler(d), item, d->sched_priv);
+ if ( item->priv == NULL )
{
v->sched_item = NULL;
xfree(item);
@@ -365,7 +365,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
{
spinlock_t *lock;
- vcpudata = v->sched_priv;
+ vcpudata = v->sched_item->priv;
migrate_timer(&v->periodic_timer, new_p);
migrate_timer(&v->singleshot_timer, new_p);
@@ -383,7 +383,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
*/
spin_unlock_irq(lock);
- v->sched_priv = vcpu_priv[v->vcpu_id];
+ v->sched_item->priv = vcpu_priv[v->vcpu_id];
if ( !d->is_dying )
sched_move_irqs(v);
@@ -415,7 +415,7 @@ void sched_destroy_vcpu(struct vcpu *v)
if ( test_and_clear_bool(v->is_urgent) )
atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
sched_remove_item(vcpu_scheduler(v), item);
- sched_free_vdata(vcpu_scheduler(v), v->sched_priv);
+ sched_free_vdata(vcpu_scheduler(v), item->priv);
xfree(item);
v->sched_item = NULL;
}
@@ -1593,6 +1593,7 @@ static int cpu_schedule_up(unsigned int cpu)
else
{
struct vcpu *idle = idle_vcpu[cpu];
+ struct sched_item *item = idle->sched_item;
/*
* During (ACPI?) suspend the idle vCPU for this pCPU is not freed,
@@ -1604,11 +1605,10 @@ static int cpu_schedule_up(unsigned int cpu)
* with a different scheduler, it is schedule_cpu_switch(), invoked
* later, that will set things up as appropriate.
*/
- ASSERT(idle->sched_priv == NULL);
+ ASSERT(item->priv == NULL);
- idle->sched_priv = sched_alloc_vdata(&ops, idle->sched_item,
- idle->domain->sched_priv);
- if ( idle->sched_priv == NULL )
+ item->priv = sched_alloc_vdata(&ops, item, idle->domain->sched_priv);
+ if ( item->priv == NULL )
return -ENOMEM;
}
if ( idle_vcpu[cpu] == NULL )
@@ -1634,9 +1634,9 @@ static void cpu_schedule_down(unsigned int cpu)
struct scheduler *sched = per_cpu(scheduler, cpu);
sched_free_pdata(sched, sd->sched_priv, cpu);
- sched_free_vdata(sched, idle_vcpu[cpu]->sched_priv);
+ sched_free_vdata(sched, idle_vcpu[cpu]->sched_item->priv);
- idle_vcpu[cpu]->sched_priv = NULL;
+ idle_vcpu[cpu]->sched_item->priv = NULL;
sd->sched_priv = NULL;
kill_timer(&sd->s_timer);
@@ -1861,7 +1861,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
*/
old_lock = pcpu_schedule_lock_irq(cpu);
- vpriv_old = idle->sched_priv;
+ vpriv_old = idle->sched_item->priv;
ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
sched_switch_sched(new_ops, cpu, ppriv, vpriv);
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -162,7 +162,7 @@ struct vcpu
struct timer poll_timer; /* timeout for SCHEDOP_poll */
struct sched_item *sched_item;
- void *sched_priv; /* scheduler-specific data */
struct vcpu_runstate_info runstate;
#ifndef CONFIG_COMPAT
@@ -279,6 +278,7 @@ struct vcpu
struct sched_item {
struct vcpu *vcpu;
+ void *priv; /* scheduler private data */
};
/* Per-domain lock can be recursively acquired in fault handlers. */
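For illustration only (not part of the patch): stripped of the Xen
specifics, the change boils down to the standalone sketch below. struct
vcpu loses its sched_priv pointer, and the per-vCPU scheduler data
instead hangs off the sched_item. All helper names used here
(sched_vdata, alloc_vdata, init_vcpu_sched_data, SCHED_VDATA) are
hypothetical stand-ins for the Xen hooks and accessor macros touched
above, reduced to something that compiles on its own.

#include <stdlib.h>

struct vcpu;

struct sched_item {
    struct vcpu *vcpu;  /* back-pointer to the vCPU */
    void *priv;         /* scheduler private data (was vcpu->sched_priv) */
};

struct vcpu {
    struct sched_item *sched_item;
    /* note: no sched_priv field any more */
};

/* stand-in for a scheduler's per-vCPU data, e.g. struct csched_vcpu */
struct sched_vdata {
    int weight;
};

/* what the sched_alloc_vdata() hook boils down to in this sketch */
static void *alloc_vdata(struct sched_item *item)
{
    (void)item;  /* a real scheduler would use the item and domain data */
    return calloc(1, sizeof(struct sched_vdata));
}

/* allocation now goes through the item, cf. sched_init_vcpu() above */
static int init_vcpu_sched_data(struct vcpu *v)
{
    struct sched_item *item = v->sched_item;

    item->priv = alloc_vdata(item);  /* was: v->sched_priv = ... */
    if ( item->priv == NULL )
        return -1;                   /* -ENOMEM in Xen */
    return 0;
}

/* accessor in the new style, cf. AVCPU()/CSCHED_VCPU() above */
#define SCHED_VDATA(v) ((struct sched_vdata *)(v)->sched_item->priv)

Once every scheduler reaches its data only through item->priv, none of
them needs to know that the item currently wraps exactly one vcpu; that
is the vcpu agnosticism this patch prepares for.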