@@ -940,7 +940,8 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
static void
csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
{
- struct csched_item * const svc = CSCHED_ITEM(current->sched_item);
+ struct sched_item *curritem = current->sched_item;
+ struct csched_item * const svc = CSCHED_ITEM(curritem);
const struct scheduler *ops = per_cpu(scheduler, cpu);
ASSERT( current->processor == cpu );
@@ -976,7 +977,7 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
{
unsigned int new_cpu;
unsigned long flags;
- spinlock_t *lock = vcpu_schedule_lock_irqsave(current, &flags);
+ spinlock_t *lock = item_schedule_lock_irqsave(curritem, &flags);
/*
* If it's been active a while, check if we'd be better off
@@ -985,7 +986,7 @@ csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
*/
new_cpu = _csched_cpu_pick(ops, current, 0);
- vcpu_schedule_unlock_irqrestore(lock, flags, current);
+ item_schedule_unlock_irqrestore(lock, flags, curritem);
if ( new_cpu != cpu )
{
@@ -1037,19 +1038,19 @@ csched_item_insert(const struct scheduler *ops, struct sched_item *item)
BUG_ON( is_idle_vcpu(vc) );
/* csched_res_pick() looks in vc->processor's runq, so we need the lock. */
- lock = vcpu_schedule_lock_irq(vc);
+ lock = item_schedule_lock_irq(item);
item->res = csched_res_pick(ops, item);
vc->processor = item->res->processor;
spin_unlock_irq(lock);
- lock = vcpu_schedule_lock_irq(vc);
+ lock = item_schedule_lock_irq(item);
if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
runq_insert(svc);
- vcpu_schedule_unlock_irq(lock, vc);
+ item_schedule_unlock_irq(lock, item);
SCHED_STAT_CRANK(vcpu_insert);
}
@@ -2145,12 +2146,12 @@ csched_dump(const struct scheduler *ops)
spinlock_t *lock;
svc = list_entry(iter_svc, struct csched_item, active_vcpu_elem);
- lock = vcpu_schedule_lock(svc->vcpu);
+ lock = item_schedule_lock(svc->vcpu->sched_item);
printk("\t%3d: ", ++loop);
csched_dump_vcpu(svc);
- vcpu_schedule_unlock(lock, svc->vcpu);
+ item_schedule_unlock(lock, svc->vcpu->sched_item);
}
}
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -171,7 +171,7 @@
* - runqueue lock
* + it is per-runqueue, so:
* * cpus in a runqueue take the runqueue lock, when using
- * pcpu_schedule_lock() / vcpu_schedule_lock() (and friends),
+ * pcpu_schedule_lock() / item_schedule_lock() (and friends),
* * a cpu may (try to) take a "remote" runqueue lock, e.g., for
* load balancing;
* + serializes runqueue operations (removing and inserting vcpus);
@@ -1890,7 +1890,7 @@ unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
unsigned long flags;
s_time_t now;
- lock = vcpu_schedule_lock_irqsave(svc->vcpu, &flags);
+ lock = item_schedule_lock_irqsave(svc->vcpu->sched_item, &flags);
__clear_bit(_VPF_parked, &svc->vcpu->pause_flags);
if ( unlikely(svc->flags & CSFLAG_scheduled) )
@@ -1923,7 +1923,7 @@ unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
}
list_del_init(&svc->parked_elem);
- vcpu_schedule_unlock_irqrestore(lock, flags, svc->vcpu);
+ item_schedule_unlock_irqrestore(lock, flags, svc->vcpu->sched_item);
}
}
@@ -2162,7 +2162,7 @@ csched2_context_saved(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
struct csched2_item * const svc = csched2_item(item);
- spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+ spinlock_t *lock = item_schedule_lock_irq(item);
s_time_t now = NOW();
LIST_HEAD(were_parked);
@@ -2194,7 +2194,7 @@ csched2_context_saved(const struct scheduler *ops, struct sched_item *item)
else if ( !is_idle_vcpu(vc) )
update_load(ops, svc->rqd, svc, -1, now);
- vcpu_schedule_unlock_irq(lock, vc);
+ item_schedule_unlock_irq(lock, item);
unpark_parked_vcpus(ops, &were_parked);
}
@@ -2847,14 +2847,14 @@ csched2_dom_cntl(
for_each_vcpu ( d, v )
{
struct csched2_item *svc = csched2_item(v->sched_item);
- spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
+ spinlock_t *lock = item_schedule_lock(svc->vcpu->sched_item);
ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
svc->weight = sdom->weight;
update_max_weight(svc->rqd, svc->weight, old_weight);
- vcpu_schedule_unlock(lock, svc->vcpu);
+ item_schedule_unlock(lock, svc->vcpu->sched_item);
}
}
/* Cap */
@@ -2885,7 +2885,7 @@ csched2_dom_cntl(
for_each_vcpu ( d, v )
{
svc = csched2_item(v->sched_item);
- lock = vcpu_schedule_lock(svc->vcpu);
+ lock = item_schedule_lock(svc->vcpu->sched_item);
/*
* Too small quotas would in theory cause a lot of overhead,
* which then won't happen because, in csched2_runtime(),
@@ -2893,7 +2893,7 @@ csched2_dom_cntl(
*/
svc->budget_quota = max(sdom->tot_budget / sdom->nr_vcpus,
CSCHED2_MIN_TIMER);
- vcpu_schedule_unlock(lock, svc->vcpu);
+ item_schedule_unlock(lock, svc->vcpu->sched_item);
}
if ( sdom->cap == 0 )
@@ -2928,7 +2928,7 @@ csched2_dom_cntl(
for_each_vcpu ( d, v )
{
svc = csched2_item(v->sched_item);
- lock = vcpu_schedule_lock(svc->vcpu);
+ lock = item_schedule_lock(svc->vcpu->sched_item);
if ( v->is_running )
{
unsigned int cpu = v->processor;
@@ -2959,7 +2959,7 @@ csched2_dom_cntl(
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}
svc->budget = 0;
- vcpu_schedule_unlock(lock, svc->vcpu);
+ item_schedule_unlock(lock, svc->vcpu->sched_item);
}
}
@@ -2975,12 +2975,12 @@ csched2_dom_cntl(
for_each_vcpu ( d, v )
{
struct csched2_item *svc = csched2_item(v->sched_item);
- spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
+ spinlock_t *lock = item_schedule_lock(svc->vcpu->sched_item);
svc->budget = STIME_MAX;
svc->budget_quota = 0;
- vcpu_schedule_unlock(lock, svc->vcpu);
+ item_schedule_unlock(lock, svc->vcpu->sched_item);
}
sdom->cap = 0;
/*
@@ -3119,19 +3119,19 @@ csched2_item_insert(const struct scheduler *ops, struct sched_item *item)
ASSERT(list_empty(&svc->runq_elem));
/* csched2_res_pick() expects the pcpu lock to be held */
- lock = vcpu_schedule_lock_irq(vc);
+ lock = item_schedule_lock_irq(item);
item->res = csched2_res_pick(ops, item);
vc->processor = item->res->processor;
spin_unlock_irq(lock);
- lock = vcpu_schedule_lock_irq(vc);
+ lock = item_schedule_lock_irq(item);
/* Add vcpu to runqueue of initial processor */
runq_assign(ops, vc);
- vcpu_schedule_unlock_irq(lock, vc);
+ item_schedule_unlock_irq(lock, item);
sdom->nr_vcpus++;
@@ -3161,11 +3161,11 @@ csched2_item_remove(const struct scheduler *ops, struct sched_item *item)
SCHED_STAT_CRANK(vcpu_remove);
/* Remove from runqueue */
- lock = vcpu_schedule_lock_irq(vc);
+ lock = item_schedule_lock_irq(item);
runq_deassign(ops, vc);
- vcpu_schedule_unlock_irq(lock, vc);
+ item_schedule_unlock_irq(lock, item);
svc->sdom->nr_vcpus--;
}
@@ -3749,12 +3749,12 @@ csched2_dump(const struct scheduler *ops)
struct csched2_item * const svc = csched2_item(v->sched_item);
spinlock_t *lock;
- lock = vcpu_schedule_lock(svc->vcpu);
+ lock = item_schedule_lock(svc->vcpu->sched_item);
printk("\t%3d: ", ++loop);
csched2_dump_vcpu(prv, svc);
- vcpu_schedule_unlock(lock, svc->vcpu);
+ item_schedule_unlock(lock, svc->vcpu->sched_item);
}
}
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -317,7 +317,7 @@ pick_res(struct null_private *prv, struct sched_item *item)
* all the pCPUs are busy.
*
* In fact, there must always be something sane in v->processor, or
- * vcpu_schedule_lock() and friends won't work. This is not a problem,
+ * item_schedule_lock() and friends won't work. This is not a problem,
* as we will actually assign the vCPU to the pCPU we return from here,
* only if the pCPU is free.
*/
@@ -428,7 +428,7 @@ static void null_item_insert(const struct scheduler *ops,
ASSERT(!is_idle_vcpu(v));
- lock = vcpu_schedule_lock_irq(v);
+ lock = item_schedule_lock_irq(item);
retry:
item->res = pick_res(prv, item);
@@ -436,7 +436,7 @@ static void null_item_insert(const struct scheduler *ops,
spin_unlock(lock);
- lock = vcpu_schedule_lock(v);
+ lock = item_schedule_lock(item);
cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
cpupool_domain_cpumask(v->domain));
@@ -522,7 +522,7 @@ static void null_item_remove(const struct scheduler *ops,
ASSERT(!is_idle_vcpu(v));
- lock = vcpu_schedule_lock_irq(v);
+ lock = item_schedule_lock_irq(item);
/* If v is in waitqueue, just get it out of there and bail */
if ( unlikely(!list_empty(&nvc->waitq_elem)) )
@@ -540,7 +540,7 @@ static void null_item_remove(const struct scheduler *ops,
_vcpu_remove(prv, v);
out:
- vcpu_schedule_unlock_irq(lock, v);
+ item_schedule_unlock_irq(lock, item);
SCHED_STAT_CRANK(vcpu_remove);
}
@@ -860,13 +860,13 @@ static void null_dump(const struct scheduler *ops)
struct null_item * const nvc = null_item(v->sched_item);
spinlock_t *lock;
- lock = vcpu_schedule_lock(nvc->vcpu);
+ lock = item_schedule_lock(nvc->vcpu->sched_item);
printk("\t%3d: ", ++loop);
dump_vcpu(prv, nvc);
printk("\n");
- vcpu_schedule_unlock(lock, nvc->vcpu);
+ item_schedule_unlock(lock, nvc->vcpu->sched_item);
}
}
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -177,7 +177,7 @@ static void repl_timer_handler(void *data);
/*
* System-wide private data, include global RunQueue/DepletedQ
* Global lock is referenced by sched_res->schedule_lock from all
- * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
+ * physical cpus. It can be grabbed via item_schedule_lock_irq()
*/
struct rt_private {
spinlock_t lock; /* the global coarse-grained lock */
@@ -904,7 +904,7 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
item->res = rt_res_pick(ops, item);
vc->processor = item->res->processor;
- lock = vcpu_schedule_lock_irq(vc);
+ lock = item_schedule_lock_irq(item);
now = NOW();
if ( now >= svc->cur_deadline )
@@ -917,7 +917,7 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
if ( !vc->is_running )
runq_insert(ops, svc);
}
- vcpu_schedule_unlock_irq(lock, vc);
+ item_schedule_unlock_irq(lock, item);
SCHED_STAT_CRANK(vcpu_insert);
}
@@ -928,7 +928,6 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
static void
rt_item_remove(const struct scheduler *ops, struct sched_item *item)
{
- struct vcpu *vc = item->vcpu;
struct rt_item * const svc = rt_item(item);
struct rt_dom * const sdom = svc->sdom;
spinlock_t *lock;
@@ -937,14 +936,14 @@ rt_item_remove(const struct scheduler *ops, struct sched_item *item)
BUG_ON( sdom == NULL );
- lock = vcpu_schedule_lock_irq(vc);
+ lock = item_schedule_lock_irq(item);
if ( vcpu_on_q(svc) )
q_remove(svc);
if ( vcpu_on_replq(svc) )
replq_remove(ops,svc);
- vcpu_schedule_unlock_irq(lock, vc);
+ item_schedule_unlock_irq(lock, item);
}
/*
@@ -1339,7 +1338,7 @@ rt_context_saved(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
struct rt_item *svc = rt_item(item);
- spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+ spinlock_t *lock = item_schedule_lock_irq(item);
__clear_bit(__RTDS_scheduled, &svc->flags);
/* not insert idle vcpu to runq */
@@ -1356,7 +1355,7 @@ rt_context_saved(const struct scheduler *ops, struct sched_item *item)
replq_remove(ops, svc);
out:
- vcpu_schedule_unlock_irq(lock, vc);
+ item_schedule_unlock_irq(lock, item);
}
/*
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -194,7 +194,8 @@ static inline void vcpu_runstate_change(
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
{
- spinlock_t *lock = likely(v == current) ? NULL : vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = likely(v == current)
+ ? NULL : item_schedule_lock_irq(v->sched_item);
s_time_t delta;
memcpy(runstate, &v->runstate, sizeof(*runstate));
@@ -203,7 +204,7 @@ void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
runstate->time[runstate->state] += delta;
if ( unlikely(lock != NULL) )
- vcpu_schedule_unlock_irq(lock, v);
+ item_schedule_unlock_irq(lock, v->sched_item);
}
uint64_t get_cpu_idle_time(unsigned int cpu)
@@ -415,7 +416,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
migrate_timer(&v->singleshot_timer, new_p);
migrate_timer(&v->poll_timer, new_p);
- lock = vcpu_schedule_lock_irq(v);
+ lock = item_schedule_lock_irq(v->sched_item);
sched_set_affinity(v, &cpumask_all, &cpumask_all);
@@ -424,7 +425,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
/*
* With v->processor modified we must not
* - make any further changes assuming we hold the scheduler lock,
- * - use vcpu_schedule_unlock_irq().
+ * - use item_schedule_unlock_irq().
*/
spin_unlock_irq(lock);
@@ -523,11 +524,11 @@ void vcpu_sleep_nosync(struct vcpu *v)
TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
- lock = vcpu_schedule_lock_irqsave(v, &flags);
+ lock = item_schedule_lock_irqsave(v->sched_item, &flags);
vcpu_sleep_nosync_locked(v);
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
}
void vcpu_sleep_sync(struct vcpu *v)
@@ -547,7 +548,7 @@ void vcpu_wake(struct vcpu *v)
TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
- lock = vcpu_schedule_lock_irqsave(v, &flags);
+ lock = item_schedule_lock_irqsave(v->sched_item, &flags);
if ( likely(vcpu_runnable(v)) )
{
@@ -561,7 +562,7 @@ void vcpu_wake(struct vcpu *v)
vcpu_runstate_change(v, RUNSTATE_offline, NOW());
}
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
}
void vcpu_unblock(struct vcpu *v)
@@ -629,9 +630,9 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
* These steps are encapsulated in the following two functions; they
* should be called like this:
*
- * lock = vcpu_schedule_lock_irq(v);
+ * lock = item_schedule_lock_irq(item);
* vcpu_migrate_start(v);
- * vcpu_schedule_unlock_irq(lock, v)
+ * item_schedule_unlock_irq(lock, item)
* vcpu_migrate_finish(v);
*
* vcpu_migrate_finish() will do the work now if it can, or simply
@@ -736,12 +737,12 @@ static void vcpu_migrate_finish(struct vcpu *v)
*/
void vcpu_force_reschedule(struct vcpu *v)
{
- spinlock_t *lock = vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = item_schedule_lock_irq(v->sched_item);
if ( v->is_running )
vcpu_migrate_start(v);
- vcpu_schedule_unlock_irq(lock, v);
+ item_schedule_unlock_irq(lock, v->sched_item);
vcpu_migrate_finish(v);
}
@@ -792,7 +793,7 @@ void restore_vcpu_affinity(struct domain *d)
v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
v->sched_item->res = per_cpu(sched_res, v->processor);
- lock = vcpu_schedule_lock_irq(v);
+ lock = item_schedule_lock_irq(v->sched_item);
v->sched_item->res = sched_pick_resource(vcpu_scheduler(v),
v->sched_item);
v->processor = v->sched_item->res->processor;
@@ -827,7 +828,7 @@ int cpu_disable_scheduler(unsigned int cpu)
for_each_vcpu ( d, v )
{
unsigned long flags;
- spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
+ spinlock_t *lock = item_schedule_lock_irqsave(v->sched_item, &flags);
cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
if ( cpumask_empty(&online_affinity) &&
@@ -836,7 +837,7 @@ int cpu_disable_scheduler(unsigned int cpu)
if ( v->affinity_broken )
{
/* The vcpu is temporarily pinned, can't move it. */
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
ret = -EADDRINUSE;
break;
}
@@ -849,7 +850,7 @@ int cpu_disable_scheduler(unsigned int cpu)
if ( v->processor != cpu )
{
/* The vcpu is not on this cpu, so we can move on. */
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
continue;
}
@@ -862,7 +863,7 @@ int cpu_disable_scheduler(unsigned int cpu)
* things would have failed before getting in here.
*/
vcpu_migrate_start(v);
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
vcpu_migrate_finish(v);
@@ -926,7 +927,7 @@ static int vcpu_set_affinity(
spinlock_t *lock;
int ret = 0;
- lock = vcpu_schedule_lock_irq(v);
+ lock = item_schedule_lock_irq(v->sched_item);
if ( v->affinity_broken )
ret = -EBUSY;
@@ -948,7 +949,7 @@ static int vcpu_set_affinity(
vcpu_migrate_start(v);
}
- vcpu_schedule_unlock_irq(lock, v);
+ item_schedule_unlock_irq(lock, v->sched_item);
domain_update_node_affinity(v->domain);
@@ -1080,10 +1081,10 @@ static long do_poll(struct sched_poll *sched_poll)
long vcpu_yield(void)
{
struct vcpu * v=current;
- spinlock_t *lock = vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = item_schedule_lock_irq(v->sched_item);
sched_yield(vcpu_scheduler(v), v->sched_item);
- vcpu_schedule_unlock_irq(lock, v);
+ item_schedule_unlock_irq(lock, v->sched_item);
SCHED_STAT_CRANK(vcpu_yield);
@@ -1169,7 +1170,7 @@ int vcpu_pin_override(struct vcpu *v, int cpu)
spinlock_t *lock;
int ret = -EINVAL;
- lock = vcpu_schedule_lock_irq(v);
+ lock = item_schedule_lock_irq(v->sched_item);
if ( cpu < 0 )
{
@@ -1196,7 +1197,7 @@ int vcpu_pin_override(struct vcpu *v, int cpu)
if ( ret == 0 )
vcpu_migrate_start(v);
- vcpu_schedule_unlock_irq(lock, v);
+ item_schedule_unlock_irq(lock, v->sched_item);
domain_update_node_affinity(v->domain);
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -91,22 +91,22 @@ static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
#define EXTRA_TYPE(arg)
sched_lock(pcpu, unsigned int cpu, cpu, )
-sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(item, const struct sched_item *i, i->res->processor, )
sched_lock(pcpu, unsigned int cpu, cpu, _irq)
-sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_lock(item, const struct sched_item *i, i->res->processor, _irq)
sched_unlock(pcpu, unsigned int cpu, cpu, )
-sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(item, const struct sched_item *i, i->res->processor, )
sched_unlock(pcpu, unsigned int cpu, cpu, _irq)
-sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(item, const struct sched_item *i, i->res->processor, _irq)
#undef EXTRA_TYPE
#define EXTRA_TYPE(arg) , unsigned long arg
#define spin_unlock_irqsave spin_unlock_irqrestore
sched_lock(pcpu, unsigned int cpu, cpu, _irqsave, *flags)
-sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+sched_lock(item, const struct sched_item *i, i->res->processor, _irqsave, *flags)
#undef spin_unlock_irqsave
sched_unlock(pcpu, unsigned int cpu, cpu, _irqrestore, flags)
-sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+sched_unlock(item, const struct sched_item *i, i->res->processor, _irqrestore, flags)
#undef EXTRA_TYPE
#undef sched_unlock
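For readers unfamiliar with the generator macros touched in this hunk: the instantiation sched_lock(item, const struct sched_item *i, i->res->processor, _irq) produces an inline helper roughly like the sketch below. This is a simplified approximation rather than the literal macro expansion; sched_lock_of() is a hypothetical stand-in for the per-CPU schedule-lock lookup, which is not part of this hunk. The point is that the item variant keys the lock off i->res->processor and re-checks it after acquisition, since the item may have moved to another scheduling resource while the caller was spinning.

    /*
     * Simplified sketch of the generated item_schedule_lock_irq() -- not the
     * literal expansion.  sched_lock_of() is a hypothetical stand-in for the
     * per-CPU schedule-lock lookup.
     */
    static inline spinlock_t *item_schedule_lock_irq(const struct sched_item *i)
    {
        for ( ; ; )
        {
            /* The lock is looked up via the resource the item currently uses. */
            spinlock_t *lock = sched_lock_of(i->res->processor);

            spin_lock_irq(lock);

            /* The item may have moved to another resource while we spun. */
            if ( likely(lock == sched_lock_of(i->res->processor)) )
                return lock;

            spin_unlock_irq(lock);
        }
    }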
Rename vcpu_schedule_[un]lock[_irq]() to item_schedule_[un]lock[_irq]()
and let it take a sched_item pointer instead of a vcpu pointer as
parameter.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_credit.c  | 17 +++++++++--------
 xen/common/sched_credit2.c | 40 ++++++++++++++++++++--------------------
 xen/common/sched_null.c    | 14 +++++++-------
 xen/common/sched_rt.c      | 15 +++++++--------
 xen/common/schedule.c      | 47 ++++++++++++++++++++++++-----------------------
 xen/include/xen/sched-if.h | 12 ++++++------
 6 files changed, 73 insertions(+), 72 deletions(-)
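The caller-side conversion pattern is mechanical; a minimal sketch mirroring the hunks above (v stands for any struct vcpu the caller already holds):

    /* Before this patch: the lock helpers were keyed off the vcpu. */
    spinlock_t *lock = vcpu_schedule_lock_irq(v);
    /* ... critical section ... */
    vcpu_schedule_unlock_irq(lock, v);

    /*
     * After this patch: the helpers take the sched_item; callers that only
     * have a vcpu at hand reach it via v->sched_item.
     */
    spinlock_t *lock = item_schedule_lock_irq(v->sched_item);
    /* ... critical section ... */
    item_schedule_unlock_irq(lock, v->sched_item);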