@@ -475,7 +475,7 @@ a653sched_item_sleep(const struct scheduler *ops, struct sched_item *item)
* If the VCPU being put to sleep is the same one that is currently
* running, raise a softirq to invoke the scheduler to switch domains.
*/
- if ( per_cpu(schedule_data, vc->processor).curr == vc )
+ if ( per_cpu(schedule_data, vc->processor).curr == item )
cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
}
@@ -83,7 +83,7 @@
((struct csched_private *)((_ops)->sched_data))
#define CSCHED_PCPU(_c) \
((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
-#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_item->priv)
+#define CSCHED_ITEM(item) ((struct csched_item *) (item)->priv)
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq))
@@ -160,7 +160,7 @@ struct csched_pcpu {
/*
* Virtual CPU
*/
-struct csched_vcpu {
+struct csched_item {
struct list_head runq_elem;
struct list_head active_vcpu_elem;
@@ -231,15 +231,15 @@ static void csched_tick(void *_cpu);
static void csched_acct(void *dummy);
static inline int
-__vcpu_on_runq(struct csched_vcpu *svc)
+__vcpu_on_runq(struct csched_item *svc)
{
return !list_empty(&svc->runq_elem);
}
-static inline struct csched_vcpu *
+static inline struct csched_item *
__runq_elem(struct list_head *elem)
{
- return list_entry(elem, struct csched_vcpu, runq_elem);
+ return list_entry(elem, struct csched_item, runq_elem);
}
/* Is the first element of cpu's runq (if any) cpu's idle vcpu? */
@@ -271,7 +271,7 @@ dec_nr_runnable(unsigned int cpu)
}
static inline void
-__runq_insert(struct csched_vcpu *svc)
+__runq_insert(struct csched_item *svc)
{
unsigned int cpu = svc->vcpu->processor;
const struct list_head * const runq = RUNQ(cpu);
@@ -281,7 +281,7 @@ __runq_insert(struct csched_vcpu *svc)
list_for_each( iter, runq )
{
- const struct csched_vcpu * const iter_svc = __runq_elem(iter);
+ const struct csched_item * const iter_svc = __runq_elem(iter);
if ( svc->pri > iter_svc->pri )
break;
}
@@ -302,34 +302,34 @@ __runq_insert(struct csched_vcpu *svc)
}
static inline void
-runq_insert(struct csched_vcpu *svc)
+runq_insert(struct csched_item *svc)
{
__runq_insert(svc);
inc_nr_runnable(svc->vcpu->processor);
}
static inline void
-__runq_remove(struct csched_vcpu *svc)
+__runq_remove(struct csched_item *svc)
{
BUG_ON( !__vcpu_on_runq(svc) );
list_del_init(&svc->runq_elem);
}
static inline void
-runq_remove(struct csched_vcpu *svc)
+runq_remove(struct csched_item *svc)
{
dec_nr_runnable(svc->vcpu->processor);
__runq_remove(svc);
}
-static void burn_credits(struct csched_vcpu *svc, s_time_t now)
+static void burn_credits(struct csched_item *svc, s_time_t now)
{
s_time_t delta;
uint64_t val;
unsigned int credits;
/* Assert svc is current */
- ASSERT( svc == CSCHED_VCPU(curr_on_cpu(svc->vcpu->processor)) );
+ ASSERT( svc == CSCHED_ITEM(curr_on_cpu(svc->vcpu->processor)) );
if ( (delta = now - svc->start_time) <= 0 )
return;
@@ -347,10 +347,10 @@ boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
-static inline void __runq_tickle(struct csched_vcpu *new)
+static inline void __runq_tickle(struct csched_item *new)
{
unsigned int cpu = new->vcpu->processor;
- struct csched_vcpu * const cur = CSCHED_VCPU(curr_on_cpu(cpu));
+ struct csched_item * const cur = CSCHED_ITEM(curr_on_cpu(cpu));
struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
cpumask_t mask, idle_mask, *online;
int balance_step, idlers_empty;
@@ -605,7 +605,7 @@ init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu)
spc->idle_bias = nr_cpu_ids - 1;
/* Start off idling... */
- BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)));
+ BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)->vcpu));
cpumask_set_cpu(cpu, prv->idlers);
spc->nr_runnable = 0;
}
@@ -637,7 +637,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
{
struct schedule_data *sd = &per_cpu(schedule_data, cpu);
struct csched_private *prv = CSCHED_PRIV(new_ops);
- struct csched_vcpu *svc = vdata;
+ struct csched_item *svc = vdata;
ASSERT(svc && is_idle_vcpu(svc->vcpu));
@@ -669,7 +669,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
static inline void
__csched_vcpu_check(struct vcpu *vc)
{
- struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+ struct csched_item * const svc = CSCHED_ITEM(vc->sched_item);
struct csched_dom * const sdom = svc->sdom;
BUG_ON( svc->vcpu != vc );
@@ -871,7 +871,7 @@ static struct sched_resource *
csched_res_pick(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched_vcpu *svc = CSCHED_VCPU(vc);
+ struct csched_item *svc = CSCHED_ITEM(item);
/*
* We have been called by vcpu_migrate() (in schedule.c), as part
@@ -885,7 +885,7 @@ csched_res_pick(const struct scheduler *ops, struct sched_item *item)
}
static inline void
-__csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc)
+__csched_vcpu_acct_start(struct csched_private *prv, struct csched_item *svc)
{
struct csched_dom * const sdom = svc->sdom;
unsigned long flags;
@@ -915,7 +915,7 @@ __csched_vcpu_acct_start(struct csched_private *prv, struct csched_vcpu *svc)
static inline void
__csched_vcpu_acct_stop_locked(struct csched_private *prv,
- struct csched_vcpu *svc)
+ struct csched_item *svc)
{
struct csched_dom * const sdom = svc->sdom;
@@ -940,7 +940,7 @@ __csched_vcpu_acct_stop_locked(struct csched_private *prv,
static void
csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
{
- struct csched_vcpu * const svc = CSCHED_VCPU(current);
+ struct csched_item * const svc = CSCHED_ITEM(current->sched_item);
const struct scheduler *ops = per_cpu(scheduler, cpu);
ASSERT( current->processor == cpu );
@@ -1009,10 +1009,10 @@ csched_alloc_vdata(const struct scheduler *ops, struct sched_item *item,
void *dd)
{
struct vcpu *vc = item->vcpu;
- struct csched_vcpu *svc;
+ struct csched_item *svc;
/* Allocate per-VCPU info */
- svc = xzalloc(struct csched_vcpu);
+ svc = xzalloc(struct csched_item);
if ( svc == NULL )
return NULL;
@@ -1031,7 +1031,7 @@ static void
csched_item_insert(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched_vcpu *svc = item->priv;
+ struct csched_item *svc = item->priv;
spinlock_t *lock;
BUG_ON( is_idle_vcpu(vc) );
@@ -1057,7 +1057,7 @@ csched_item_insert(const struct scheduler *ops, struct sched_item *item)
static void
csched_free_vdata(const struct scheduler *ops, void *priv)
{
- struct csched_vcpu *svc = priv;
+ struct csched_item *svc = priv;
BUG_ON( !list_empty(&svc->runq_elem) );
@@ -1068,8 +1068,7 @@ static void
csched_item_remove(const struct scheduler *ops, struct sched_item *item)
{
struct csched_private *prv = CSCHED_PRIV(ops);
- struct vcpu *vc = item->vcpu;
- struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+ struct csched_item * const svc = CSCHED_ITEM(item);
struct csched_dom * const sdom = svc->sdom;
SCHED_STAT_CRANK(vcpu_remove);
@@ -1096,14 +1095,14 @@ static void
csched_item_sleep(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+ struct csched_item * const svc = CSCHED_ITEM(item);
unsigned int cpu = vc->processor;
SCHED_STAT_CRANK(vcpu_sleep);
BUG_ON( is_idle_vcpu(vc) );
- if ( curr_on_cpu(cpu) == vc )
+ if ( curr_on_cpu(cpu) == item )
{
/*
* We are about to tickle cpu, so we should clear its bit in idlers.
@@ -1121,12 +1120,12 @@ static void
csched_item_wake(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+ struct csched_item * const svc = CSCHED_ITEM(item);
bool_t migrating;
BUG_ON( is_idle_vcpu(vc) );
- if ( unlikely(curr_on_cpu(vc->processor) == vc) )
+ if ( unlikely(curr_on_cpu(vc->processor) == item) )
{
SCHED_STAT_CRANK(vcpu_wake_running);
return;
@@ -1182,8 +1181,7 @@ csched_item_wake(const struct scheduler *ops, struct sched_item *item)
static void
csched_item_yield(const struct scheduler *ops, struct sched_item *item)
{
- struct vcpu *vc = item->vcpu;
- struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+ struct csched_item * const svc = CSCHED_ITEM(item);
/* Let the scheduler know that this vcpu is trying to yield */
set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags);
@@ -1238,8 +1236,7 @@ static void
csched_aff_cntl(const struct scheduler *ops, struct sched_item *item,
const cpumask_t *hard, const cpumask_t *soft)
{
- struct vcpu *v = item->vcpu;
- struct csched_vcpu *svc = CSCHED_VCPU(v);
+ struct csched_item *svc = CSCHED_ITEM(item);
if ( !hard )
return;
@@ -1342,7 +1339,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
{
struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
struct list_head *runq, *elem, *next, *last_under;
- struct csched_vcpu *svc_elem;
+ struct csched_item *svc_elem;
spinlock_t *lock;
unsigned long flags;
int sort_epoch;
@@ -1388,7 +1385,7 @@ csched_acct(void* dummy)
unsigned long flags;
struct list_head *iter_vcpu, *next_vcpu;
struct list_head *iter_sdom, *next_sdom;
- struct csched_vcpu *svc;
+ struct csched_item *svc;
struct csched_dom *sdom;
uint32_t credit_total;
uint32_t weight_total;
@@ -1511,7 +1508,7 @@ csched_acct(void* dummy)
list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
{
- svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
+ svc = list_entry(iter_vcpu, struct csched_item, active_vcpu_elem);
BUG_ON( sdom != svc->sdom );
/* Increment credit */
@@ -1614,12 +1611,12 @@ csched_tick(void *_cpu)
set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) );
}
-static struct csched_vcpu *
+static struct csched_item *
csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
{
const struct csched_private * const prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
- struct csched_vcpu *speer;
+ struct csched_item *speer;
struct list_head *iter;
struct vcpu *vc;
@@ -1629,7 +1626,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
* Don't steal from an idle CPU's runq because it's about to
* pick up work from it itself.
*/
- if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
+ if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu)->vcpu)) )
goto out;
list_for_each( iter, &peer_pcpu->runq )
@@ -1691,12 +1688,12 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
return NULL;
}
-static struct csched_vcpu *
+static struct csched_item *
csched_load_balance(struct csched_private *prv, int cpu,
- struct csched_vcpu *snext, bool_t *stolen)
+ struct csched_item *snext, bool_t *stolen)
{
struct cpupool *c = per_cpu(cpupool, cpu);
- struct csched_vcpu *speer;
+ struct csched_item *speer;
cpumask_t workers;
cpumask_t *online;
int peer_cpu, first_cpu, peer_node, bstep;
@@ -1845,9 +1842,9 @@ csched_schedule(
{
const int cpu = smp_processor_id();
struct list_head * const runq = RUNQ(cpu);
- struct csched_vcpu * const scurr = CSCHED_VCPU(current);
+ struct csched_item * const scurr = CSCHED_ITEM(current->sched_item);
struct csched_private *prv = CSCHED_PRIV(ops);
- struct csched_vcpu *snext;
+ struct csched_item *snext;
struct task_slice ret;
s_time_t runtime, tslice;
@@ -1963,7 +1960,7 @@ csched_schedule(
if ( tasklet_work_scheduled )
{
TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
- snext = CSCHED_VCPU(idle_vcpu[cpu]);
+ snext = CSCHED_ITEM(idle_vcpu[cpu]->sched_item);
snext->pri = CSCHED_PRI_TS_BOOST;
}
@@ -2015,7 +2012,7 @@ out:
}
static void
-csched_dump_vcpu(struct csched_vcpu *svc)
+csched_dump_vcpu(struct csched_item *svc)
{
struct csched_dom * const sdom = svc->sdom;
@@ -2051,7 +2048,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
struct list_head *runq, *iter;
struct csched_private *prv = CSCHED_PRIV(ops);
struct csched_pcpu *spc;
- struct csched_vcpu *svc;
+ struct csched_item *svc;
spinlock_t *lock;
unsigned long flags;
int loop;
@@ -2075,7 +2072,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
/* current VCPU (nothing to say if that's the idle vcpu). */
- svc = CSCHED_VCPU(curr_on_cpu(cpu));
+ svc = CSCHED_ITEM(curr_on_cpu(cpu));
if ( svc && !is_idle_vcpu(svc->vcpu) )
{
printk("\trun: ");
@@ -2144,10 +2141,10 @@ csched_dump(const struct scheduler *ops)
list_for_each( iter_svc, &sdom->active_vcpu )
{
- struct csched_vcpu *svc;
+ struct csched_item *svc;
spinlock_t *lock;
- svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
+ svc = list_entry(iter_svc, struct csched_item, active_vcpu_elem);
lock = vcpu_schedule_lock(svc->vcpu);
printk("\t%3d: ", ++loop);
@@ -176,7 +176,7 @@
* load balancing;
* + serializes runqueue operations (removing and inserting vcpus);
* + protects runqueue-wide data in csched2_runqueue_data;
- * + protects vcpu parameters in csched2_vcpu for the vcpu in the
+ * + protects vcpu parameters in csched2_item for the vcpu in the
* runqueue.
*
* - Private scheduler lock
@@ -511,7 +511,7 @@ struct csched2_pcpu {
/*
* Virtual CPU
*/
-struct csched2_vcpu {
+struct csched2_item {
struct csched2_dom *sdom; /* Up-pointer to domain */
struct vcpu *vcpu; /* Up-pointer, to vcpu */
struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue */
@@ -570,9 +570,9 @@ static inline struct csched2_pcpu *csched2_pcpu(unsigned int cpu)
return per_cpu(schedule_data, cpu).sched_priv;
}
-static inline struct csched2_vcpu *csched2_vcpu(const struct vcpu *v)
+static inline struct csched2_item *csched2_item(const struct sched_item *item)
{
- return v->sched_item->priv;
+ return item->priv;
}
static inline struct csched2_dom *csched2_dom(const struct domain *d)
@@ -594,7 +594,7 @@ static inline struct csched2_runqueue_data *c2rqd(const struct scheduler *ops,
}
/* Does the domain of this vCPU have a cap? */
-static inline bool has_cap(const struct csched2_vcpu *svc)
+static inline bool has_cap(const struct csched2_item *svc)
{
return svc->budget != STIME_MAX;
}
@@ -688,7 +688,7 @@ void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask)
* Of course, 1, 2 and 3 makes sense only if svc has a soft affinity. Also
* note that at least 5 is guaranteed to _always_ return at least one pcpu.
*/
-static int get_fallback_cpu(struct csched2_vcpu *svc)
+static int get_fallback_cpu(struct csched2_item *svc)
{
struct vcpu *v = svc->vcpu;
unsigned int bs;
@@ -773,7 +773,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc)
* FIXME: Do pre-calculated division?
*/
static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
- struct csched2_vcpu *svc)
+ struct csched2_item *svc)
{
uint64_t val = time * rqd->max_weight + svc->residual;
@@ -781,7 +781,7 @@ static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
svc->credit -= val;
}
-static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_vcpu *svc)
+static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_item *svc)
{
return credit * svc->weight / rqd->max_weight;
}
@@ -790,14 +790,14 @@ static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct c
* Runqueue related code.
*/
-static inline int vcpu_on_runq(struct csched2_vcpu *svc)
+static inline int vcpu_on_runq(struct csched2_item *svc)
{
return !list_empty(&svc->runq_elem);
}
-static inline struct csched2_vcpu * runq_elem(struct list_head *elem)
+static inline struct csched2_item * runq_elem(struct list_head *elem)
{
- return list_entry(elem, struct csched2_vcpu, runq_elem);
+ return list_entry(elem, struct csched2_item, runq_elem);
}
static void activate_runqueue(struct csched2_private *prv, int rqi)
@@ -915,7 +915,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
list_for_each( iter, &rqd->svc )
{
- struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
+ struct csched2_item * svc = list_entry(iter, struct csched2_item, rqd_elem);
if ( svc->weight > max_weight )
max_weight = svc->weight;
@@ -940,7 +940,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
/* Add and remove from runqueue assignment (not active run queue) */
static void
-_runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
+_runq_assign(struct csched2_item *svc, struct csched2_runqueue_data *rqd)
{
svc->rqd = rqd;
@@ -970,7 +970,7 @@ _runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
static void
runq_assign(const struct scheduler *ops, struct vcpu *vc)
{
- struct csched2_vcpu *svc = vc->sched_item->priv;
+ struct csched2_item *svc = vc->sched_item->priv;
ASSERT(svc->rqd == NULL);
@@ -978,7 +978,7 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
}
static void
-_runq_deassign(struct csched2_vcpu *svc)
+_runq_deassign(struct csched2_item *svc)
{
struct csched2_runqueue_data *rqd = svc->rqd;
@@ -997,7 +997,7 @@ _runq_deassign(struct csched2_vcpu *svc)
static void
runq_deassign(const struct scheduler *ops, struct vcpu *vc)
{
- struct csched2_vcpu *svc = vc->sched_item->priv;
+ struct csched2_item *svc = vc->sched_item->priv;
ASSERT(svc->rqd == c2rqd(ops, vc->processor));
@@ -1199,7 +1199,7 @@ update_runq_load(const struct scheduler *ops,
static void
update_svc_load(const struct scheduler *ops,
- struct csched2_vcpu *svc, int change, s_time_t now)
+ struct csched2_item *svc, int change, s_time_t now)
{
struct csched2_private *prv = csched2_priv(ops);
s_time_t delta, vcpu_load;
@@ -1259,7 +1259,7 @@ update_svc_load(const struct scheduler *ops,
static void
update_load(const struct scheduler *ops,
struct csched2_runqueue_data *rqd,
- struct csched2_vcpu *svc, int change, s_time_t now)
+ struct csched2_item *svc, int change, s_time_t now)
{
trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0, NULL);
@@ -1269,7 +1269,7 @@ update_load(const struct scheduler *ops,
}
static void
-runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct csched2_item *svc)
{
struct list_head *iter;
unsigned int cpu = svc->vcpu->processor;
@@ -1288,7 +1288,7 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
list_for_each( iter, runq )
{
- struct csched2_vcpu * iter_svc = runq_elem(iter);
+ struct csched2_item * iter_svc = runq_elem(iter);
if ( svc->credit > iter_svc->credit )
break;
@@ -1312,13 +1312,13 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
}
}
-static inline void runq_remove(struct csched2_vcpu *svc)
+static inline void runq_remove(struct csched2_item *svc)
{
ASSERT(vcpu_on_runq(svc));
list_del_init(&svc->runq_elem);
}
-void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t);
+void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_item *, s_time_t);
static inline void
tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd)
@@ -1334,7 +1334,7 @@ tickle_cpu(unsigned int cpu, struct csched2_runqueue_data *rqd)
* whether or not it already run for more than the ratelimit, to which we
* apply some tolerance).
*/
-static inline bool is_preemptable(const struct csched2_vcpu *svc,
+static inline bool is_preemptable(const struct csched2_item *svc,
s_time_t now, s_time_t ratelimit)
{
if ( ratelimit <= CSCHED2_RATELIMIT_TICKLE_TOLERANCE )
@@ -1360,10 +1360,10 @@ static inline bool is_preemptable(const struct csched2_vcpu *svc,
* Within the same class, the highest difference of credit.
*/
static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
- struct csched2_vcpu *new, unsigned int cpu)
+ struct csched2_item *new, unsigned int cpu)
{
struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
- struct csched2_vcpu * cur = csched2_vcpu(curr_on_cpu(cpu));
+ struct csched2_item * cur = csched2_item(curr_on_cpu(cpu));
struct csched2_private *prv = csched2_priv(ops);
s_time_t score;
@@ -1432,7 +1432,7 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
* pick up some work, so it would be wrong to consider it idle.
*/
static void
-runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
+runq_tickle(const struct scheduler *ops, struct csched2_item *new, s_time_t now)
{
int i, ipid = -1;
s_time_t max = 0;
@@ -1587,7 +1587,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
return;
}
- ASSERT(!is_idle_vcpu(curr_on_cpu(ipid)));
+ ASSERT(!is_idle_vcpu(curr_on_cpu(ipid)->vcpu));
SCHED_STAT_CRANK(tickled_busy_cpu);
tickle:
BUG_ON(ipid == -1);
@@ -1614,7 +1614,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
* Credit-related code
*/
static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
- struct csched2_vcpu *snext)
+ struct csched2_item *snext)
{
struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
struct list_head *iter;
@@ -1644,10 +1644,10 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
list_for_each( iter, &rqd->svc )
{
unsigned int svc_cpu;
- struct csched2_vcpu * svc;
+ struct csched2_item * svc;
int start_credit;
- svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
+ svc = list_entry(iter, struct csched2_item, rqd_elem);
svc_cpu = svc->vcpu->processor;
ASSERT(!is_idle_vcpu(svc->vcpu));
@@ -1657,7 +1657,7 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
* If svc is running, it is our responsibility to make sure, here,
* that the credit it has spent so far get accounted.
*/
- if ( svc->vcpu == curr_on_cpu(svc_cpu) )
+ if ( svc->vcpu == curr_on_cpu(svc_cpu)->vcpu )
{
burn_credits(rqd, svc, now);
/*
@@ -1709,11 +1709,11 @@ static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
}
void burn_credits(struct csched2_runqueue_data *rqd,
- struct csched2_vcpu *svc, s_time_t now)
+ struct csched2_item *svc, s_time_t now)
{
s_time_t delta;
- ASSERT(svc == csched2_vcpu(curr_on_cpu(svc->vcpu->processor)));
+ ASSERT(svc == csched2_item(curr_on_cpu(svc->vcpu->processor)));
if ( unlikely(is_idle_vcpu(svc->vcpu)) )
{
@@ -1763,7 +1763,7 @@ void burn_credits(struct csched2_runqueue_data *rqd,
* Budget-related code.
*/
-static void park_vcpu(struct csched2_vcpu *svc)
+static void park_vcpu(struct csched2_item *svc)
{
struct vcpu *v = svc->vcpu;
@@ -1792,7 +1792,7 @@ static void park_vcpu(struct csched2_vcpu *svc)
list_add(&svc->parked_elem, &svc->sdom->parked_vcpus);
}
-static bool vcpu_grab_budget(struct csched2_vcpu *svc)
+static bool vcpu_grab_budget(struct csched2_item *svc)
{
struct csched2_dom *sdom = svc->sdom;
unsigned int cpu = svc->vcpu->processor;
@@ -1839,7 +1839,7 @@ static bool vcpu_grab_budget(struct csched2_vcpu *svc)
}
static void
-vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked)
+vcpu_return_budget(struct csched2_item *svc, struct list_head *parked)
{
struct csched2_dom *sdom = svc->sdom;
unsigned int cpu = svc->vcpu->processor;
@@ -1882,7 +1882,7 @@ vcpu_return_budget(struct csched2_vcpu *svc, struct list_head *parked)
static void
unpark_parked_vcpus(const struct scheduler *ops, struct list_head *vcpus)
{
- struct csched2_vcpu *svc, *tmp;
+ struct csched2_item *svc, *tmp;
spinlock_t *lock;
list_for_each_entry_safe(svc, tmp, vcpus, parked_elem)
@@ -2004,7 +2004,7 @@ static void replenish_domain_budget(void* data)
static inline void
csched2_vcpu_check(struct vcpu *vc)
{
- struct csched2_vcpu * const svc = csched2_vcpu(vc);
+ struct csched2_item * const svc = csched2_item(vc->sched_item);
struct csched2_dom * const sdom = svc->sdom;
BUG_ON( svc->vcpu != vc );
@@ -2030,10 +2030,10 @@ csched2_alloc_vdata(const struct scheduler *ops, struct sched_item *item,
void *dd)
{
struct vcpu *vc = item->vcpu;
- struct csched2_vcpu *svc;
+ struct csched2_item *svc;
/* Allocate per-VCPU info */
- svc = xzalloc(struct csched2_vcpu);
+ svc = xzalloc(struct csched2_item);
if ( svc == NULL )
return NULL;
@@ -2074,12 +2074,12 @@ static void
csched2_item_sleep(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched2_vcpu * const svc = csched2_vcpu(vc);
+ struct csched2_item * const svc = csched2_item(item);
ASSERT(!is_idle_vcpu(vc));
SCHED_STAT_CRANK(vcpu_sleep);
- if ( curr_on_cpu(vc->processor) == vc )
+ if ( curr_on_cpu(vc->processor) == item )
{
tickle_cpu(vc->processor, svc->rqd);
}
@@ -2097,7 +2097,7 @@ static void
csched2_item_wake(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched2_vcpu * const svc = csched2_vcpu(vc);
+ struct csched2_item * const svc = csched2_item(item);
unsigned int cpu = vc->processor;
s_time_t now;
@@ -2105,7 +2105,7 @@ csched2_item_wake(const struct scheduler *ops, struct sched_item *item)
ASSERT(!is_idle_vcpu(vc));
- if ( unlikely(curr_on_cpu(cpu) == vc) )
+ if ( unlikely(curr_on_cpu(cpu) == item) )
{
SCHED_STAT_CRANK(vcpu_wake_running);
goto out;
@@ -2152,8 +2152,7 @@ out:
static void
csched2_item_yield(const struct scheduler *ops, struct sched_item *item)
{
- struct vcpu *v = item->vcpu;
- struct csched2_vcpu * const svc = csched2_vcpu(v);
+ struct csched2_item * const svc = csched2_item(item);
__set_bit(__CSFLAG_vcpu_yield, &svc->flags);
}
@@ -2162,7 +2161,7 @@ static void
csched2_context_saved(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched2_vcpu * const svc = csched2_vcpu(vc);
+ struct csched2_item * const svc = csched2_item(item);
spinlock_t *lock = vcpu_schedule_lock_irq(vc);
s_time_t now = NOW();
LIST_HEAD(were_parked);
@@ -2208,7 +2207,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
struct vcpu *vc = item->vcpu;
int i, min_rqi = -1, min_s_rqi = -1;
unsigned int new_cpu, cpu = vc->processor;
- struct csched2_vcpu *svc = csched2_vcpu(vc);
+ struct csched2_item *svc = csched2_item(item);
s_time_t min_avgload = MAX_LOAD, min_s_avgload = MAX_LOAD;
bool has_soft;
@@ -2430,15 +2429,15 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
typedef struct {
/* NB: Modified by consider() */
s_time_t load_delta;
- struct csched2_vcpu * best_push_svc, *best_pull_svc;
+ struct csched2_item * best_push_svc, *best_pull_svc;
/* NB: Read by consider() */
struct csched2_runqueue_data *lrqd;
struct csched2_runqueue_data *orqd;
} balance_state_t;
static void consider(balance_state_t *st,
- struct csched2_vcpu *push_svc,
- struct csched2_vcpu *pull_svc)
+ struct csched2_item *push_svc,
+ struct csched2_item *pull_svc)
{
s_time_t l_load, o_load, delta;
@@ -2471,8 +2470,8 @@ static void consider(balance_state_t *st,
static void migrate(const struct scheduler *ops,
- struct csched2_vcpu *svc,
- struct csched2_runqueue_data *trqd,
+ struct csched2_item *svc,
+ struct csched2_runqueue_data *trqd,
s_time_t now)
{
int cpu = svc->vcpu->processor;
@@ -2541,7 +2540,7 @@ static void migrate(const struct scheduler *ops,
* - svc is not already flagged to migrate,
* - if svc is allowed to run on at least one of the pcpus of rqd.
*/
-static bool vcpu_is_migrateable(struct csched2_vcpu *svc,
+static bool vcpu_is_migrateable(struct csched2_item *svc,
struct csched2_runqueue_data *rqd)
{
struct vcpu *v = svc->vcpu;
@@ -2691,7 +2690,7 @@ retry:
/* Reuse load delta (as we're trying to minimize it) */
list_for_each( push_iter, &st.lrqd->svc )
{
- struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem);
+ struct csched2_item * push_svc = list_entry(push_iter, struct csched2_item, rqd_elem);
update_svc_load(ops, push_svc, 0, now);
@@ -2700,7 +2699,7 @@ retry:
list_for_each( pull_iter, &st.orqd->svc )
{
- struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
+ struct csched2_item * pull_svc = list_entry(pull_iter, struct csched2_item, rqd_elem);
if ( !inner_load_updated )
update_svc_load(ops, pull_svc, 0, now);
@@ -2719,7 +2718,7 @@ retry:
list_for_each( pull_iter, &st.orqd->svc )
{
- struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
+ struct csched2_item * pull_svc = list_entry(pull_iter, struct csched2_item, rqd_elem);
if ( !vcpu_is_migrateable(pull_svc, st.lrqd) )
continue;
@@ -2746,7 +2745,7 @@ csched2_item_migrate(
{
struct vcpu *vc = item->vcpu;
struct domain *d = vc->domain;
- struct csched2_vcpu * const svc = csched2_vcpu(vc);
+ struct csched2_item * const svc = csched2_item(item);
struct csched2_runqueue_data *trqd;
s_time_t now = NOW();
@@ -2847,7 +2846,7 @@ csched2_dom_cntl(
/* Update weights for vcpus, and max_weight for runqueues on which they reside */
for_each_vcpu ( d, v )
{
- struct csched2_vcpu *svc = csched2_vcpu(v);
+ struct csched2_item *svc = csched2_item(v->sched_item);
spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
@@ -2861,7 +2860,7 @@ csched2_dom_cntl(
/* Cap */
if ( op->u.credit2.cap != 0 )
{
- struct csched2_vcpu *svc;
+ struct csched2_item *svc;
spinlock_t *lock;
/* Cap is only valid if it's below 100 * nr_of_vCPUS */
@@ -2885,7 +2884,7 @@ csched2_dom_cntl(
*/
for_each_vcpu ( d, v )
{
- svc = csched2_vcpu(v);
+ svc = csched2_item(v->sched_item);
lock = vcpu_schedule_lock(svc->vcpu);
/*
* Too small quotas would in theory cause a lot of overhead,
@@ -2928,14 +2927,14 @@ csched2_dom_cntl(
*/
for_each_vcpu ( d, v )
{
- svc = csched2_vcpu(v);
+ svc = csched2_item(v->sched_item);
lock = vcpu_schedule_lock(svc->vcpu);
if ( v->is_running )
{
unsigned int cpu = v->processor;
struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
- ASSERT(curr_on_cpu(cpu) == v);
+ ASSERT(curr_on_cpu(cpu)->vcpu == v);
/*
* We are triggering a reschedule on the vCPU's
@@ -2975,7 +2974,7 @@ csched2_dom_cntl(
/* Disable budget accounting for all the vCPUs. */
for_each_vcpu ( d, v )
{
- struct csched2_vcpu *svc = csched2_vcpu(v);
+ struct csched2_item *svc = csched2_item(v->sched_item);
spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
svc->budget = STIME_MAX;
@@ -3012,8 +3011,7 @@ static void
csched2_aff_cntl(const struct scheduler *ops, struct sched_item *item,
const cpumask_t *hard, const cpumask_t *soft)
{
- struct vcpu *v = item->vcpu;
- struct csched2_vcpu *svc = csched2_vcpu(v);
+ struct csched2_item *svc = csched2_item(item);
if ( !hard )
return;
@@ -3113,7 +3111,7 @@ static void
csched2_item_insert(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched2_vcpu *svc = item->priv;
+ struct csched2_item *svc = item->priv;
struct csched2_dom * const sdom = svc->sdom;
spinlock_t *lock;
@@ -3145,7 +3143,7 @@ csched2_item_insert(const struct scheduler *ops, struct sched_item *item)
static void
csched2_free_vdata(const struct scheduler *ops, void *priv)
{
- struct csched2_vcpu *svc = priv;
+ struct csched2_item *svc = priv;
xfree(svc);
}
@@ -3154,7 +3152,7 @@ static void
csched2_item_remove(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct csched2_vcpu * const svc = csched2_vcpu(vc);
+ struct csched2_item * const svc = csched2_item(item);
spinlock_t *lock;
ASSERT(!is_idle_vcpu(vc));
@@ -3175,7 +3173,7 @@ csched2_item_remove(const struct scheduler *ops, struct sched_item *item)
/* How long should we let this vcpu run for? */
static s_time_t
csched2_runtime(const struct scheduler *ops, int cpu,
- struct csched2_vcpu *snext, s_time_t now)
+ struct csched2_item *snext, s_time_t now)
{
s_time_t time, min_time;
int rt_credit; /* Proposed runtime measured in credits */
@@ -3220,7 +3218,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
*/
if ( ! list_empty(runq) )
{
- struct csched2_vcpu *swait = runq_elem(runq->next);
+ struct csched2_item *swait = runq_elem(runq->next);
if ( ! is_idle_vcpu(swait->vcpu)
&& swait->credit > 0 )
@@ -3271,14 +3269,14 @@ csched2_runtime(const struct scheduler *ops, int cpu,
/*
* Find a candidate.
*/
-static struct csched2_vcpu *
+static struct csched2_item *
runq_candidate(struct csched2_runqueue_data *rqd,
- struct csched2_vcpu *scurr,
+ struct csched2_item *scurr,
int cpu, s_time_t now,
unsigned int *skipped)
{
struct list_head *iter, *temp;
- struct csched2_vcpu *snext = NULL;
+ struct csched2_item *snext = NULL;
struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
bool yield = false, soft_aff_preempt = false;
@@ -3359,12 +3357,12 @@ runq_candidate(struct csched2_runqueue_data *rqd,
if ( vcpu_runnable(scurr->vcpu) && !soft_aff_preempt )
snext = scurr;
else
- snext = csched2_vcpu(idle_vcpu[cpu]);
+ snext = csched2_item(idle_vcpu[cpu]->sched_item);
check_runq:
list_for_each_safe( iter, temp, &rqd->runq )
{
- struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem);
+ struct csched2_item * svc = list_entry(iter, struct csched2_item, runq_elem);
if ( unlikely(tb_init_done) )
{
@@ -3463,8 +3461,8 @@ csched2_schedule(
{
const int cpu = smp_processor_id();
struct csched2_runqueue_data *rqd;
- struct csched2_vcpu * const scurr = csched2_vcpu(current);
- struct csched2_vcpu *snext = NULL;
+ struct csched2_item * const scurr = csched2_item(current->sched_item);
+ struct csched2_item *snext = NULL;
unsigned int skipped_vcpus = 0;
struct task_slice ret;
bool tickled;
@@ -3540,7 +3538,7 @@ csched2_schedule(
{
__clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
trace_var(TRC_CSCHED2_SCHED_TASKLET, 1, 0, NULL);
- snext = csched2_vcpu(idle_vcpu[cpu]);
+ snext = csched2_item(idle_vcpu[cpu]->sched_item);
}
else
snext = runq_candidate(rqd, scurr, cpu, now, &skipped_vcpus);
@@ -3643,7 +3641,7 @@ csched2_schedule(
}
static void
-csched2_dump_vcpu(struct csched2_private *prv, struct csched2_vcpu *svc)
+csched2_dump_vcpu(struct csched2_private *prv, struct csched2_item *svc)
{
printk("[%i.%i] flags=%x cpu=%i",
svc->vcpu->domain->domain_id,
@@ -3667,7 +3665,7 @@ static inline void
dump_pcpu(const struct scheduler *ops, int cpu)
{
struct csched2_private *prv = csched2_priv(ops);
- struct csched2_vcpu *svc;
+ struct csched2_item *svc;
printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
cpu, c2r(cpu),
@@ -3675,7 +3673,7 @@ dump_pcpu(const struct scheduler *ops, int cpu)
nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
/* current VCPU (nothing to say if that's the idle vcpu) */
- svc = csched2_vcpu(curr_on_cpu(cpu));
+ svc = csched2_item(curr_on_cpu(cpu));
if ( svc && !is_idle_vcpu(svc->vcpu) )
{
printk("\trun: ");
@@ -3748,7 +3746,7 @@ csched2_dump(const struct scheduler *ops)
for_each_vcpu( sdom->dom, v )
{
- struct csched2_vcpu * const svc = csched2_vcpu(v);
+ struct csched2_item * const svc = csched2_item(v->sched_item);
spinlock_t *lock;
lock = vcpu_schedule_lock(svc->vcpu);
@@ -3777,7 +3775,7 @@ csched2_dump(const struct scheduler *ops)
printk("RUNQ:\n");
list_for_each( iter, runq )
{
- struct csched2_vcpu *svc = runq_elem(iter);
+ struct csched2_item *svc = runq_elem(iter);
if ( svc )
{
@@ -3879,7 +3877,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
void *pdata, void *vdata)
{
struct csched2_private *prv = csched2_priv(new_ops);
- struct csched2_vcpu *svc = vdata;
+ struct csched2_item *svc = vdata;
unsigned rqi;
ASSERT(pdata && svc && is_idle_vcpu(svc->vcpu));
@@ -94,7 +94,7 @@ DEFINE_PER_CPU(struct null_pcpu, npc);
/*
* Virtual CPU
*/
-struct null_vcpu {
+struct null_item {
struct list_head waitq_elem;
struct vcpu *vcpu;
};
@@ -115,9 +115,9 @@ static inline struct null_private *null_priv(const struct scheduler *ops)
return ops->sched_data;
}
-static inline struct null_vcpu *null_vcpu(const struct vcpu *v)
+static inline struct null_item *null_item(const struct sched_item *item)
{
- return v->sched_item->priv;
+ return item->priv;
}
static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
@@ -197,9 +197,9 @@ static void *null_alloc_vdata(const struct scheduler *ops,
struct sched_item *item, void *dd)
{
struct vcpu *v = item->vcpu;
- struct null_vcpu *nvc;
+ struct null_item *nvc;
- nvc = xzalloc(struct null_vcpu);
+ nvc = xzalloc(struct null_item);
if ( nvc == NULL )
return NULL;
@@ -213,7 +213,7 @@ static void *null_alloc_vdata(const struct scheduler *ops,
static void null_free_vdata(const struct scheduler *ops, void *priv)
{
- struct null_vcpu *nvc = priv;
+ struct null_item *nvc = priv;
xfree(nvc);
}
@@ -390,7 +390,7 @@ static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
{
struct schedule_data *sd = &per_cpu(schedule_data, cpu);
struct null_private *prv = null_priv(new_ops);
- struct null_vcpu *nvc = vdata;
+ struct null_item *nvc = vdata;
ASSERT(nvc && is_idle_vcpu(nvc->vcpu));
@@ -422,7 +422,7 @@ static void null_item_insert(const struct scheduler *ops,
{
struct vcpu *v = item->vcpu;
struct null_private *prv = null_priv(ops);
- struct null_vcpu *nvc = null_vcpu(v);
+ struct null_item *nvc = null_item(item);
unsigned int cpu;
spinlock_t *lock;
@@ -479,9 +479,9 @@ static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
{
unsigned int bs;
unsigned int cpu = v->processor;
- struct null_vcpu *wvc;
+ struct null_item *wvc;
- ASSERT(list_empty(&null_vcpu(v)->waitq_elem));
+ ASSERT(list_empty(&null_item(v->sched_item)->waitq_elem));
vcpu_deassign(prv, v, cpu);
@@ -517,7 +517,7 @@ static void null_item_remove(const struct scheduler *ops,
{
struct vcpu *v = item->vcpu;
struct null_private *prv = null_priv(ops);
- struct null_vcpu *nvc = null_vcpu(v);
+ struct null_item *nvc = null_item(item);
spinlock_t *lock;
ASSERT(!is_idle_vcpu(v));
@@ -552,13 +552,13 @@ static void null_item_wake(const struct scheduler *ops,
ASSERT(!is_idle_vcpu(v));
- if ( unlikely(curr_on_cpu(v->processor) == v) )
+ if ( unlikely(curr_on_cpu(v->processor) == item) )
{
SCHED_STAT_CRANK(vcpu_wake_running);
return;
}
- if ( unlikely(!list_empty(&null_vcpu(v)->waitq_elem)) )
+ if ( unlikely(!list_empty(&null_item(item)->waitq_elem)) )
{
/* Not exactly "on runq", but close enough for reusing the counter */
SCHED_STAT_CRANK(vcpu_wake_onrunq);
@@ -582,7 +582,7 @@ static void null_item_sleep(const struct scheduler *ops,
ASSERT(!is_idle_vcpu(v));
/* If v is not assigned to a pCPU, or is not running, no need to bother */
- if ( curr_on_cpu(v->processor) == v )
+ if ( curr_on_cpu(v->processor) == item )
cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
SCHED_STAT_CRANK(vcpu_sleep);
@@ -600,7 +600,7 @@ static void null_item_migrate(const struct scheduler *ops,
{
struct vcpu *v = item->vcpu;
struct null_private *prv = null_priv(ops);
- struct null_vcpu *nvc = null_vcpu(v);
+ struct null_item *nvc = null_item(item);
ASSERT(!is_idle_vcpu(v));
@@ -685,7 +685,7 @@ static void null_item_migrate(const struct scheduler *ops,
#ifndef NDEBUG
static inline void null_vcpu_check(struct vcpu *v)
{
- struct null_vcpu * const nvc = null_vcpu(v);
+ struct null_item * const nvc = null_item(v->sched_item);
struct null_dom * const ndom = v->domain->sched_priv;
BUG_ON(nvc->vcpu != v);
@@ -715,7 +715,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
unsigned int bs;
const unsigned int cpu = smp_processor_id();
struct null_private *prv = null_priv(ops);
- struct null_vcpu *wvc;
+ struct null_item *wvc;
struct task_slice ret;
SCHED_STAT_CRANK(schedule);
@@ -798,7 +798,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
return ret;
}
-static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc)
+static inline void dump_vcpu(struct null_private *prv, struct null_item *nvc)
{
printk("[%i.%i] pcpu=%d", nvc->vcpu->domain->domain_id,
nvc->vcpu->vcpu_id, list_empty(&nvc->waitq_elem) ?
@@ -808,7 +808,7 @@ static inline void dump_vcpu(struct null_private *prv, struct null_vcpu *nvc)
static void null_dump_pcpu(const struct scheduler *ops, int cpu)
{
struct null_private *prv = null_priv(ops);
- struct null_vcpu *nvc;
+ struct null_item *nvc;
spinlock_t *lock;
unsigned long flags;
@@ -823,7 +823,7 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
printk("\n");
/* current VCPU (nothing to say if that's the idle vcpu) */
- nvc = null_vcpu(curr_on_cpu(cpu));
+ nvc = null_item(curr_on_cpu(cpu));
if ( nvc && !is_idle_vcpu(nvc->vcpu) )
{
printk("\trun: ");
@@ -857,7 +857,7 @@ static void null_dump(const struct scheduler *ops)
printk("\tDomain: %d\n", ndom->dom->domain_id);
for_each_vcpu( ndom->dom, v )
{
- struct null_vcpu * const nvc = null_vcpu(v);
+ struct null_item * const nvc = null_item(v->sched_item);
spinlock_t *lock;
lock = vcpu_schedule_lock(nvc->vcpu);
@@ -875,7 +875,7 @@ static void null_dump(const struct scheduler *ops)
spin_lock(&prv->waitq_lock);
list_for_each( iter, &prv->waitq )
{
- struct null_vcpu *nvc = list_entry(iter, struct null_vcpu, waitq_elem);
+ struct null_item *nvc = list_entry(iter, struct null_item, waitq_elem);
if ( loop++ != 0 )
printk(", ");
@@ -195,7 +195,7 @@ struct rt_private {
/*
* Virtual CPU
*/
-struct rt_vcpu {
+struct rt_item {
struct list_head q_elem; /* on the runq/depletedq list */
struct list_head replq_elem; /* on the replenishment events list */
@@ -233,9 +233,9 @@ static inline struct rt_private *rt_priv(const struct scheduler *ops)
return ops->sched_data;
}
-static inline struct rt_vcpu *rt_vcpu(const struct vcpu *vcpu)
+static inline struct rt_item *rt_item(const struct sched_item *item)
{
- return vcpu->sched_item->priv;
+ return item->priv;
}
static inline struct list_head *rt_runq(const struct scheduler *ops)
@@ -253,7 +253,7 @@ static inline struct list_head *rt_replq(const struct scheduler *ops)
return &rt_priv(ops)->replq;
}
-static inline bool has_extratime(const struct rt_vcpu *svc)
+static inline bool has_extratime(const struct rt_item *svc)
{
return svc->flags & RTDS_extratime;
}
@@ -263,25 +263,25 @@ static inline bool has_extratime(const struct rt_vcpu *svc)
* and the replenishment events queue.
*/
static int
-vcpu_on_q(const struct rt_vcpu *svc)
+vcpu_on_q(const struct rt_item *svc)
{
return !list_empty(&svc->q_elem);
}
-static struct rt_vcpu *
+static struct rt_item *
q_elem(struct list_head *elem)
{
- return list_entry(elem, struct rt_vcpu, q_elem);
+ return list_entry(elem, struct rt_item, q_elem);
}
-static struct rt_vcpu *
+static struct rt_item *
replq_elem(struct list_head *elem)
{
- return list_entry(elem, struct rt_vcpu, replq_elem);
+ return list_entry(elem, struct rt_item, replq_elem);
}
static int
-vcpu_on_replq(const struct rt_vcpu *svc)
+vcpu_on_replq(const struct rt_item *svc)
{
return !list_empty(&svc->replq_elem);
}
@@ -291,7 +291,7 @@ vcpu_on_replq(const struct rt_vcpu *svc)
* Otherwise, return value < 0
*/
static s_time_t
-compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
+compare_vcpu_priority(const struct rt_item *v1, const struct rt_item *v2)
{
int prio = v2->priority_level - v1->priority_level;
@@ -305,7 +305,7 @@ compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
* Debug related code, dump vcpu/cpu information
*/
static void
-rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
+rt_dump_vcpu(const struct scheduler *ops, const struct rt_item *svc)
{
cpumask_t *cpupool_mask, *mask;
@@ -352,13 +352,13 @@ static void
rt_dump_pcpu(const struct scheduler *ops, int cpu)
{
struct rt_private *prv = rt_priv(ops);
- struct rt_vcpu *svc;
+ struct rt_item *svc;
unsigned long flags;
spin_lock_irqsave(&prv->lock, flags);
printk("CPU[%02d]\n", cpu);
/* current VCPU (nothing to say if that's the idle vcpu). */
- svc = rt_vcpu(curr_on_cpu(cpu));
+ svc = rt_item(curr_on_cpu(cpu));
if ( svc && !is_idle_vcpu(svc->vcpu) )
{
rt_dump_vcpu(ops, svc);
@@ -371,7 +371,7 @@ rt_dump(const struct scheduler *ops)
{
struct list_head *runq, *depletedq, *replq, *iter;
struct rt_private *prv = rt_priv(ops);
- struct rt_vcpu *svc;
+ struct rt_item *svc;
struct rt_dom *sdom;
unsigned long flags;
@@ -415,7 +415,7 @@ rt_dump(const struct scheduler *ops)
for_each_vcpu ( sdom->dom, v )
{
- svc = rt_vcpu(v);
+ svc = rt_item(v->sched_item);
rt_dump_vcpu(ops, svc);
}
}
@@ -429,7 +429,7 @@ rt_dump(const struct scheduler *ops)
* it needs to be updated to the deadline of the current period
*/
static void
-rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
+rt_update_deadline(s_time_t now, struct rt_item *svc)
{
ASSERT(now >= svc->cur_deadline);
ASSERT(svc->period != 0);
@@ -500,8 +500,8 @@ deadline_queue_remove(struct list_head *queue, struct list_head *elem)
}
static inline bool
-deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
- struct rt_vcpu *svc, struct list_head *elem,
+deadline_queue_insert(struct rt_item * (*qelem)(struct list_head *),
+ struct rt_item *svc, struct list_head *elem,
struct list_head *queue)
{
struct list_head *iter;
@@ -509,7 +509,7 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
list_for_each ( iter, queue )
{
- struct rt_vcpu * iter_svc = (*qelem)(iter);
+ struct rt_item * iter_svc = (*qelem)(iter);
if ( compare_vcpu_priority(svc, iter_svc) > 0 )
break;
pos++;
@@ -523,14 +523,14 @@ deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
deadline_queue_insert(&replq_elem, ##__VA_ARGS__)
static inline void
-q_remove(struct rt_vcpu *svc)
+q_remove(struct rt_item *svc)
{
ASSERT( vcpu_on_q(svc) );
list_del_init(&svc->q_elem);
}
static inline void
-replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_remove(const struct scheduler *ops, struct rt_item *svc)
{
struct rt_private *prv = rt_priv(ops);
struct list_head *replq = rt_replq(ops);
@@ -547,7 +547,7 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
*/
if ( !list_empty(replq) )
{
- struct rt_vcpu *svc_next = replq_elem(replq->next);
+ struct rt_item *svc_next = replq_elem(replq->next);
set_timer(&prv->repl_timer, svc_next->cur_deadline);
}
else
@@ -561,7 +561,7 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
* Insert svc without budget in DepletedQ unsorted;
*/
static void
-runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+runq_insert(const struct scheduler *ops, struct rt_item *svc)
{
struct rt_private *prv = rt_priv(ops);
struct list_head *runq = rt_runq(ops);
@@ -579,7 +579,7 @@ runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
}
static void
-replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_insert(const struct scheduler *ops, struct rt_item *svc)
{
struct list_head *replq = rt_replq(ops);
struct rt_private *prv = rt_priv(ops);
@@ -601,10 +601,10 @@ replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
* changed.
*/
static void
-replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
+replq_reinsert(const struct scheduler *ops, struct rt_item *svc)
{
struct list_head *replq = rt_replq(ops);
- struct rt_vcpu *rearm_svc = svc;
+ struct rt_item *rearm_svc = svc;
bool_t rearm = 0;
ASSERT( vcpu_on_replq(svc) );
@@ -735,7 +735,7 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
void *pdata, void *vdata)
{
struct rt_private *prv = rt_priv(new_ops);
- struct rt_vcpu *svc = vdata;
+ struct rt_item *svc = vdata;
ASSERT(!pdata && svc && is_idle_vcpu(svc->vcpu));
@@ -850,10 +850,10 @@ static void *
rt_alloc_vdata(const struct scheduler *ops, struct sched_item *item, void *dd)
{
struct vcpu *vc = item->vcpu;
- struct rt_vcpu *svc;
+ struct rt_item *svc;
/* Allocate per-VCPU info */
- svc = xzalloc(struct rt_vcpu);
+ svc = xzalloc(struct rt_item);
if ( svc == NULL )
return NULL;
@@ -878,7 +878,7 @@ rt_alloc_vdata(const struct scheduler *ops, struct sched_item *item, void *dd)
static void
rt_free_vdata(const struct scheduler *ops, void *priv)
{
- struct rt_vcpu *svc = priv;
+ struct rt_item *svc = priv;
xfree(svc);
}
@@ -894,7 +894,7 @@ static void
rt_item_insert(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct rt_vcpu *svc = rt_vcpu(vc);
+ struct rt_item *svc = rt_item(item);
s_time_t now;
spinlock_t *lock;
@@ -923,13 +923,13 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
}
/*
- * Remove rt_vcpu svc from the old scheduler in source cpupool.
+ * Remove rt_item svc from the old scheduler in source cpupool.
*/
static void
rt_item_remove(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct rt_vcpu * const svc = rt_vcpu(vc);
+ struct rt_item * const svc = rt_item(item);
struct rt_dom * const sdom = svc->sdom;
spinlock_t *lock;
@@ -951,7 +951,7 @@ rt_item_remove(const struct scheduler *ops, struct sched_item *item)
* Burn budget in nanosecond granularity
*/
static void
-burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
+burn_budget(const struct scheduler *ops, struct rt_item *svc, s_time_t now)
{
s_time_t delta;
@@ -1015,13 +1015,13 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
* RunQ is sorted. Pick first one within cpumask. If no one, return NULL
* lock is grabbed before calling this function
*/
-static struct rt_vcpu *
+static struct rt_item *
runq_pick(const struct scheduler *ops, const cpumask_t *mask)
{
struct list_head *runq = rt_runq(ops);
struct list_head *iter;
- struct rt_vcpu *svc = NULL;
- struct rt_vcpu *iter_svc = NULL;
+ struct rt_item *svc = NULL;
+ struct rt_item *iter_svc = NULL;
cpumask_t cpu_common;
cpumask_t *online;
@@ -1072,8 +1072,8 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
{
const int cpu = smp_processor_id();
struct rt_private *prv = rt_priv(ops);
- struct rt_vcpu *const scurr = rt_vcpu(current);
- struct rt_vcpu *snext = NULL;
+ struct rt_item *const scurr = rt_item(current->sched_item);
+ struct rt_item *snext = NULL;
struct task_slice ret = { .migrated = 0 };
/* TRACE */
@@ -1099,13 +1099,13 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
if ( tasklet_work_scheduled )
{
trace_var(TRC_RTDS_SCHED_TASKLET, 1, 0, NULL);
- snext = rt_vcpu(idle_vcpu[cpu]);
+ snext = rt_item(idle_vcpu[cpu]->sched_item);
}
else
{
snext = runq_pick(ops, cpumask_of(cpu));
if ( snext == NULL )
- snext = rt_vcpu(idle_vcpu[cpu]);
+ snext = rt_item(idle_vcpu[cpu]->sched_item);
/* if scurr has higher priority and budget, still pick scurr */
if ( !is_idle_vcpu(current) &&
@@ -1151,12 +1151,12 @@ static void
rt_item_sleep(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct rt_vcpu * const svc = rt_vcpu(vc);
+ struct rt_item * const svc = rt_item(item);
BUG_ON( is_idle_vcpu(vc) );
SCHED_STAT_CRANK(vcpu_sleep);
- if ( curr_on_cpu(vc->processor) == vc )
+ if ( curr_on_cpu(vc->processor) == item )
cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
else if ( vcpu_on_q(svc) )
{
@@ -1186,11 +1186,11 @@ rt_item_sleep(const struct scheduler *ops, struct sched_item *item)
* lock is grabbed before calling this function
*/
static void
-runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
+runq_tickle(const struct scheduler *ops, struct rt_item *new)
{
struct rt_private *prv = rt_priv(ops);
- struct rt_vcpu *latest_deadline_vcpu = NULL; /* lowest priority */
- struct rt_vcpu *iter_svc;
+ struct rt_item *latest_deadline_vcpu = NULL; /* lowest priority */
+ struct rt_item *iter_svc;
struct vcpu *iter_vc;
int cpu = 0, cpu_to_tickle = 0;
cpumask_t not_tickled;
@@ -1211,14 +1211,14 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
cpu = cpumask_test_or_cycle(new->vcpu->processor, &not_tickled);
while ( cpu!= nr_cpu_ids )
{
- iter_vc = curr_on_cpu(cpu);
+ iter_vc = curr_on_cpu(cpu)->vcpu;
if ( is_idle_vcpu(iter_vc) )
{
SCHED_STAT_CRANK(tickled_idle_cpu);
cpu_to_tickle = cpu;
goto out;
}
- iter_svc = rt_vcpu(iter_vc);
+ iter_svc = rt_item(iter_vc->sched_item);
if ( latest_deadline_vcpu == NULL ||
compare_vcpu_priority(iter_svc, latest_deadline_vcpu) < 0 )
latest_deadline_vcpu = iter_svc;
@@ -1267,13 +1267,13 @@ static void
rt_item_wake(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct rt_vcpu * const svc = rt_vcpu(vc);
+ struct rt_item * const svc = rt_item(item);
s_time_t now;
bool_t missed;
BUG_ON( is_idle_vcpu(vc) );
- if ( unlikely(curr_on_cpu(vc->processor) == vc) )
+ if ( unlikely(curr_on_cpu(vc->processor) == item) )
{
SCHED_STAT_CRANK(vcpu_wake_running);
return;
@@ -1338,7 +1338,7 @@ static void
rt_context_saved(const struct scheduler *ops, struct sched_item *item)
{
struct vcpu *vc = item->vcpu;
- struct rt_vcpu *svc = rt_vcpu(vc);
+ struct rt_item *svc = rt_item(item);
spinlock_t *lock = vcpu_schedule_lock_irq(vc);
__clear_bit(__RTDS_scheduled, &svc->flags);
@@ -1369,7 +1369,7 @@ rt_dom_cntl(
struct xen_domctl_scheduler_op *op)
{
struct rt_private *prv = rt_priv(ops);
- struct rt_vcpu *svc;
+ struct rt_item *svc;
struct vcpu *v;
unsigned long flags;
int rc = 0;
@@ -1393,7 +1393,7 @@ rt_dom_cntl(
spin_lock_irqsave(&prv->lock, flags);
for_each_vcpu ( d, v )
{
- svc = rt_vcpu(v);
+ svc = rt_item(v->sched_item);
svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
svc->budget = MICROSECS(op->u.rtds.budget);
}
@@ -1419,7 +1419,7 @@ rt_dom_cntl(
if ( op->cmd == XEN_DOMCTL_SCHEDOP_getvcpuinfo )
{
spin_lock_irqsave(&prv->lock, flags);
- svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+ svc = rt_item(d->vcpu[local_sched.vcpuid]->sched_item);
local_sched.u.rtds.budget = svc->budget / MICROSECS(1);
local_sched.u.rtds.period = svc->period / MICROSECS(1);
if ( has_extratime(svc) )
@@ -1447,7 +1447,7 @@ rt_dom_cntl(
}
spin_lock_irqsave(&prv->lock, flags);
- svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+ svc = rt_item(d->vcpu[local_sched.vcpuid]->sched_item);
svc->period = period;
svc->budget = budget;
if ( local_sched.u.rtds.flags & XEN_DOMCTL_SCHEDRT_extra )
@@ -1480,7 +1480,7 @@ static void repl_timer_handler(void *data){
struct list_head *replq = rt_replq(ops);
struct list_head *runq = rt_runq(ops);
struct list_head *iter, *tmp;
- struct rt_vcpu *svc;
+ struct rt_item *svc;
LIST_HEAD(tmp_replq);
spin_lock_irq(&prv->lock);
@@ -1522,10 +1522,10 @@ static void repl_timer_handler(void *data){
{
svc = replq_elem(iter);
- if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu &&
+ if ( curr_on_cpu(svc->vcpu->processor) == svc->vcpu->sched_item &&
!list_empty(runq) )
{
- struct rt_vcpu *next_on_runq = q_elem(runq->next);
+ struct rt_item *next_on_runq = q_elem(runq->next);
if ( compare_vcpu_priority(svc, next_on_runq) < 0 )
runq_tickle(ops, next_on_runq);
@@ -338,7 +338,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
/* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
if ( is_idle_domain(d) )
{
- per_cpu(schedule_data, v->processor).curr = v;
+ per_cpu(schedule_data, v->processor).curr = item;
v->is_running = 1;
}
else
@@ -1533,7 +1533,7 @@ static void schedule(void)
next = next_slice.task;
- sd->curr = next;
+ sd->curr = next->sched_item;
if ( next_slice.time >= 0 ) /* -ve means no limit */
set_timer(&sd->s_timer, now + next_slice.time);
@@ -1656,7 +1656,6 @@ static int cpu_schedule_up(unsigned int cpu)
per_cpu(scheduler, cpu) = &ops;
spin_lock_init(&sd->_lock);
sd->schedule_lock = &sd->_lock;
- sd->curr = idle_vcpu[cpu];
init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
atomic_set(&sd->urgent_count, 0);
@@ -1690,6 +1689,8 @@ static int cpu_schedule_up(unsigned int cpu)
if ( idle_vcpu[cpu] == NULL )
return -ENOMEM;
+ sd->curr = idle_vcpu[cpu]->sched_item;
+
/*
* We don't want to risk calling xfree() on an sd->sched_priv
* (e.g., inside free_pdata, from cpu_schedule_down() called
@@ -1859,6 +1860,7 @@ void __init scheduler_init(void)
idle_domain->max_vcpus = nr_cpu_ids;
if ( vcpu_create(idle_domain, 0, 0) == NULL )
BUG();
+ this_cpu(schedule_data).curr = idle_vcpu[0]->sched_item;
this_cpu(schedule_data).sched_priv = SCHED_OP(&ops, alloc_pdata, 0);
BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
SCHED_OP(&ops, init_pdata, this_cpu(schedule_data).sched_priv, 0);
@@ -36,7 +36,7 @@ extern int sched_ratelimit_us;
struct schedule_data {
spinlock_t *schedule_lock,
_lock;
- struct vcpu *curr; /* current task */
+ struct sched_item *curr; /* current task */
void *sched_priv;
struct timer s_timer; /* scheduling timer */
atomic_t urgent_count; /* how many urgent vcpus */
In preparation of core scheduling let the percpu pointer
schedule_data.curr point to a struct sched_item instead of the related
vcpu. At the same time rename the per-vcpu scheduler specific structs
to per-item ones.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_arinc653.c |   2 +-
 xen/common/sched_credit.c   | 101 +++++++++++++-------------
 xen/common/sched_credit2.c  | 168 ++++++++++++++++++++++----------------------
 xen/common/sched_null.c     |  44 ++++++------
 xen/common/sched_rt.c       | 118 +++++++++++++++----------------
 xen/common/schedule.c       |   8 ++-
 xen/include/xen/sched-if.h  |   2 +-
 7 files changed, 220 insertions(+), 223 deletions(-)
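As a reviewing aid only (not part of the patch): a minimal, standalone
sketch of the pointer and naming pattern this change converges on, using
stand-in types rather than the real Xen headers. The real definitions live
in xen/include/xen/sched-if.h and the individual sched_*.c files; field
layout here is reduced to what the patch touches.

/* Illustrative stand-ins, compilable outside Xen. */
#include <stdbool.h>

struct vcpu;

struct sched_item {
    struct vcpu *vcpu;        /* back-pointer to the (single, for now) vcpu */
    void        *priv;        /* scheduler private data, e.g. csched2_item */
};

struct schedule_data {
    struct sched_item *curr;  /* was: struct vcpu *curr */
};

/* The renamed per-item accessors take a sched_item instead of a vcpu: */
struct csched2_item { int credit; };

static inline struct csched2_item *csched2_item(const struct sched_item *item)
{
    return item->priv;
}

/* ... and "is this the currently running task?" becomes an item compare: */
static inline bool is_curr(const struct schedule_data *sd,
                           const struct sched_item *item)
{
    return sd->curr == item;  /* previously: sd->curr == item->vcpu */
}

The schedule.c hunks keep this invariant by initialising curr to
idle_vcpu[cpu]->sched_item (in cpu_schedule_up(), after the idle vcpu
exists, and in scheduler_init()) and by assigning next->sched_item in
schedule().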