diff --git a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -184,8 +184,9 @@ static inline void vcpu_runstate_change(
     s_time_t delta;
     bool old_run, new_run;

-    ASSERT(v->runstate.state != new_state);
     ASSERT(spin_is_locked(per_cpu(sched_res, v->processor)->schedule_lock));
+    if ( v->runstate.state == new_state )
+        return;

     vcpu_urgent_count_update(v);

@@ -221,18 +222,23 @@ static inline void vcpu_runstate_change(
     v->runstate.state = new_state;
 }

+static inline void vcpu_runstate_helper(struct vcpu *v, int new_state,
+                                        s_time_t new_entry_time)
+{
+    vcpu_runstate_change(v,
+        ((v->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
+         (vcpu_runnable(v) ? new_state : RUNSTATE_offline)),
+        new_entry_time);
+}
+
 static inline void sched_item_runstate_change(struct sched_item *item,
                                               bool running, s_time_t new_entry_time)
 {
-    struct vcpu *v = item->vcpu;
+    int new_state = running ? RUNSTATE_running : RUNSTATE_runnable;
+    struct vcpu *v;

-    if ( running )
-        vcpu_runstate_change(v, RUNSTATE_running, new_entry_time);
-    else
-        vcpu_runstate_change(v,
-            ((v->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
-             (vcpu_runnable(v) ? RUNSTATE_runnable : RUNSTATE_offline)),
-            new_entry_time);
+    for_each_sched_item_vcpu( item, v )
+        vcpu_runstate_helper(v, new_state, new_entry_time);
 }

 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
@@ -1616,7 +1622,7 @@ static void sched_switch_items(struct sched_resource *sd,
              (next->vcpu->runstate.state == RUNSTATE_runnable) ?
              (now - next->state_entry_time) : 0, prev->next_time);

-    ASSERT(prev->vcpu->runstate.state == RUNSTATE_running);
+    ASSERT(item_running(prev));

     TRACE_4D(TRC_SCHED_SWITCH, prev->domain->domain_id, prev->item_id,
              next->domain->domain_id, next->item_id);
@@ -1624,7 +1630,7 @@ static void sched_switch_items(struct sched_resource *sd,
     sched_item_runstate_change(prev, false, now);
     prev->last_run_time = now;

-    ASSERT(next->vcpu->runstate.state != RUNSTATE_running);
+    ASSERT(!item_running(next));
     sched_item_runstate_change(next, true, now);

     /*
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -107,15 +107,41 @@ static inline bool is_idle_item(const struct sched_item *item)
     return is_idle_vcpu(item->vcpu);
 }

+static inline bool item_running(const struct sched_item *item)
+{
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        if ( v->runstate.state == RUNSTATE_running )
+            return true;
+
+    return false;
+}
+
 static inline bool item_runnable(const struct sched_item *item)
 {
-    return vcpu_runnable(item->vcpu);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        if ( vcpu_runnable(v) )
+            return true;
+
+    return false;
 }

 static inline void sched_set_res(struct sched_item *item,
                                  struct sched_resource *res)
 {
-    item->vcpu->processor = res->processor;
+    int cpu = cpumask_first(res->cpus);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+    {
+        ASSERT(cpu < nr_cpu_ids);
+        v->processor = cpu;
+        cpu = cpumask_next(cpu, res->cpus);
+    }
+
     item->res = res;
 }

@@ -127,25 +153,37 @@ static inline unsigned int sched_item_cpu(struct sched_item *item)
 static inline void sched_set_pause_flags(struct sched_item *item,
                                          unsigned int bit)
 {
-    __set_bit(bit, &item->vcpu->pause_flags);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        __set_bit(bit, &v->pause_flags);
 }

 static inline void sched_clear_pause_flags(struct sched_item *item,
                                            unsigned int bit)
 {
-    __clear_bit(bit, &item->vcpu->pause_flags);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        __clear_bit(bit, &v->pause_flags);
 }

 static inline void sched_set_pause_flags_atomic(struct sched_item *item,
                                                 unsigned int bit)
 {
-    set_bit(bit, &item->vcpu->pause_flags);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        set_bit(bit, &v->pause_flags);
 }

 static inline void sched_clear_pause_flags_atomic(struct sched_item *item,
                                                   unsigned int bit)
 {
-    clear_bit(bit, &item->vcpu->pause_flags);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        clear_bit(bit, &v->pause_flags);
 }

 static inline struct sched_item *sched_idle_item(unsigned int cpu)
@@ -327,12 +365,18 @@ static inline void sched_free_domdata(const struct scheduler *s,
 static inline void sched_item_pause_nosync(struct sched_item *item)
 {
-    vcpu_pause_nosync(item->vcpu);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        vcpu_pause_nosync(v);
 }

 static inline void sched_item_unpause(struct sched_item *item)
 {
-    vcpu_unpause(item->vcpu);
+    struct vcpu *v;
+
+    for_each_sched_item_vcpu( item, v )
+        vcpu_unpause(v);
 }

 #define REGISTER_SCHEDULER(x) static const struct scheduler *x##_entry \
In several places support for multiple vcpus per sched item is still
missing. Add that missing support (with the exception of initial
allocation) and the missing helpers for it.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/schedule.c      | 28 +++++++++++++---------
 xen/include/xen/sched-if.h | 60 +++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 69 insertions(+), 19 deletions(-)
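
[ Not part of the patch: the hunks above rely on the
  for_each_sched_item_vcpu() iterator, which is introduced elsewhere in
  this series and therefore does not appear in this diff. Below is a
  minimal, hypothetical sketch of the shape such a macro could take;
  the next_in_list link and the sched_item back-pointer in struct vcpu
  are assumptions for illustration, not details confirmed by this
  patch.

  /*
   * Hypothetical sketch: visit every vcpu assigned to a sched item,
   * assuming an item's vcpus are chained consecutively through the
   * domain's vcpu list (next_in_list) and that each vcpu points back
   * at its item (sched_item).
   */
  #define for_each_sched_item_vcpu(i, v)                               \
      for ( (v) = (i)->vcpu;                                           \
            (v) != NULL && (v)->sched_item == (i);                     \
            (v) = (v)->next_in_list )

  With one vcpu per item such a loop runs exactly once, so helpers like
  item_running() and item_runnable() reduce to the vcpu-based checks
  they replace; with multiple vcpus they report whether any vcpu of the
  item is in the respective state. ]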