--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -174,6 +174,7 @@ static inline void vcpu_runstate_change(
struct vcpu *v, int new_state, s_time_t new_entry_time)
{
s_time_t delta;
+ struct sched_item *item = v->sched_item;

ASSERT(v->runstate.state != new_state);
ASSERT(spin_is_locked(per_cpu(sched_res, v->processor)->schedule_lock));
@@ -182,6 +183,9 @@ static inline void vcpu_runstate_change(

trace_runstate_change(v, new_state);

+ item->runstate_cnt[v->runstate.state]--;
+ item->runstate_cnt[new_state]++;
+
delta = new_entry_time - v->runstate.state_entry_time;
if ( delta > 0 )
{
@@ -359,6 +363,8 @@ int sched_init_vcpu(struct vcpu *v)
if ( (item = sched_alloc_item(v)) == NULL )
return 1;

+ item->runstate_cnt[v->runstate.state]++;
+
if ( is_idle_domain(d) )
processor = v->vcpu_id;
else
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -272,6 +272,8 @@ struct sched_item {
uint64_t last_run_time;
/* Last time item got (de-)scheduled. */
uint64_t state_entry_time;
+ /* Vcpu state summary. */
+ unsigned int runstate_cnt[4];

/* Currently running on a CPU? */
bool is_running;
Add counters to struct sched_item summing up runstates of associated
vcpus.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
RFC V2: add counters for each possible runstate
---
 xen/common/schedule.c   | 6 ++++++
 xen/include/xen/sched.h | 2 ++
 2 files changed, 8 insertions(+)
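To illustrate the intended bookkeeping, here is a minimal standalone C sketch (not Xen code and not part of the patch): every runstate transition decrements the counter of the vcpu's old state and increments the counter of the new one, as the vcpu_runstate_change() hunk above does, so an item can answer questions such as "are all of my vcpus runnable?" without iterating its vcpu list. The RUNSTATE_* numbering matches Xen's public vcpu interface; the toy_* struct and function names are invented for this example only.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Runstate numbering as in Xen's public vcpu interface. */
#define RUNSTATE_running  0
#define RUNSTATE_runnable 1
#define RUNSTATE_blocked  2
#define RUNSTATE_offline  3

struct toy_item;

/* Toy stand-ins for struct vcpu / struct sched_item (names are made up). */
struct toy_vcpu {
    int state;                    /* current RUNSTATE_* of this vcpu */
    struct toy_item *item;        /* scheduling item this vcpu belongs to */
};

struct toy_item {
    unsigned int runstate_cnt[4]; /* vcpu state summary, one counter per runstate */
};

/* Register a vcpu with its item, mirroring the sched_init_vcpu() hunk:
 * bump the counter of the vcpu's current state once at init time. */
static void toy_item_add_vcpu(struct toy_item *item, struct toy_vcpu *v)
{
    v->item = item;
    item->runstate_cnt[v->state]++;
}

/* Runstate transition, mirroring the vcpu_runstate_change() hunk:
 * old state's counter goes down, new state's counter goes up. */
static void toy_runstate_change(struct toy_vcpu *v, int new_state)
{
    assert(v->state != new_state);
    v->item->runstate_cnt[v->state]--;
    v->item->runstate_cnt[new_state]++;
    v->state = new_state;
}

/* Example query answered from the summary alone:
 * are all vcpus of the item either running or runnable? */
static bool toy_item_fully_runnable(const struct toy_item *item)
{
    return item->runstate_cnt[RUNSTATE_blocked] == 0 &&
           item->runstate_cnt[RUNSTATE_offline] == 0;
}

int main(void)
{
    struct toy_item item = { .runstate_cnt = { 0 } };
    struct toy_vcpu v0 = { .state = RUNSTATE_offline };
    struct toy_vcpu v1 = { .state = RUNSTATE_offline };

    toy_item_add_vcpu(&item, &v0);
    toy_item_add_vcpu(&item, &v1);

    toy_runstate_change(&v0, RUNSTATE_runnable);
    printf("fully runnable: %d\n", toy_item_fully_runnable(&item)); /* prints 0 */

    toy_runstate_change(&v1, RUNSTATE_running);
    printf("fully runnable: %d\n", toy_item_fully_runnable(&item)); /* prints 1 */

    return 0;
}

The point of keeping the summary per item is that each state change stays O(1) while queries over all vcpus associated with an item need no iteration.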