| Message ID | 20190914085251.18816-26-jgross@suse.com |
|---|---|
| State | Superseded |
| Series | xen: add core scheduling support |
On 14.09.2019 10:52, Juergen Gross wrote:
> @@ -368,7 +372,7 @@ static struct sched_unit *sched_alloc_unit(struct vcpu *v)
>      unit->vcpu_list = v;
>      unit->unit_id = v->vcpu_id;
>      unit->domain = d;
> -    v->sched_unit = unit;
> +    unit->runstate_cnt[v->runstate.state]++;
>
>      for ( prev_unit = &d->sched_unit_list; *prev_unit;
>            prev_unit = &(*prev_unit)->next_in_list )
> @@ -384,6 +388,8 @@ static struct sched_unit *sched_alloc_unit(struct vcpu *v)
>           !zalloc_cpumask_var(&unit->cpu_soft_affinity) )
>          goto fail;
>
> +    v->sched_unit = unit;
> +
>      return unit;
>
>  fail:

Is this movement of the assignment something which really belongs
here, rather than in some earlier patch (perhaps the one actually
introducing it)?

Jan
On 20.09.19 17:27, Jan Beulich wrote:
> On 14.09.2019 10:52, Juergen Gross wrote:
>> @@ -368,7 +372,7 @@ static struct sched_unit *sched_alloc_unit(struct vcpu *v)
>>      unit->vcpu_list = v;
>>      unit->unit_id = v->vcpu_id;
>>      unit->domain = d;
>> -    v->sched_unit = unit;
>> +    unit->runstate_cnt[v->runstate.state]++;
>>
>>      for ( prev_unit = &d->sched_unit_list; *prev_unit;
>>            prev_unit = &(*prev_unit)->next_in_list )
>> @@ -384,6 +388,8 @@ static struct sched_unit *sched_alloc_unit(struct vcpu *v)
>>           !zalloc_cpumask_var(&unit->cpu_soft_affinity) )
>>          goto fail;
>>
>> +    v->sched_unit = unit;
>> +
>>      return unit;
>>
>>  fail:
>
> Is this movement of the assignment something which really belongs
> here, rather than in some earlier patch (perhaps the one actually
> introducing it)?

It certainly should be part of an earlier patch.


Juergen
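A note on why the assignment moves at all, which the thread itself does not spell out: sched_alloc_unit() frees the unit on its fail path, so publishing v->sched_unit only after every allocation has succeeded avoids leaving the vcpu with a dangling pointer. A minimal sketch of that pattern, condensed from the diff below (the sched_free_unit() call on the fail path is assumed from earlier patches in the series):

```c
/*
 * Minimal sketch of the allocation pattern under discussion, condensed
 * from the diff below; not the patch itself.
 */
static struct sched_unit *sched_alloc_unit(struct vcpu *v)
{
    struct sched_unit *unit;

    if ( (unit = xzalloc(struct sched_unit)) == NULL )
        return NULL;

    unit->vcpu_list = v;
    /* Seed the counter with the vcpu's current runstate. */
    unit->runstate_cnt[v->runstate.state]++;

    if ( !zalloc_cpumask_var(&unit->cpu_hard_affinity) ||
         !zalloc_cpumask_var(&unit->cpu_soft_affinity) )
        goto fail;

    /* Publish the unit only once it is fully constructed. */
    v->sched_unit = unit;

    return unit;

 fail:
    /* v->sched_unit was never set, so freeing leaves no dangling pointer. */
    sched_free_unit(unit);
    return NULL;
}
```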
```diff
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 7b37461db9..0bd9f0d278 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -239,6 +239,7 @@ static inline void vcpu_runstate_change(
     struct vcpu *v, int new_state, s_time_t new_entry_time)
 {
     s_time_t delta;
+    struct sched_unit *unit = v->sched_unit;
 
     ASSERT(v->runstate.state != new_state);
     ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
@@ -247,6 +248,9 @@ static inline void vcpu_runstate_change(
 
     trace_runstate_change(v, new_state);
 
+    unit->runstate_cnt[v->runstate.state]--;
+    unit->runstate_cnt[new_state]++;
+
     delta = new_entry_time - v->runstate.state_entry_time;
     if ( delta > 0 )
     {
@@ -368,7 +372,7 @@ static struct sched_unit *sched_alloc_unit(struct vcpu *v)
     unit->vcpu_list = v;
     unit->unit_id = v->vcpu_id;
     unit->domain = d;
-    v->sched_unit = unit;
+    unit->runstate_cnt[v->runstate.state]++;
 
     for ( prev_unit = &d->sched_unit_list; *prev_unit;
           prev_unit = &(*prev_unit)->next_in_list )
@@ -384,6 +388,8 @@ static struct sched_unit *sched_alloc_unit(struct vcpu *v)
          !zalloc_cpumask_var(&unit->cpu_soft_affinity) )
         goto fail;
 
+    v->sched_unit = unit;
+
     return unit;
 
  fail:
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 56ed863b08..36257a06fe 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -277,6 +277,8 @@ struct sched_unit {
 
     /* Last time unit got (de-)scheduled. */
     uint64_t state_entry_time;
+    /* Vcpu state summary. */
+    unsigned int runstate_cnt[4];
 
     /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_var_t cpu_hard_affinity;
```
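The two hunks in vcpu_runstate_change() keep the counters in step with every state transition, and sched_alloc_unit() seeds the counter for the vcpu's initial state. The resulting invariant is that a unit's counters always sum to its number of vcpus. An illustrative debug check, not part of the patch (at this point in the series each unit still carries exactly one vcpu):

```c
/*
 * Illustrative debug check, not part of the patch: with the hunks in
 * vcpu_runstate_change() adjusting the counters under the scheduler
 * lock, the per-unit counters must always sum to the number of vcpus
 * in the unit, which is exactly one at this point in the series.
 */
static void check_unit_runstate_cnt(const struct sched_unit *unit)
{
    unsigned int i, sum = 0;

    for ( i = 0; i < ARRAY_SIZE(unit->runstate_cnt); i++ )
        sum += unit->runstate_cnt[i];

    ASSERT(sum == 1);
}
```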
Add counters to struct sched_unit summing up the runstates of the
associated vcpus. This allows quick checks whether a unit has any vcpu
running or whether only a single vcpu of a unit is running.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
RFC V2: add counters for each possible runstate
---
 xen/common/schedule.c   | 8 +++++++-
 xen/include/xen/sched.h | 2 ++
 2 files changed, 9 insertions(+), 1 deletion(-)
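The "quick checks" the commit message refers to might look like the following helpers. These are hypothetical and not part of this patch; the array dimension of 4 matches the four runstates defined in xen/include/public/vcpu.h, and the series presumably wants such per-unit summaries once all vcpus of a unit are scheduled as one entity:

```c
/*
 * Hypothetical helpers, not part of this patch: runstate_cnt[4] is
 * indexed by the runstates from xen/include/public/vcpu.h
 * (RUNSTATE_running, RUNSTATE_runnable, RUNSTATE_blocked,
 * RUNSTATE_offline).
 */
static inline bool unit_has_vcpu_running(const struct sched_unit *unit)
{
    return unit->runstate_cnt[RUNSTATE_running] > 0;
}

static inline bool unit_single_vcpu_running(const struct sched_unit *unit)
{
    return unit->runstate_cnt[RUNSTATE_running] == 1;
}
```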