@@ -71,6 +71,7 @@ static void poll_timer_fn(void *data);
 /* This is global for now so that private implementations can reach it */
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned int, sched_res_idx);
 
 /* Scratch space for cpumasks. */
 DEFINE_PER_CPU(cpumask_t, cpumask_scratch);
 
@@ -132,6 +133,12 @@ static struct scheduler sched_idle_ops = {
     .switch_sched   = sched_idle_switch_sched,
 };
 
+static inline struct vcpu *sched_unit2vcpu_cpu(struct sched_unit *unit,
+                                               unsigned int cpu)
+{
+    return unit->domain->vcpu[unit->unit_id + per_cpu(sched_res_idx, cpu)];
+}
+
 static inline struct scheduler *dom_scheduler(const struct domain *d)
 {
     if ( likely(d->cpupool != NULL) )
@@ -1988,7 +1995,7 @@ static void sched_slave(void)
 
     pcpu_schedule_unlock_irq(lock, cpu);
 
-    sched_context_switch(vprev, next->vcpu_list, now);
+    sched_context_switch(vprev, sched_unit2vcpu_cpu(next, cpu), now);
 }
 
 /*
@@ -2047,7 +2054,7 @@ static void schedule(void)
 
     pcpu_schedule_unlock_irq(lock, cpu);
 
-    vnext = next->vcpu_list;
+    vnext = sched_unit2vcpu_cpu(next, cpu);
     sched_context_switch(vprev, vnext, now);
 }
 
Add a percpu variable holding the index of the cpu in the current
sched_resource structure. This index is used to get the correct vcpu of a
sched_unit on a specific cpu. For now this index will be zero for all
cpus, but with core scheduling it will be possible to have higher values,
too.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
RFC V2: new patch (carved out from RFC V1 patch 49)
---
 xen/common/schedule.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
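For illustration, here is a minimal standalone sketch of the index
arithmetic the new helper relies on. This is plain C, not Xen code: the
struct layouts, the per-cpu array stand-in, and the SMT-2 sibling pairing
are assumptions made for the example; only the body of
sched_unit2vcpu_cpu() mirrors the patch.

/*
 * Standalone sketch (not Xen code). Models how sched_res_idx picks the
 * right vcpu of a sched_unit for a given cpu: unit_id is assumed to be
 * the id of the unit's first vcpu, and sched_res_idx is the cpu's
 * position within its sched_resource.
 */
#include <assert.h>
#include <stdio.h>

#define NR_CPUS 4

struct vcpu { int vcpu_id; };

struct domain {
    struct vcpu *vcpu[8];         /* vcpu array indexed by vcpu_id */
};

struct sched_unit {
    struct domain *domain;
    unsigned int unit_id;         /* id of the unit's first vcpu */
};

/* Stand-in for the new per-cpu variable. */
static unsigned int sched_res_idx[NR_CPUS];

/* Mirrors the arithmetic of sched_unit2vcpu_cpu() from the patch. */
static struct vcpu *sched_unit2vcpu_cpu(struct sched_unit *unit,
                                        unsigned int cpu)
{
    return unit->domain->vcpu[unit->unit_id + sched_res_idx[cpu]];
}

int main(void)
{
    struct vcpu v0 = { 0 }, v1 = { 1 };
    struct domain d = { .vcpu = { &v0, &v1 } };
    struct sched_unit unit = { .domain = &d, .unit_id = 0 };

    /*
     * Today: every cpu is its own sched_resource, so the index is zero
     * everywhere and the unit's first vcpu is always selected.
     */
    assert(sched_unit2vcpu_cpu(&unit, 0)->vcpu_id == 0);
    assert(sched_unit2vcpu_cpu(&unit, 1)->vcpu_id == 0);

    /*
     * With core scheduling on an SMT-2 core (assumed setup), cpus 0 and
     * 1 would share one sched_resource: cpu 0 at index 0, its sibling
     * cpu 1 at index 1, so each sibling gets its own vcpu of the unit.
     */
    sched_res_idx[1] = 1;
    assert(sched_unit2vcpu_cpu(&unit, 0)->vcpu_id == 0);
    assert(sched_unit2vcpu_cpu(&unit, 1)->vcpu_id == 1);

    printf("index arithmetic checks passed\n");
    return 0;
}

Keeping the index in a per-cpu variable means the context-switch path
needs no search for the cpu's position within its sched_resource:
unit_id plus the per-cpu index directly yields the vcpu array slot.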