@@ -68,6 +68,7 @@ static void poll_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
DEFINE_PER_CPU(struct scheduler *, scheduler);
DEFINE_PER_CPU(struct sched_resource *, sched_res);
+static DEFINE_PER_CPU(unsigned int, sched_res_idx);
/* Scratch space for cpumasks. */
DEFINE_PER_CPU(cpumask_t, cpumask_scratch);
@@ -78,6 +79,12 @@ extern const struct scheduler *__start_schedulers_array[], *__end_schedulers_arr
static struct scheduler __read_mostly ops;
+static inline struct vcpu *sched_item2vcpu_cpu(struct sched_item *item,
+ unsigned int cpu)
+{
+ return item->domain->vcpu[item->item_id + per_cpu(sched_res_idx, cpu)];
+}
+
static inline struct scheduler *dom_scheduler(const struct domain *d)
{
if ( likely(d->cpupool != NULL) )
@@ -1844,7 +1851,7 @@ static void sched_slave(void)
pcpu_schedule_unlock_irq(lock, cpu);
- sched_context_switch(vprev, next->vcpu, now);
+ sched_context_switch(vprev, sched_item2vcpu_cpu(next, cpu), now);
}
/*
@@ -1903,7 +1910,7 @@ static void schedule(void)
pcpu_schedule_unlock_irq(lock, cpu);
- vnext = next->vcpu;
+ vnext = sched_item2vcpu_cpu(next, cpu);
sched_context_switch(vprev, vnext, now);
}
Add a percpu variable holding the index of the cpu in the current
sched_resource structure. This index is used to get the correct vcpu of a
sched_item on a specific cpu.

For now this index will be zero for all cpus, but with core scheduling it
will be possible to have higher values, too.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
RFC V2: new patch (carved out from RFC V1 patch 49)
---
 xen/common/schedule.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)