@@ -744,35 +744,40 @@ void vcpu_unblock(struct vcpu *v)
}
/*
- * Do the actual movement of a vcpu from old to new CPU. Locks for *both*
+ * Do the actual movement of a unit from old to new CPU. Locks for *both*
* CPUs needs to have been taken already when calling this!
*/
-static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
+static void sched_unit_move_locked(struct sched_unit *unit,
+ unsigned int new_cpu)
{
- unsigned int old_cpu = v->processor;
+ unsigned int old_cpu = unit->res->master_cpu;
+ struct vcpu *v;
/*
* Transfer urgency status to new CPU before switching CPUs, as
* once the switch occurs, v->is_urgent is no longer protected by
* the per-CPU scheduler lock we are holding.
*/
- if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
+ for_each_sched_unit_vcpu ( unit, v )
{
- atomic_inc(&per_cpu(sched_urgent_count, new_cpu));
- atomic_dec(&per_cpu(sched_urgent_count, old_cpu));
+ if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
+ {
+ atomic_inc(&per_cpu(sched_urgent_count, new_cpu));
+ atomic_dec(&per_cpu(sched_urgent_count, old_cpu));
+ }
}
/*
* Actual CPU switch to new CPU. This is safe because the lock
* pointer can't change while the current lock is held.
*/
- sched_migrate(vcpu_scheduler(v), v->sched_unit, new_cpu);
+ sched_migrate(unit_scheduler(unit), unit, new_cpu);
}
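For illustration, here is a standalone toy model of the unit/vcpu relationship and the urgency handover done above (all toy_* names are invented for this sketch, not Xen's actual definitions; the real loop uses the for_each_sched_unit_vcpu() iterator and atomic per-CPU counters):

    #include <stdbool.h>

    /* Minimal stand-ins for struct vcpu / struct sched_unit: a unit owns a
     * linked list of vcpus which are always scheduled together. */
    struct toy_vcpu {
        bool is_urgent;                /* vcpu is in an urgent wait state */
        struct toy_vcpu *next_in_unit;
    };

    struct toy_unit {
        struct toy_vcpu *vcpu_list;
    };

    /* Per-pcpu count of urgent vcpus (consulted e.g. when deciding how deep
     * a sleep state a pcpu may enter). */
    static int urgent_count[4];

    /* Mirror of the loop in sched_unit_move_locked(): when a unit changes
     * pcpu, each urgent vcpu's contribution moves to the new pcpu's count. */
    static void toy_move_urgency(struct toy_unit *u, int old_cpu, int new_cpu)
    {
        struct toy_vcpu *v;

        for ( v = u->vcpu_list; v; v = v->next_in_unit )
            if ( v->is_urgent && old_cpu != new_cpu )
            {
                urgent_count[new_cpu]++;
                urgent_count[old_cpu]--;
            }
    }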
/*
* Initiating migration
*
- * In order to migrate, we need the vcpu in question to have stopped
+ * In order to migrate, we need the unit in question to have stopped
* running and had sched_sleep() called (to take it off any
* runqueues, for instance); and if it is currently running, it needs
* to be scheduled out. Finally, we need to hold the scheduling locks
@@ -788,37 +793,45 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
* should be called like this:
*
* lock = unit_schedule_lock_irq(unit);
- * vcpu_migrate_start(v);
+ * sched_unit_migrate_start(unit);
* unit_schedule_unlock_irq(lock, unit)
- * vcpu_migrate_finish(v);
+ * sched_unit_migrate_finish(unit);
*
- * vcpu_migrate_finish() will do the work now if it can, or simply
- * return if it can't (because v is still running); in that case
- * vcpu_migrate_finish() will be called by context_saved().
+ * sched_unit_migrate_finish() will do the work now if it can, or simply
+ * return if it can't (because the unit is still running); in that case
+ * sched_unit_migrate_finish() will be called by context_saved().
*/
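Concretely, with the deferred path spelled out (a condensed fragment based on the comment above and on the context_saved() hunk at the end of this patch, not additional patch content):

    /* Requester side: runs with the unit's schedule lock held. */
    lock = unit_schedule_lock_irq(unit);
    sched_unit_migrate_start(unit);   /* set _VPF_migrating, sleep the vcpus */
    unit_schedule_unlock_irq(lock, unit);
    sched_unit_migrate_finish(unit);  /* migrate now, or return if running */

    /* Deferred side: once the unit has actually been scheduled out,
     * context_saved() calls sched_unit_migrate_finish() again. */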
-static void vcpu_migrate_start(struct vcpu *v)
+static void sched_unit_migrate_start(struct sched_unit *unit)
{
- set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_sleep_nosync_locked(v);
+ struct vcpu *v;
+
+ for_each_sched_unit_vcpu ( unit, v )
+ {
+ set_bit(_VPF_migrating, &v->pause_flags);
+ vcpu_sleep_nosync_locked(v);
+ }
}
-static void vcpu_migrate_finish(struct vcpu *v)
+static void sched_unit_migrate_finish(struct sched_unit *unit)
{
unsigned long flags;
unsigned int old_cpu, new_cpu;
spinlock_t *old_lock, *new_lock;
bool_t pick_called = 0;
+ struct vcpu *v;
/*
- * If the vcpu is currently running, this will be handled by
+ * If the unit is currently running, this will be handled by
* context_saved(); and in any case, if the bit is cleared, then
* someone else has already done the work so we don't need to.
*/
- if ( v->sched_unit->is_running ||
- !test_bit(_VPF_migrating, &v->pause_flags) )
+ if ( unit->is_running )
return;
+ for_each_sched_unit_vcpu ( unit, v )
+ if ( !test_bit(_VPF_migrating, &v->pause_flags) )
+ return;
- old_cpu = new_cpu = v->processor;
+ old_cpu = new_cpu = unit->res->master_cpu;
for ( ; ; )
{
/*
@@ -831,7 +844,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
sched_spin_lock_double(old_lock, new_lock, &flags);
- old_cpu = v->processor;
+ old_cpu = unit->res->master_cpu;
if ( old_lock == get_sched_res(old_cpu)->schedule_lock )
{
/*
@@ -840,15 +853,15 @@ static void vcpu_migrate_finish(struct vcpu *v)
*/
if ( pick_called &&
(new_lock == get_sched_res(new_cpu)->schedule_lock) &&
- cpumask_test_cpu(new_cpu, v->sched_unit->cpu_hard_affinity) &&
- cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
+ cpumask_test_cpu(new_cpu, unit->cpu_hard_affinity) &&
+ cpumask_test_cpu(new_cpu, unit->domain->cpupool->cpu_valid) )
break;
/* Select a new CPU. */
- new_cpu = sched_pick_resource(vcpu_scheduler(v),
- v->sched_unit)->master_cpu;
+ new_cpu = sched_pick_resource(unit_scheduler(unit),
+ unit)->master_cpu;
if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
- cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
+ cpumask_test_cpu(new_cpu, unit->domain->cpupool->cpu_valid) )
break;
pick_called = 1;
}
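The retry loop above combines two ingredients: taking the two runqueue locks in a stable order, and revalidating after they are held, since the unit may have moved while we were waiting for them. A standalone sketch of the lock-ordering part, using pthreads in place of Xen's spinlocks (sched_spin_lock_double() additionally saves the IRQ flags):

    #include <pthread.h>

    /* Acquire two runqueue locks without risking deadlock: if they differ,
     * always take the lower-addressed one first; if both CPUs happen to
     * share one lock, take it only once. */
    static void toy_lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if ( a == b )
            pthread_mutex_lock(a);
        else if ( a < b )
        {
            pthread_mutex_lock(a);
            pthread_mutex_lock(b);
        }
        else
        {
            pthread_mutex_lock(b);
            pthread_mutex_lock(a);
        }
    }

    /* After both locks are held, the caller still has to revalidate, just
     * as the loop above does: re-read the unit's current cpu and check that
     * the lock protecting it is the one actually taken; retry otherwise. */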
@@ -869,22 +882,35 @@ static void vcpu_migrate_finish(struct vcpu *v)
* because they both happen in (different) spinlock regions, and those
* regions are strictly serialised.
*/
- if ( v->sched_unit->is_running ||
- !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
+ if ( unit->is_running )
{
sched_spin_unlock_double(old_lock, new_lock, flags);
return;
}
+ for_each_sched_unit_vcpu ( unit, v )
+ {
+ if ( !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
+ {
+ sched_spin_unlock_double(old_lock, new_lock, flags);
+ return;
+ }
+ }
- vcpu_move_locked(v, new_cpu);
+ sched_unit_move_locked(unit, new_cpu);
sched_spin_unlock_double(old_lock, new_lock, flags);
if ( old_cpu != new_cpu )
- sched_move_irqs(v->sched_unit);
+ {
+ /* Vcpus are moved to other pcpus, so commit their states to memory. */
+ for_each_sched_unit_vcpu ( unit, v )
+ sync_vcpu_execstate(v);
+ sched_move_irqs(unit);
+ }
/* Wake on new CPU. */
- vcpu_wake(v);
+ for_each_sched_unit_vcpu ( unit, v )
+ vcpu_wake(v);
}
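The sync_vcpu_execstate() calls added above deal with lazy context switching: the pcpu a vcpu last ran on may still hold parts of its register state, and that state has to be committed to memory before the vcpu can be loaded on a different pcpu. A heavily simplified standalone model of the idea (toy_* names invented for this sketch):

    #include <string.h>

    struct toy_regs { unsigned long gpr[16]; };

    struct toy_vcpu_state {
        struct toy_regs saved;   /* state as committed to memory */
        int dirty_cpu;           /* pcpu still holding live state, or -1 */
    };

    /* Fake per-pcpu hardware register file. */
    static struct toy_regs pcpu_regs[4];

    /* Analogue of sync_vcpu_execstate(): flush any live register state to
     * memory so the vcpu can be context-loaded on another pcpu. */
    static void toy_sync_execstate(struct toy_vcpu_state *v)
    {
        if ( v->dirty_cpu >= 0 )
        {
            memcpy(&v->saved, &pcpu_regs[v->dirty_cpu], sizeof(v->saved));
            v->dirty_cpu = -1;
        }
    }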
static bool sched_check_affinity_broken(const struct sched_unit *unit)
@@ -1033,11 +1059,9 @@ int cpu_disable_scheduler(unsigned int cpu)
* * the scheduler will always find a suitable solution, or
* things would have failed before getting in here.
*/
- /* TODO: multiple vcpus per unit. */
- vcpu_migrate_start(unit->vcpu_list);
+ sched_unit_migrate_start(unit);
unit_schedule_unlock_irqrestore(lock, flags, unit);
-
- vcpu_migrate_finish(unit->vcpu_list);
+ sched_unit_migrate_finish(unit);
/*
* The only caveat, in this case, is that if a vcpu active in
@@ -1121,14 +1145,14 @@ static int vcpu_set_affinity(
ASSERT(which == unit->cpu_soft_affinity);
sched_set_affinity(v, NULL, affinity);
}
- vcpu_migrate_start(v);
+ sched_unit_migrate_start(unit);
}
unit_schedule_unlock_irq(lock, unit);
domain_update_node_affinity(v->domain);
- vcpu_migrate_finish(v);
+ sched_unit_migrate_finish(unit);
return ret;
}
@@ -1389,12 +1413,12 @@ int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
migrate = !ret && !cpumask_test_cpu(v->processor, unit->cpu_hard_affinity);
if ( migrate )
- vcpu_migrate_start(v);
+ sched_unit_migrate_start(unit);
unit_schedule_unlock_irq(lock, unit);
if ( migrate )
- vcpu_migrate_finish(v);
+ sched_unit_migrate_finish(unit);
return ret;
}
@@ -1811,7 +1835,7 @@ void context_saved(struct vcpu *prev)
sched_context_saved(vcpu_scheduler(prev), prev->sched_unit);
- vcpu_migrate_finish(prev);
+ sched_unit_migrate_finish(prev->sched_unit);
}
/* The scheduler timer: force a run through the scheduler */
Now that vcpu_migrate_start() and vcpu_migrate_finish() are used only to
ensure a vcpu is running on a suitable processor, they can be switched to
operate on schedule units instead of vcpus. While doing that, rename them
accordingly and make the _start() variant static. As it is needed anyway,
call sync_vcpu_execstate() for each vcpu of the unit when changing
processors.

vcpu_move_locked() is switched to operate on a schedule unit, too.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V3:
- move tested invariant condition out of loop (Jan Beulich)
- add comment regarding call of sync_vcpu_execstate() (Jan Beulich)
---
 xen/common/schedule.c | 106 +++++++++++++++++++++++++++++++-------------------
 1 file changed, 65 insertions(+), 41 deletions(-)