@@ -305,20 +305,64 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
     spin_unlock_irqrestore(lock1, flags);
 }
 
-int sched_init_vcpu(struct vcpu *v, unsigned int processor)
+static void sched_free_unit(struct sched_unit *unit)
 {
-    struct domain *d = v->domain;
-    struct sched_unit *unit;
+    struct sched_unit *prev_unit;
+    struct domain *d = unit->domain;
 
-    v->processor = processor;
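+    /* Unlink the unit from the domain's sched_unit_list. */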
+    if ( d->sched_unit_list == unit )
+        d->sched_unit_list = unit->next_in_list;
+    else
+    {
+        for_each_sched_unit ( d, prev_unit )
+        {
+            if ( prev_unit->next_in_list == unit )
+            {
+                prev_unit->next_in_list = unit->next_in_list;
+                break;
+            }
+        }
+    }
+
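+    /* Break the vcpu's back reference before freeing the unit. */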
+    unit->vcpu_list->sched_unit = NULL;
+    xfree(unit);
+}
+
+static struct sched_unit *sched_alloc_unit(struct vcpu *v)
+{
+    struct sched_unit *unit, **prev_unit;
+    struct domain *d = v->domain;
 
     if ( (unit = xzalloc(struct sched_unit)) == NULL )
-        return 1;
+        return NULL;
+
     unit->vcpu_list = v;
     unit->unit_id = v->vcpu_id;
     unit->domain = d;
     v->sched_unit = unit;
 
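+    /*
+     * Insert the unit keeping the list ordered by unit_id. Units are
+     * created with ascending unit_ids, so in practice the new unit is
+     * appended at the tail of the list.
+     */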
+    for ( prev_unit = &d->sched_unit_list; *prev_unit;
+          prev_unit = &(*prev_unit)->next_in_list )
+        if ( (*prev_unit)->next_in_list &&
+             (*prev_unit)->next_in_list->unit_id > unit->unit_id )
+            break;
+
+    unit->next_in_list = *prev_unit;
+    *prev_unit = unit;
+
+    return unit;
+}
+
+int sched_init_vcpu(struct vcpu *v, unsigned int processor)
+{
+    struct domain *d = v->domain;
+    struct sched_unit *unit;
+
+    v->processor = processor;
+
+    if ( (unit = sched_alloc_unit(v)) == NULL )
+        return 1;
+
     /* Initialise the per-vcpu timers. */
     spin_lock_init(&v->periodic_timer_lock);
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
@@ -331,8 +375,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     unit->priv = sched_alloc_vdata(dom_scheduler(d), unit, d->sched_priv);
     if ( unit->priv == NULL )
     {
-        v->sched_unit = NULL;
-        xfree(unit);
+        sched_free_unit(unit);
         return 1;
     }
 
@@ -475,8 +518,7 @@ void sched_destroy_vcpu(struct vcpu *v)
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
     sched_remove_unit(vcpu_scheduler(v), unit);
     sched_free_vdata(vcpu_scheduler(v), unit->priv);
-    v->sched_unit = NULL;
-    xfree(unit);
+    sched_free_unit(unit);
 }
 
 int sched_init_domain(struct domain *d, int poolid)
@@ -277,9 +277,22 @@ struct sched_unit {
     struct domain         *domain;
     struct vcpu           *vcpu_list;
     void                  *priv;      /* scheduler private data */
+    struct sched_unit     *next_in_list;
     unsigned int           unit_id;
 };
 
+#define for_each_sched_unit(d, e)                                         \
+    for ( (e) = (d)->sched_unit_list; (e) != NULL; (e) = (e)->next_in_list )
+
+/*
+ * All vcpus of a domain are in a single linked list, with unit->vcpu_list
+ * pointing to the first vcpu of the unit. The loop must terminate as soon
+ * as it reaches a vcpu which is not part of the unit being iterated over.
+ */
+#define for_each_sched_unit_vcpu(i, v)                                    \
+    for ( (v) = (i)->vcpu_list; (v) != NULL && (v)->sched_unit == (i);    \
+          (v) = (v)->next_in_list )
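+
+/*
+ * Illustrative example (a sketch, not an existing caller): collect the
+ * processors of all vcpus of domain d into cpumask *mask by walking the
+ * vcpus unit by unit:
+ *
+ *     struct sched_unit *unit;
+ *     struct vcpu *v;
+ *
+ *     for_each_sched_unit ( d, unit )
+ *         for_each_sched_unit_vcpu ( unit, v )
+ *             cpumask_set_cpu(v->processor, mask);
+ */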
+
 /* Per-domain lock can be recursively acquired in fault handlers. */
 #define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
 #define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
@@ -333,6 +346,7 @@ struct domain
 
     /* Scheduling. */
     void            *sched_priv;    /* scheduler-specific data */
+    struct sched_unit *sched_unit_list;
     struct cpupool  *cpupool;
 
     struct domain   *next_in_list;