@@ -305,6 +305,55 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
    spin_unlock_irqrestore(lock1, flags);
}
+static void sched_free_unit(struct sched_unit *unit)
+{
+    struct sched_unit *prev_unit;
+    struct domain *d = unit->domain;
+
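+    /* Unlink the unit from its domain's sched_unit_list. */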
+    if ( d->sched_unit_list == unit )
+        d->sched_unit_list = unit->next_in_list;
+    else
+    {
+        for_each_sched_unit ( d, prev_unit )
+        {
+            if ( prev_unit->next_in_list == unit )
+            {
+                prev_unit->next_in_list = unit->next_in_list;
+                break;
+            }
+        }
+    }
+
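+    /* Clear the vcpu's back-pointer before the unit is freed. */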
+    unit->vcpu_list->sched_unit = NULL;
+    xfree(unit);
+}
+
+static struct sched_unit *sched_alloc_unit(struct vcpu *v)
+{
+    struct sched_unit *unit, **prev_unit;
+    struct domain *d = v->domain;
+
+    if ( (unit = xzalloc(struct sched_unit)) == NULL )
+        return NULL;
+
+    unit->vcpu_list = v;
+    unit->unit_id = v->vcpu_id;
+    unit->domain = d;
+
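+    /* Find the insertion point: stop once the next unit's id is larger. */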
+    for ( prev_unit = &d->sched_unit_list; *prev_unit;
+          prev_unit = &(*prev_unit)->next_in_list )
+        if ( (*prev_unit)->next_in_list &&
+             (*prev_unit)->next_in_list->unit_id > unit->unit_id )
+            break;
+
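+    /* Insert the new unit in front of *prev_unit (or at the list tail). */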
+    unit->next_in_list = *prev_unit;
+    *prev_unit = unit;
+
+    v->sched_unit = unit;
+
+    return unit;
+}
+
int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
    struct domain *d = v->domain;
@@ -312,11 +361,8 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
    v->processor = processor;
-    if ( (unit = xzalloc(struct sched_unit)) == NULL )
+    if ( (unit = sched_alloc_unit(v)) == NULL )
        return 1;
-    unit->vcpu_list = v;
-    unit->unit_id = v->vcpu_id;
-    unit->domain = d;
    /* Initialise the per-vcpu timers. */
    spin_lock_init(&v->periodic_timer_lock);
@@ -330,12 +376,10 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
    unit->priv = sched_alloc_udata(dom_scheduler(d), unit, d->sched_priv);
    if ( unit->priv == NULL )
    {
-        xfree(unit);
+        sched_free_unit(unit);
        return 1;
    }
-    v->sched_unit = unit;
-
    /*
     * Initialize affinity settings. The idler, and potentially
     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
@@ -475,8 +519,7 @@ void sched_destroy_vcpu(struct vcpu *v)
        atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
    sched_remove_unit(vcpu_scheduler(v), unit);
    sched_free_udata(vcpu_scheduler(v), unit->priv);
-    v->sched_unit = NULL;
-    xfree(unit);
+    sched_free_unit(unit);
}
int sched_init_domain(struct domain *d, int poolid)
@@ -277,9 +277,24 @@ struct sched_unit {
    struct domain         *domain;
    struct vcpu           *vcpu_list;
    void                  *priv;      /* scheduler private data */
+    struct sched_unit     *next_in_list;  /* Next unit of the same domain. */
    unsigned int           unit_id;
};
+#define for_each_sched_unit(d, u)                                         \
+    for ( (u) = (d)->sched_unit_list; (u) != NULL; (u) = (u)->next_in_list )
+
+/*
+ * All vcpus of a domain are in a single linked list, with unit->vcpu_list
+ * pointing to the first vcpu of the unit. The loop below must terminate as
+ * soon as it reaches a vcpu which is not part of the unit being iterated
+ * over.
+ */
+#define for_each_sched_unit_vcpu(u, v)                                    \
+    for ( (v) = (u)->vcpu_list;                                           \
+          (v) != NULL && (!(u)->next_in_list ||                           \
+                          (v)->vcpu_id < (u)->next_in_list->unit_id);     \
+          (v) = (v)->next_in_list )
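+
+/*
+ * Illustrative sketch only: given a domain pointer d, the two iterators can
+ * be nested in order to visit every vcpu of the domain, e.g.
+ *
+ *     struct sched_unit *unit;
+ *     struct vcpu *v;
+ *
+ *     for_each_sched_unit ( d, unit )
+ *         for_each_sched_unit_vcpu ( unit, v )
+ *             process(v);
+ *
+ * where process() stands for an arbitrary per-vcpu operation.
+ */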
+
/* Per-domain lock can be recursively acquired in fault handlers. */
#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
@@ -333,6 +348,7 @@ struct domain
    /* Scheduling. */
    void            *sched_priv;    /* scheduler-specific data */
+    struct sched_unit *sched_unit_list;    /* Sched units of this domain. */
    struct cpupool  *cpupool;
    struct domain   *next_in_list;