@@ -305,20 +305,64 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
spin_unlock_irqrestore(lock1, flags);
}

-int sched_init_vcpu(struct vcpu *v, unsigned int processor)
+static void sched_free_unit(struct sched_unit *unit)
{
- struct domain *d = v->domain;
- struct sched_unit *unit;
+ struct sched_unit *prev_unit;
+ struct domain *d = unit->domain;

-    v->processor = processor;
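+    /* Unlink the unit from the domain's sched_unit_list. */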
+ if ( d->sched_unit_list == unit )
+ d->sched_unit_list = unit->next_in_list;
+ else
+ {
+ for_each_sched_unit ( d, prev_unit )
+ {
+ if ( prev_unit->next_in_list == unit )
+ {
+ prev_unit->next_in_list = unit->next_in_list;
+ break;
+ }
+ }
+ }
+
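+    /* Only one vcpu per unit for now; clear its back-pointer before freeing. */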
+ unit->vcpu_list->sched_unit = NULL;
+ xfree(unit);
+}
+
+static struct sched_unit *sched_alloc_unit(struct vcpu *v)
+{
+ struct sched_unit *unit, **prev_unit;
+ struct domain *d = v->domain;

if ( (unit = xzalloc(struct sched_unit)) == NULL )
- return 1;
+ return NULL;
+
v->sched_unit = unit;
unit->vcpu_list = v;
unit->unit_id = v->vcpu_id;
unit->domain = d;
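+
+    /*
+     * Insert the unit into the domain's sched_unit_list. Units are
+     * created in ascending unit_id order, so this appends at the tail.
+     */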
+ for ( prev_unit = &d->sched_unit_list; *prev_unit;
+ prev_unit = &(*prev_unit)->next_in_list )
+ if ( (*prev_unit)->next_in_list &&
+ (*prev_unit)->next_in_list->unit_id > unit->unit_id )
+ break;
+
+ unit->next_in_list = *prev_unit;
+ *prev_unit = unit;
+
+ return unit;
+}
+
+int sched_init_vcpu(struct vcpu *v, unsigned int processor)
+{
+ struct domain *d = v->domain;
+ struct sched_unit *unit;
+
+ v->processor = processor;
+
+ if ( (unit = sched_alloc_unit(v)) == NULL )
+ return 1;
+
/* Initialise the per-vcpu timers. */
init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
v, v->processor);
@@ -330,8 +374,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
unit->priv = sched_alloc_vdata(dom_scheduler(d), unit, d->sched_priv);
if ( unit->priv == NULL )
{
- v->sched_unit = NULL;
- xfree(unit);
+ sched_free_unit(unit);
return 1;
}
@@ -474,8 +517,7 @@ void sched_destroy_vcpu(struct vcpu *v)
atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
sched_remove_unit(vcpu_scheduler(v), unit);
sched_free_vdata(vcpu_scheduler(v), unit->priv);
- xfree(unit);
- v->sched_unit = NULL;
+ sched_free_unit(unit);
}

int sched_init_domain(struct domain *d, int poolid)
@@ -277,9 +277,22 @@ struct sched_unit {
struct domain *domain;
struct vcpu *vcpu_list;
void *priv; /* scheduler private data */
+ struct sched_unit *next_in_list;
int unit_id;
};

+#define for_each_sched_unit(d, e) \
+ for ( (e) = (d)->sched_unit_list; (e) != NULL; (e) = (e)->next_in_list )
+
+/*
+ * All vcpus of a domain are in a single linked list with unit->vcpu_list
+ * pointing to the first vcpu of the unit. The loop must terminate when it
+ * reaches a vcpu which is not part of the unit being iterated over.
+ */
+#define for_each_sched_unit_vcpu(i, v) \
+ for ( (v) = (i)->vcpu_list; (v) != NULL && (v)->sched_unit == (i); \
+ (v) = (v)->next_in_list )
+
/* Per-domain lock can be recursively acquired in fault handlers. */
#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
@@ -333,6 +346,7 @@ struct domain
/* Scheduling. */
void *sched_priv; /* scheduler-specific data */
+ struct sched_unit *sched_unit_list;
struct cpupool *cpupool;
struct domain *next_in_list;

In order to make it easy to iterate over sched_unit elements of a domain,
build a single linked list and add an iterator for it. The new list is
guarded by the same mechanisms as the vcpu linked list, as it is modified
only via vcpu_create() or vcpu_destroy().

For completeness add another iterator for_each_sched_unit_vcpu() which
will iterate over all vcpus of a sched_unit (right now only one). This
will be needed later for larger scheduling granularity (e.g. cores).

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- add comment (Dario Faggioli)
---
 xen/common/schedule.c   | 60 +++++++++++++++++++++++++++++++++++++++++--------
 xen/include/xen/sched.h | 14 ++++++++++++
 2 files changed, 65 insertions(+), 9 deletions(-)
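
For illustration only (not part of the patch): below is a standalone
sketch using simplified stand-ins for struct vcpu, struct sched_unit and
struct domain; mock_alloc_unit() is a hypothetical analogue of
sched_alloc_unit(). It shows the list being built and both new iterators
being used with one vcpu per unit, and compiles on its own
(e.g. gcc -Wall demo.c).

#include <stdio.h>
#include <stdlib.h>

struct sched_unit;

struct vcpu {
    int vcpu_id;
    struct vcpu *next_in_list;
    struct sched_unit *sched_unit;
};

struct sched_unit {
    struct vcpu *vcpu_list;
    struct sched_unit *next_in_list;
    int unit_id;
};

struct domain {
    struct sched_unit *sched_unit_list;
};

#define for_each_sched_unit(d, e) \
    for ( (e) = (d)->sched_unit_list; (e) != NULL; (e) = (e)->next_in_list )

#define for_each_sched_unit_vcpu(i, v) \
    for ( (v) = (i)->vcpu_list; (v) != NULL && (v)->sched_unit == (i); \
          (v) = (v)->next_in_list )

static struct sched_unit *mock_alloc_unit(struct domain *d, struct vcpu *v)
{
    struct sched_unit *unit, **prev_unit;

    if ( (unit = calloc(1, sizeof(*unit))) == NULL )
        return NULL;

    v->sched_unit = unit;
    unit->vcpu_list = v;
    unit->unit_id = v->vcpu_id;

    /* Same walk as sched_alloc_unit(): append in ascending id order. */
    for ( prev_unit = &d->sched_unit_list; *prev_unit;
          prev_unit = &(*prev_unit)->next_in_list )
        if ( (*prev_unit)->next_in_list &&
             (*prev_unit)->next_in_list->unit_id > unit->unit_id )
            break;

    unit->next_in_list = *prev_unit;
    *prev_unit = unit;

    return unit;
}

int main(void)
{
    struct domain dom = { NULL };
    struct vcpu vcpus[3] = {
        { .vcpu_id = 0 }, { .vcpu_id = 1 }, { .vcpu_id = 2 }
    };
    struct sched_unit *unit;
    struct vcpu *v;
    int i;

    /* Chain the vcpus and wrap each one in its own unit. */
    for ( i = 0; i < 3; i++ )
    {
        if ( i < 2 )
            vcpus[i].next_in_list = &vcpus[i + 1];
        mock_alloc_unit(&dom, &vcpus[i]);
    }

    /* With one vcpu per unit the inner loop visits exactly one vcpu. */
    for_each_sched_unit ( &dom, unit )
        for_each_sched_unit_vcpu ( unit, v )
            printf("unit %d: vcpu %d\n", unit->unit_id, v->vcpu_id);

    return 0;
}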