In order to make it easy to iterate over the sched_item elements of a
domain, build a singly linked list and add an iterator for it. The new
list is guarded by the same mechanisms as the vcpu linked list, as it
is modified only via vcpu_create() or vcpu_destroy().

For completeness add another iterator, for_each_sched_item_vcpu(),
which will iterate over all vcpus of a sched_item (right now only
one). This will be needed later for larger scheduling granularity
(e.g. cores).

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/schedule.c      | 56 ++++++++++++++++++++++++++++++++++++++++------
 xen/include/xen/sched-if.h |  8 +++++++
 xen/include/xen/sched.h    |  1 +
 3 files changed, 58 insertions(+), 7 deletions(-)

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -253,6 +253,52 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
     spin_unlock_irqrestore(lock1, flags);
 }
 
+static void sched_free_item(struct sched_item *item)
+{
+    struct sched_item *prev_item;
+    struct domain *d = item->vcpu->domain;
+
+    if ( d->sched_item_list == item )
+        d->sched_item_list = item->next_in_list;
+    else
+    {
+        for_each_sched_item ( d, prev_item )
+        {
+            if ( prev_item->next_in_list == item )
+            {
+                prev_item->next_in_list = item->next_in_list;
+                break;
+            }
+        }
+    }
+
+    item->vcpu->sched_item = NULL;
+    xfree(item);
+}
+
+static struct sched_item *sched_alloc_item(struct vcpu *v)
+{
+    struct sched_item *item, **prev_item;
+    struct domain *d = v->domain;
+
+    if ( (item = xzalloc(struct sched_item)) == NULL )
+        return NULL;
+
+    v->sched_item = item;
+    item->vcpu = v;
+
+    /* Keep the list sorted by vcpu_id. */
+    for ( prev_item = &d->sched_item_list; *prev_item;
+          prev_item = &(*prev_item)->next_in_list )
+        if ( (*prev_item)->vcpu->vcpu_id > v->vcpu_id )
+            break;
+
+    item->next_in_list = *prev_item;
+    *prev_item = item;
+
+    return item;
+}
+
 int sched_init_vcpu(struct vcpu *v, unsigned int processor)
 {
     struct domain *d = v->domain;
@@ -260,10 +306,8 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
 
     v->processor = processor;
 
-    if ( (item = xzalloc(struct sched_item)) == NULL )
+    if ( (item = sched_alloc_item(v)) == NULL )
         return 1;
-    v->sched_item = item;
-    item->vcpu = v;
 
     /* Initialise the per-vcpu timers. */
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
@@ -276,8 +320,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
 
     item->priv = SCHED_OP(dom_scheduler(d), alloc_vdata, item, d->sched_priv);
     if ( item->priv == NULL )
     {
-        v->sched_item = NULL;
-        xfree(item);
+        sched_free_item(item);
         return 1;
     }
@@ -420,8 +463,7 @@ void sched_destroy_vcpu(struct vcpu *v)
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
     SCHED_OP(vcpu_scheduler(v), remove_item, item);
     SCHED_OP(vcpu_scheduler(v), free_vdata, item->priv);
-    xfree(item);
-    v->sched_item = NULL;
+    sched_free_item(item);
 }
 
 int sched_init_domain(struct domain *d, int poolid)
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -51,8 +51,16 @@ DECLARE_PER_CPU(struct cpupool *, cpupool);
 struct sched_item {
     struct vcpu           *vcpu;
     void                  *priv;      /* scheduler private data */
+    struct sched_item     *next_in_list;
 };
 
+#define for_each_sched_item(d, e)                                         \
+    for ( (e) = (d)->sched_item_list; (e) != NULL; (e) = (e)->next_in_list )
+
+#define for_each_sched_item_vcpu(i, v)                                    \
+    for ( (v) = (i)->vcpu; (v) != NULL && (v)->sched_item == (i);         \
+          (v) = (v)->next_in_list )
+
 /*
  * Scratch space, for avoiding having too many cpumask_t on the stack.
  * Within each scheduler, when using the scratch mask of one pCPU:
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -334,6 +334,7 @@ struct domain
 
     /* Scheduling. */
     void            *sched_priv;    /* scheduler-specific data */
+    struct sched_item *sched_item_list;
     struct cpupool  *cpupool;
 
     struct domain   *next_in_list;
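
For illustration only, not part of the patch: a minimal sketch of how the
two new iterators could be used together. The helper sched_count_items()
is hypothetical; with the current 1:1 vcpu/sched_item mapping the inner
loop visits exactly one vcpu per item, so both counters stay equal.

static unsigned int sched_count_items(struct domain *d)
{
    struct sched_item *item;
    struct vcpu *v;
    unsigned int n_items = 0, n_vcpus = 0;

    /* Walk the domain's singly linked sched_item list. */
    for_each_sched_item ( d, item )
    {
        n_items++;

        /* Walk all vcpus of this item (currently exactly one). */
        for_each_sched_item_vcpu ( item, v )
            n_vcpus++;
    }

    ASSERT(n_items == n_vcpus);   /* holds while an item has one vcpu */

    return n_items;
}

As the commit message notes, the list is guarded by the same mechanisms
as the vcpu linked list, since it only changes via vcpu_create() or
vcpu_destroy(), so a walk like the above needs no extra locking from
those contexts.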