--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -249,6 +249,54 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
     spin_unlock_irqrestore(lock1, flags);
 }
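 
+/* Unlink an item from its domain's sched_item list and free it. */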
+static void sched_free_item(struct sched_item *item)
+{
+    struct sched_item *prev_item;
+    struct domain *d = item->vcpu->domain;
+
+    if ( d->sched_item_list == item )
+        d->sched_item_list = item->next_in_list;
+    else
+    {
+        for_each_sched_item(d, prev_item)
+        {
+            if ( prev_item->next_in_list == item )
+            {
+                prev_item->next_in_list = item->next_in_list;
+                break;
+            }
+        }
+    }
+
+    item->vcpu->sched_item = NULL;
+    xfree(item);
+}
+
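+/* Allocate a sched_item for vcpu v and link it into its domain's list. */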
+static struct sched_item *sched_alloc_item(struct vcpu *v)
+{
+    struct sched_item *item, **prev_item;
+    struct domain *d = v->domain;
+
+    if ( (item = xzalloc(struct sched_item)) == NULL )
+        return NULL;
+
+    v->sched_item = item;
+    item->vcpu = v;
+
+    /* Keep the list sorted by vcpu_id: insert before the first higher id. */
+    for ( prev_item = &d->sched_item_list; *prev_item;
+          prev_item = &(*prev_item)->next_in_list )
+        if ( (*prev_item)->vcpu->vcpu_id > v->vcpu_id )
+            break;
+
+    item->next_in_list = *prev_item;
+    *prev_item = item;
+
+    return item;
+}
+
 int sched_init_vcpu(struct vcpu *v, unsigned int processor)
 {
     struct domain *d = v->domain;
@@ -256,10 +304,8 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
 
     v->processor = processor;
 
-    if ( (item = xzalloc(struct sched_item)) == NULL )
+    if ( (item = sched_alloc_item(v)) == NULL )
         return 1;
-    v->sched_item = item;
-    item->vcpu = v;
 
     /* Initialise the per-vcpu timers. */
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
@@ -272,8 +318,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     item->priv = sched_alloc_vdata(dom_scheduler(d), item, d->sched_priv);
     if ( item->priv == NULL )
     {
-        v->sched_item = NULL;
-        xfree(item);
+        sched_free_item(item);
         return 1;
     }
 
@@ -416,8 +461,7 @@ void sched_destroy_vcpu(struct vcpu *v)
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
     sched_remove_item(vcpu_scheduler(v), item);
     sched_free_vdata(vcpu_scheduler(v), item->priv);
-    xfree(item);
-    v->sched_item = NULL;
+    sched_free_item(item);
 }
 
 int sched_init_domain(struct domain *d, int poolid)
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -279,8 +279,16 @@ struct vcpu
 struct sched_item {
     struct vcpu           *vcpu;
     void                  *priv;      /* scheduler private data */
+    struct sched_item     *next_in_list;
 };
 
+#define for_each_sched_item(d, e)                                         \
+    for ( (e) = (d)->sched_item_list; (e) != NULL; (e) = (e)->next_in_list )
+
+#define for_each_sched_item_vcpu(i, v)                                    \
+    for ( (v) = (i)->vcpu; (v) != NULL && (v)->sched_item == (i);         \
+          (v) = (v)->next_in_list )
+
 /* Per-domain lock can be recursively acquired in fault handlers. */
 #define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
 #define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
@@ -339,6 +347,7 @@ struct domain
 
     /* Scheduling. */
     void            *sched_priv;    /* scheduler-specific data */
+    struct sched_item *sched_item_list;
     struct cpupool  *cpupool;
 
     struct domain   *next_in_list;
In order to make it easy to iterate over the sched_item elements of a
domain, build a singly linked list of them and add an iterator for it.
The new list is guarded by the same mechanisms as the vcpu linked list,
as it is modified only via vcpu_create() or vcpu_destroy().

For completeness add another iterator, for_each_sched_item_vcpu(), which
iterates over all vcpus of a sched_item (right now only one). This will
be needed later for larger scheduling granularity (e.g. cores).

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/schedule.c   | 58 +++++++++++++++++++++++++++++++++++++++++------
 xen/include/xen/sched.h |  9 +++++++++
 2 files changed, 60 insertions(+), 7 deletions(-)
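
For illustration, here is a minimal usage sketch of how the two new
iterators compose (not part of the patch; dump_sched_items() and its
printk output are invented for this example):

/* Illustrative only: print every sched_item of a domain and its vcpus. */
static void dump_sched_items(const struct domain *d)
{
    struct sched_item *item;
    struct vcpu *v;

    /* Walk the domain's sched_item list, then each item's vcpus. */
    for_each_sched_item ( d, item )
        for_each_sched_item_vcpu ( item, v )
            printk("d%d: sched_item of vcpu %d\n",
                   d->domain_id, v->vcpu_id);
}

As the message above notes, such a walk is safe in any context where
traversing the domain's vcpu list is safe, since both lists are only
modified during vcpu creation and destruction.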