@@ -9,7 +9,6 @@
#include <xen/libelf.h>
#include <xen/pfn.h>
#include <xen/sched.h>
-#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <asm/amd.h>
@@ -227,9 +226,9 @@ unsigned int __init dom0_max_vcpus(void)
dom0_nodes = node_online_map;
for_each_node_mask ( node, dom0_nodes )
cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node));
- cpumask_and(&dom0_cpus, &dom0_cpus, cpupool0->cpu_valid);
+ cpumask_and(&dom0_cpus, &dom0_cpus, cpupool_valid_cpus(cpupool0));
if ( cpumask_empty(&dom0_cpus) )
- cpumask_copy(&dom0_cpus, cpupool0->cpu_valid);
+ cpumask_copy(&dom0_cpus, cpupool_valid_cpus(cpupool0));
max_vcpus = cpumask_weight(&dom0_cpus);
if ( opt_dom0_max_vcpus_min > max_vcpus )
@@ -10,7 +10,6 @@
#include <xen/ctype.h>
#include <xen/err.h>
#include <xen/sched.h>
-#include <xen/sched-if.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
@@ -565,75 +564,6 @@ void __init setup_system_domains(void)
#endif
}
-void domain_update_node_affinity(struct domain *d)
-{
- cpumask_var_t dom_cpumask, dom_cpumask_soft;
- cpumask_t *dom_affinity;
- const cpumask_t *online;
- struct sched_unit *unit;
- unsigned int cpu;
-
- /* Do we have vcpus already? If not, no need to update node-affinity. */
- if ( !d->vcpu || !d->vcpu[0] )
- return;
-
- if ( !zalloc_cpumask_var(&dom_cpumask) )
- return;
- if ( !zalloc_cpumask_var(&dom_cpumask_soft) )
- {
- free_cpumask_var(dom_cpumask);
- return;
- }
-
- online = cpupool_domain_master_cpumask(d);
-
- spin_lock(&d->node_affinity_lock);
-
- /*
- * If d->auto_node_affinity is true, let's compute the domain's
- * node-affinity and update d->node_affinity accordingly. if false,
- * just leave d->auto_node_affinity alone.
- */
- if ( d->auto_node_affinity )
- {
- /*
- * We want the narrowest possible set of pcpus (to get the narowest
- * possible set of nodes). What we need is the cpumask of where the
- * domain can run (the union of the hard affinity of all its vcpus),
- * and the full mask of where it would prefer to run (the union of
- * the soft affinity of all its various vcpus). Let's build them.
- */
- for_each_sched_unit ( d, unit )
- {
- cpumask_or(dom_cpumask, dom_cpumask, unit->cpu_hard_affinity);
- cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
- unit->cpu_soft_affinity);
- }
- /* Filter out non-online cpus */
- cpumask_and(dom_cpumask, dom_cpumask, online);
- ASSERT(!cpumask_empty(dom_cpumask));
- /* And compute the intersection between hard, online and soft */
- cpumask_and(dom_cpumask_soft, dom_cpumask_soft, dom_cpumask);
-
- /*
- * If not empty, the intersection of hard, soft and online is the
- * narrowest set we want. If empty, we fall back to hard&online.
- */
- dom_affinity = cpumask_empty(dom_cpumask_soft) ?
- dom_cpumask : dom_cpumask_soft;
-
- nodes_clear(d->node_affinity);
- for_each_cpu ( cpu, dom_affinity )
- node_set(cpu_to_node(cpu), d->node_affinity);
- }
-
- spin_unlock(&d->node_affinity_lock);
-
- free_cpumask_var(dom_cpumask_soft);
- free_cpumask_var(dom_cpumask);
-}
-
-
int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity)
{
/* Being disjoint with the system is just wrong. */
@@ -11,7 +11,6 @@
#include <xen/err.h>
#include <xen/mm.h>
#include <xen/sched.h>
-#include <xen/sched-if.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/grant_table.h>
@@ -65,9 +64,9 @@ static int bitmap_to_xenctl_bitmap(struct xenctl_bitmap *xenctl_bitmap,
return err;
}
-static int xenctl_bitmap_to_bitmap(unsigned long *bitmap,
- const struct xenctl_bitmap *xenctl_bitmap,
- unsigned int nbits)
+int xenctl_bitmap_to_bitmap(unsigned long *bitmap,
+ const struct xenctl_bitmap *xenctl_bitmap,
+ unsigned int nbits)
{
unsigned int guest_bytes, copy_bytes;
int err = 0;
@@ -200,7 +199,7 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
info->shared_info_frame = mfn_to_gmfn(d, virt_to_mfn(d->shared_info));
BUG_ON(SHARED_M2P(info->shared_info_frame));
- info->cpupool = d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;
+ info->cpupool = cpupool_get_id(d);
memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
@@ -234,16 +233,6 @@ void domctl_lock_release(void)
spin_unlock(&current->domain->hypercall_deadlock_mutex);
}
-static inline
-int vcpuaffinity_params_invalid(const struct xen_domctl_vcpuaffinity *vcpuaff)
-{
- return vcpuaff->flags == 0 ||
- ((vcpuaff->flags & XEN_VCPUAFFINITY_HARD) &&
- guest_handle_is_null(vcpuaff->cpumap_hard.bitmap)) ||
- ((vcpuaff->flags & XEN_VCPUAFFINITY_SOFT) &&
- guest_handle_is_null(vcpuaff->cpumap_soft.bitmap));
-}
-
void vnuma_destroy(struct vnuma_info *vnuma)
{
if ( vnuma )
@@ -608,122 +597,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
case XEN_DOMCTL_setvcpuaffinity:
case XEN_DOMCTL_getvcpuaffinity:
- {
- struct vcpu *v;
- const struct sched_unit *unit;
- struct xen_domctl_vcpuaffinity *vcpuaff = &op->u.vcpuaffinity;
-
- ret = -EINVAL;
- if ( vcpuaff->vcpu >= d->max_vcpus )
- break;
-
- ret = -ESRCH;
- if ( (v = d->vcpu[vcpuaff->vcpu]) == NULL )
- break;
-
- unit = v->sched_unit;
- ret = -EINVAL;
- if ( vcpuaffinity_params_invalid(vcpuaff) )
- break;
-
- if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
- {
- cpumask_var_t new_affinity, old_affinity;
- cpumask_t *online = cpupool_domain_master_cpumask(v->domain);
-
- /*
- * We want to be able to restore hard affinity if we are trying
- * setting both and changing soft affinity (which happens later,
- * when hard affinity has been succesfully chaged already) fails.
- */
- if ( !alloc_cpumask_var(&old_affinity) )
- {
- ret = -ENOMEM;
- break;
- }
- cpumask_copy(old_affinity, unit->cpu_hard_affinity);
-
- if ( !alloc_cpumask_var(&new_affinity) )
- {
- free_cpumask_var(old_affinity);
- ret = -ENOMEM;
- break;
- }
-
- /* Undo a stuck SCHED_pin_override? */
- if ( vcpuaff->flags & XEN_VCPUAFFINITY_FORCE )
- vcpu_temporary_affinity(v, NR_CPUS, VCPU_AFFINITY_OVERRIDE);
-
- ret = 0;
-
- /*
- * We both set a new affinity and report back to the caller what
- * the scheduler will be effectively using.
- */
- if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
- {
- ret = xenctl_bitmap_to_bitmap(cpumask_bits(new_affinity),
- &vcpuaff->cpumap_hard,
- nr_cpu_ids);
- if ( !ret )
- ret = vcpu_set_hard_affinity(v, new_affinity);
- if ( ret )
- goto setvcpuaffinity_out;
-
- /*
- * For hard affinity, what we return is the intersection of
- * cpupool's online mask and the new hard affinity.
- */
- cpumask_and(new_affinity, online, unit->cpu_hard_affinity);
- ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
- new_affinity);
- }
- if ( vcpuaff->flags & XEN_VCPUAFFINITY_SOFT )
- {
- ret = xenctl_bitmap_to_bitmap(cpumask_bits(new_affinity),
- &vcpuaff->cpumap_soft,
- nr_cpu_ids);
- if ( !ret)
- ret = vcpu_set_soft_affinity(v, new_affinity);
- if ( ret )
- {
- /*
- * Since we're returning error, the caller expects nothing
- * happened, so we rollback the changes to hard affinity
- * (if any).
- */
- if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
- vcpu_set_hard_affinity(v, old_affinity);
- goto setvcpuaffinity_out;
- }
-
- /*
- * For soft affinity, we return the intersection between the
- * new soft affinity, the cpupool's online map and the (new)
- * hard affinity.
- */
- cpumask_and(new_affinity, new_affinity, online);
- cpumask_and(new_affinity, new_affinity,
- unit->cpu_hard_affinity);
- ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
- new_affinity);
- }
-
- setvcpuaffinity_out:
- free_cpumask_var(new_affinity);
- free_cpumask_var(old_affinity);
- }
- else
- {
- if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
- ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
- unit->cpu_hard_affinity);
- if ( vcpuaff->flags & XEN_VCPUAFFINITY_SOFT )
- ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
- unit->cpu_soft_affinity);
- }
+ ret = vcpu_affinity_domctl(d, op->cmd, &op->u.vcpuaffinity);
break;
- }
case XEN_DOMCTL_scheduler_op:
ret = sched_adjust(d, &op->u.scheduler_op);
@@ -16,11 +16,12 @@
#include <xen/cpumask.h>
#include <xen/percpu.h>
#include <xen/sched.h>
-#include <xen/sched-if.h>
#include <xen/warning.h>
#include <xen/keyhandler.h>
#include <xen/cpu.h>
+#include "sched-if.h"
+
#define for_each_cpupool(ptr) \
for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
@@ -876,6 +877,16 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
return ret;
}
+int cpupool_get_id(const struct domain *d)
+{
+ return d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;
+}
+
+cpumask_t *cpupool_valid_cpus(struct cpupool *pool)
+{
+ return pool->cpu_valid;
+}
+
void dump_runq(unsigned char key)
{
unsigned long flags;
similarity index 99%
rename from xen/include/xen/sched-if.h
rename to xen/common/sched/sched-if.h
@@ -12,9 +12,6 @@
#include <xen/err.h>
#include <xen/rcupdate.h>
-/* A global pointer to the initial cpupool (POOL0). */
-extern struct cpupool *cpupool0;
-
/* cpus currently in no cpupool */
extern cpumask_t cpupool_free_cpus;
@@ -26,7 +26,6 @@
#include <xen/lib.h>
#include <xen/sched.h>
-#include <xen/sched-if.h>
#include <xen/timer.h>
#include <xen/softirq.h>
#include <xen/time.h>
@@ -35,6 +34,8 @@
#include <xen/guest_access.h>
#include <public/sysctl.h>
+#include "sched-if.h"
+
/**************************************************************************
* Private Macros *
**************************************************************************/
@@ -15,7 +15,6 @@
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
-#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <asm/atomic.h>
#include <asm/div64.h>
@@ -24,6 +23,7 @@
#include <xen/trace.h>
#include <xen/err.h>
+#include "sched-if.h"
/*
* Locking:
@@ -18,7 +18,6 @@
#include <xen/event.h>
#include <xen/time.h>
#include <xen/perfc.h>
-#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <asm/div64.h>
#include <xen/errno.h>
@@ -26,6 +25,8 @@
#include <xen/cpu.h>
#include <xen/keyhandler.h>
+#include "sched-if.h"
+
/* Meant only for helping developers during debugging. */
/* #define d2printk printk */
#define d2printk(x...)
@@ -29,10 +29,11 @@
*/
#include <xen/sched.h>
-#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <xen/trace.h>
+#include "sched-if.h"
+
/*
* null tracing events. Check include/public/trace.h for more details.
*/
@@ -20,7 +20,6 @@
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/perfc.h>
-#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <asm/atomic.h>
#include <xen/errno.h>
@@ -31,6 +30,8 @@
#include <xen/err.h>
#include <xen/guest_access.h>
+#include "sched-if.h"
+
/*
* TODO:
*
@@ -23,7 +23,6 @@
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/perfc.h>
-#include <xen/sched-if.h>
#include <xen/softirq.h>
#include <xen/trace.h>
#include <xen/mm.h>
@@ -38,6 +37,8 @@
#include <xsm/xsm.h>
#include <xen/err.h>
+#include "sched-if.h"
+
#ifdef CONFIG_XEN_GUEST
#include <asm/guest.h>
#else
@@ -1607,6 +1608,194 @@ int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
return ret;
}
+static inline
+int vcpuaffinity_params_invalid(const struct xen_domctl_vcpuaffinity *vcpuaff)
+{
+ return vcpuaff->flags == 0 ||
+ ((vcpuaff->flags & XEN_VCPUAFFINITY_HARD) &&
+ guest_handle_is_null(vcpuaff->cpumap_hard.bitmap)) ||
+ ((vcpuaff->flags & XEN_VCPUAFFINITY_SOFT) &&
+ guest_handle_is_null(vcpuaff->cpumap_soft.bitmap));
+}
+
+int vcpu_affinity_domctl(struct domain *d, uint32_t cmd,
+ struct xen_domctl_vcpuaffinity *vcpuaff)
+{
+ struct vcpu *v;
+ const struct sched_unit *unit;
+ int ret = 0;
+
+ if ( vcpuaff->vcpu >= d->max_vcpus )
+ return -EINVAL;
+
+ if ( (v = d->vcpu[vcpuaff->vcpu]) == NULL )
+ return -ESRCH;
+
+ if ( vcpuaffinity_params_invalid(vcpuaff) )
+ return -EINVAL;
+
+ unit = v->sched_unit;
+
+ if ( cmd == XEN_DOMCTL_setvcpuaffinity )
+ {
+ cpumask_var_t new_affinity, old_affinity;
+ cpumask_t *online = cpupool_domain_master_cpumask(v->domain);
+
+ /*
+ * We want to be able to restore hard affinity if we are trying to
+ * set both and changing soft affinity (which happens later, when
+ * hard affinity has already been changed successfully) fails.
+ */
+ if ( !alloc_cpumask_var(&old_affinity) )
+ return -ENOMEM;
+
+ cpumask_copy(old_affinity, unit->cpu_hard_affinity);
+
+ if ( !alloc_cpumask_var(&new_affinity) )
+ {
+ free_cpumask_var(old_affinity);
+ return -ENOMEM;
+ }
+
+ /* Undo a stuck SCHED_pin_override? */
+ if ( vcpuaff->flags & XEN_VCPUAFFINITY_FORCE )
+ vcpu_temporary_affinity(v, NR_CPUS, VCPU_AFFINITY_OVERRIDE);
+
+ ret = 0;
+
+ /*
+ * We both set a new affinity and report back to the caller what
+ * the scheduler will be effectively using.
+ */
+ if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
+ {
+ ret = xenctl_bitmap_to_bitmap(cpumask_bits(new_affinity),
+ &vcpuaff->cpumap_hard, nr_cpu_ids);
+ if ( !ret )
+ ret = vcpu_set_hard_affinity(v, new_affinity);
+ if ( ret )
+ goto setvcpuaffinity_out;
+
+ /*
+ * For hard affinity, what we return is the intersection of
+ * cpupool's online mask and the new hard affinity.
+ */
+ cpumask_and(new_affinity, online, unit->cpu_hard_affinity);
+ ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard, new_affinity);
+ }
+ if ( vcpuaff->flags & XEN_VCPUAFFINITY_SOFT )
+ {
+ ret = xenctl_bitmap_to_bitmap(cpumask_bits(new_affinity),
+ &vcpuaff->cpumap_soft, nr_cpu_ids);
+ if ( !ret )
+ ret = vcpu_set_soft_affinity(v, new_affinity);
+ if ( ret )
+ {
+ /*
+ * Since we're returning error, the caller expects nothing
+ * happened, so we rollback the changes to hard affinity
+ * (if any).
+ */
+ if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
+ vcpu_set_hard_affinity(v, old_affinity);
+ goto setvcpuaffinity_out;
+ }
+
+ /*
+ * For soft affinity, we return the intersection between the
+ * new soft affinity, the cpupool's online map and the (new)
+ * hard affinity.
+ */
+ cpumask_and(new_affinity, new_affinity, online);
+ cpumask_and(new_affinity, new_affinity, unit->cpu_hard_affinity);
+ ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft, new_affinity);
+ }
+
+ setvcpuaffinity_out:
+ free_cpumask_var(new_affinity);
+ free_cpumask_var(old_affinity);
+ }
+ else
+ {
+ if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
+ ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
+ unit->cpu_hard_affinity);
+ if ( vcpuaff->flags & XEN_VCPUAFFINITY_SOFT )
+ ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
+ unit->cpu_soft_affinity);
+ }
+
+ return ret;
+}
+
+void domain_update_node_affinity(struct domain *d)
+{
+ cpumask_var_t dom_cpumask, dom_cpumask_soft;
+ cpumask_t *dom_affinity;
+ const cpumask_t *online;
+ struct sched_unit *unit;
+ unsigned int cpu;
+
+ /* Do we have vcpus already? If not, no need to update node-affinity. */
+ if ( !d->vcpu || !d->vcpu[0] )
+ return;
+
+ if ( !zalloc_cpumask_var(&dom_cpumask) )
+ return;
+ if ( !zalloc_cpumask_var(&dom_cpumask_soft) )
+ {
+ free_cpumask_var(dom_cpumask);
+ return;
+ }
+
+ online = cpupool_domain_master_cpumask(d);
+
+ spin_lock(&d->node_affinity_lock);
+
+ /*
+ * If d->auto_node_affinity is true, let's compute the domain's
+ * node-affinity and update d->node_affinity accordingly. If false,
+ * just leave d->auto_node_affinity alone.
+ */
+ if ( d->auto_node_affinity )
+ {
+ /*
+ * We want the narrowest possible set of pcpus (to get the narrowest
+ * possible set of nodes). What we need is the cpumask of where the
+ * domain can run (the union of the hard affinity of all its vcpus),
+ * and the full mask of where it would prefer to run (the union of
+ * the soft affinity of all its various vcpus). Let's build them.
+ */
+ for_each_sched_unit ( d, unit )
+ {
+ cpumask_or(dom_cpumask, dom_cpumask, unit->cpu_hard_affinity);
+ cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
+ unit->cpu_soft_affinity);
+ }
+ /* Filter out non-online cpus */
+ cpumask_and(dom_cpumask, dom_cpumask, online);
+ ASSERT(!cpumask_empty(dom_cpumask));
+ /* And compute the intersection between hard, online and soft */
+ cpumask_and(dom_cpumask_soft, dom_cpumask_soft, dom_cpumask);
+
+ /*
+ * If not empty, the intersection of hard, soft and online is the
+ * narrowest set we want. If empty, we fall back to hard&online.
+ */
+ dom_affinity = cpumask_empty(dom_cpumask_soft) ?
+ dom_cpumask : dom_cpumask_soft;
+
+ nodes_clear(d->node_affinity);
+ for_each_cpu ( cpu, dom_affinity )
+ node_set(cpu_to_node(cpu), d->node_affinity);
+ }
+
+ spin_unlock(&d->node_affinity_lock);
+
+ free_cpumask_var(dom_cpumask_soft);
+ free_cpumask_var(dom_cpumask);
+}
+
typedef long ret_t;
#endif /* !COMPAT */
@@ -27,6 +27,9 @@ struct xen_domctl_getdomaininfo;
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info);
void arch_get_domain_info(const struct domain *d,
struct xen_domctl_getdomaininfo *info);
+int xenctl_bitmap_to_bitmap(unsigned long *bitmap,
+ const struct xenctl_bitmap *xenctl_bitmap,
+ unsigned int nbits);
/*
* Arch-specifics.
@@ -50,6 +50,9 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t);
/* A global pointer to the hardware domain (usually DOM0). */
extern struct domain *hardware_domain;
+/* A global pointer to the initial cpupool (POOL0). */
+extern struct cpupool *cpupool0;
+
#ifdef CONFIG_LATE_HWDOM
extern domid_t hardware_domid;
#else
@@ -929,6 +932,8 @@ int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason);
int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity);
int vcpu_set_soft_affinity(struct vcpu *v, const cpumask_t *affinity);
void restore_vcpu_affinity(struct domain *d);
+int vcpu_affinity_domctl(struct domain *d, uint32_t cmd,
+ struct xen_domctl_vcpuaffinity *vcpuaff);
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
uint64_t get_cpu_idle_time(unsigned int cpu);
@@ -1054,6 +1059,8 @@ int cpupool_add_domain(struct domain *d, int poolid);
void cpupool_rm_domain(struct domain *d);
int cpupool_move_domain(struct domain *d, struct cpupool *c);
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
+int cpupool_get_id(const struct domain *d);
+cpumask_t *cpupool_valid_cpus(struct cpupool *pool);
void schedule_dump(struct cpupool *c);
extern void dump_runq(unsigned char key);
include/xen/sched-if.h should be private to scheduler code, so move it
to common/sched/sched-if.h and move the remaining use cases to
cpupool.c and schedule.c.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/arch/x86/dom0_build.c                    |   5 +-
 xen/common/domain.c                          |  70 ----------
 xen/common/domctl.c                          | 135 +------------------
 xen/common/sched/cpupool.c                   |  13 +-
 xen/{include/xen => common/sched}/sched-if.h |   3 -
 xen/common/sched/sched_arinc653.c            |   3 +-
 xen/common/sched/sched_credit.c              |   2 +-
 xen/common/sched/sched_credit2.c             |   3 +-
 xen/common/sched/sched_null.c                |   3 +-
 xen/common/sched/sched_rt.c                  |   3 +-
 xen/common/sched/schedule.c                  | 191 ++++++++++++++++++++++++++-
 xen/include/xen/domain.h                     |   3 +
 xen/include/xen/sched.h                      |   7 +
 13 files changed, 228 insertions(+), 213 deletions(-)
 rename xen/{include/xen => common/sched}/sched-if.h (99%)
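
As a purely illustrative sketch (not part of the patch), this is how code
outside common/sched/ is expected to look after the move: instead of
including sched-if.h and dereferencing struct cpupool directly, callers use
the accessors now declared in xen/sched.h. The example_report() helper below
is hypothetical; only cpupool_get_id(), cpupool_valid_cpus() and the moved
cpupool0 declaration come from this patch.

#include <xen/lib.h>
#include <xen/sched.h>

/* Hypothetical caller outside common/sched/; no sched-if.h include needed. */
static void example_report(const struct domain *d)
{
    /* Pool id via the accessor instead of d->cpupool->cpupool_id. */
    printk("dom%d is in cpupool %d\n", d->domain_id, cpupool_get_id(d));

    /* Valid CPUs of pool0 via the accessor instead of cpupool0->cpu_valid. */
    printk("pool0 has %u usable CPUs\n",
           cpumask_weight(cpupool_valid_cpus(cpupool0)));
}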