@@ -59,7 +59,7 @@ integer_param("sched_ratelimit_us", sched_ratelimit_us);
enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
unsigned int __read_mostly sched_granularity = 1;
bool __read_mostly sched_disable_smt_switching;
-const cpumask_t *sched_res_mask = &cpumask_all;
+cpumask_var_t sched_res_mask;
/* Common lock for free cpus. */
static DEFINE_SPINLOCK(sched_free_cpu_lock);
@@ -2377,8 +2377,14 @@ static int cpu_schedule_up(unsigned int cpu)
sd = xzalloc(struct sched_resource);
if ( sd == NULL )
return -ENOMEM;
+ if ( !zalloc_cpumask_var(&sd->cpus) )
+ {
+ xfree(sd);
+ return -ENOMEM;
+ }
+
sd->processor = cpu;
- sd->cpus = cpumask_of(cpu);
+ cpumask_copy(sd->cpus, cpumask_of(cpu));
set_sched_res(cpu, sd);
sd->scheduler = &sched_idle_ops;
@@ -2390,6 +2396,8 @@ static int cpu_schedule_up(unsigned int cpu)
/* We start with cpu granularity. */
sd->granularity = 1;
+ cpumask_set_cpu(cpu, sched_res_mask);
+
/* Boot CPU is dealt with later in scheduler_init(). */
if ( cpu == 0 )
return 0;
@@ -2422,6 +2430,7 @@ static void sched_res_free(struct rcu_head *head)
{
struct sched_resource *sd = container_of(head, struct sched_resource, rcu);
+ free_cpumask_var(sd->cpus);
xfree(sd);
}
@@ -2570,6 +2579,9 @@ void __init scheduler_init(void)
printk("Using '%s' (%s)\n", ops.name, ops.opt_name);
}
+ if ( !zalloc_cpumask_var(&sched_res_mask) )
+ BUG();
+
if ( cpu_schedule_up(0) )
BUG();
register_cpu_notifier(&cpu_schedule_nfb);
@@ -24,7 +24,7 @@ extern cpumask_t cpupool_free_cpus;
extern int sched_ratelimit_us;
/* Scheduling resource mask. */
-extern const cpumask_t *sched_res_mask;
+extern cpumask_var_t sched_res_mask;
/* Number of vcpus per struct sched_unit. */
enum sched_gran {
@@ -58,7 +58,7 @@ struct sched_resource {
atomic_t urgent_count; /* how many urgent vcpus */
unsigned int processor;
unsigned int granularity;
- const cpumask_t *cpus; /* cpus covered by this struct */
+ cpumask_var_t cpus; /* cpus covered by this struct */
struct rcu_head rcu;
};
Prepare supporting multiple cpus per scheduling resource by allocating
the cpumask per resource dynamically.

Modify sched_res_mask to have only one bit per scheduling resource set.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V1: new patch (carved out from other patch)
---
 xen/common/schedule.c      | 16 ++++++++++++++--
 xen/include/xen/sched-if.h |  4 ++--
 2 files changed, 16 insertions(+), 4 deletions(-)