--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -283,7 +283,7 @@ static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
cpupool_cpu_moving = NULL;
}
cpumask_set_cpu(cpu, c->cpu_valid);
- cpumask_and(c->res_valid, c->cpu_valid, sched_res_mask);
+ cpumask_and(c->res_valid, c->cpu_valid, &sched_res_mask);
rcu_read_lock(&domlist_read_lock);
for_each_domain_in_cpupool(d, c)
@@ -376,7 +376,7 @@ static int cpupool_unassign_cpu_start(struct cpupool *c, unsigned int cpu)
atomic_inc(&c->refcnt);
cpupool_cpu_moving = c;
cpumask_clear_cpu(cpu, c->cpu_valid);
- cpumask_and(c->res_valid, c->cpu_valid, sched_res_mask);
+ cpumask_and(c->res_valid, c->cpu_valid, &sched_res_mask);
out:
spin_unlock(&cpupool_lock);
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -61,7 +61,7 @@ integer_param("sched_ratelimit_us", sched_ratelimit_us);
/* Number of vcpus per struct sched_unit. */
bool __read_mostly sched_disable_smt_switching;
-const cpumask_t *sched_res_mask = &cpumask_all;
+cpumask_t sched_res_mask;
/* Common lock for free cpus. */
static DEFINE_SPINLOCK(sched_free_cpu_lock);
@@ -2411,8 +2411,14 @@ static int cpu_schedule_up(unsigned int cpu)
sr = xzalloc(struct sched_resource);
if ( sr == NULL )
return -ENOMEM;
+ if ( !zalloc_cpumask_var(&sr->cpus) )
+ {
+ xfree(sr);
+ return -ENOMEM;
+ }
+
sr->master_cpu = cpu;
- sr->cpus = cpumask_of(cpu);
+ cpumask_copy(sr->cpus, cpumask_of(cpu));
set_sched_res(cpu, sr);
sr->scheduler = &sched_idle_ops;
@@ -2424,6 +2430,8 @@ static int cpu_schedule_up(unsigned int cpu)
/* We start with cpu granularity. */
sr->granularity = 1;
+ cpumask_set_cpu(cpu, &sched_res_mask);
+
/* Boot CPU is dealt with later in scheduler_init(). */
if ( cpu == 0 )
return 0;
@@ -2456,6 +2464,7 @@ static void sched_res_free(struct rcu_head *head)
{
struct sched_resource *sr = container_of(head, struct sched_resource, rcu);
+ free_cpumask_var(sr->cpus);
xfree(sr);
}
@@ -2469,7 +2478,9 @@ static void cpu_schedule_down(unsigned int cpu)
kill_timer(&sr->s_timer);
+ cpumask_clear_cpu(cpu, &sched_res_mask);
set_sched_res(cpu, NULL);
+
call_rcu(&sr->rcu, sched_res_free);
rcu_read_unlock(&sched_res_rculock);
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -24,7 +24,7 @@ extern cpumask_t cpupool_free_cpus;
extern int sched_ratelimit_us;
/* Scheduling resource mask. */
-extern const cpumask_t *sched_res_mask;
+extern cpumask_t sched_res_mask;
/* Number of vcpus per struct sched_unit. */
enum sched_gran {
@@ -57,7 +57,7 @@ struct sched_resource {
/* Cpu with lowest id in scheduling resource. */
unsigned int master_cpu;
unsigned int granularity;
- const cpumask_t *cpus; /* cpus covered by this struct */
+ cpumask_var_t cpus; /* cpus covered by this struct */
struct rcu_head rcu;
};
Prepare for supporting multiple cpus per scheduling resource by
allocating the cpumask per resource dynamically.

Modify sched_res_mask to have only one bit per scheduling resource set.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V1: new patch (carved out from other patch)
V4:
- use cpumask_t for sched_res_mask (Jan Beulich)
- clear cpu in sched_res_mask when taking cpu away (Jan Beulich)
---
 xen/common/cpupool.c       |  4 ++--
 xen/common/schedule.c      | 15 +++++++++++++--
 xen/include/xen/sched-if.h |  4 ++--
 3 files changed, 17 insertions(+), 6 deletions(-)
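
Not part of the patch: below is a minimal illustrative sketch of the
allocation/teardown pattern the hunks above introduce, assuming the existing
Xen cpumask API (zalloc_cpumask_var(), cpumask_copy(), free_cpumask_var(),
cpumask_set_cpu(), cpumask_clear_cpu()).  struct demo_res, demo_res_mask and
the demo_res_alloc()/demo_res_free() helpers are hypothetical names used only
to show the intended lifecycle of the per-resource cpumask and of the
one-bit-per-resource mask.

/*
 * Illustrative only: stripped-down model of the per-resource cpumask
 * handling added in cpu_schedule_up()/sched_res_free().
 */
#include <xen/cpumask.h>
#include <xen/xmalloc.h>

struct demo_res {
    unsigned int master_cpu;
    cpumask_var_t cpus;          /* allocated per resource, like sr->cpus */
};

static cpumask_t demo_res_mask;  /* one bit per resource's master_cpu */

static struct demo_res *demo_res_alloc(unsigned int cpu)
{
    struct demo_res *r = xzalloc(struct demo_res);

    if ( r == NULL )
        return NULL;

    /* Allocate the mask before the resource becomes visible to others. */
    if ( !zalloc_cpumask_var(&r->cpus) )
    {
        xfree(r);
        return NULL;
    }

    r->master_cpu = cpu;
    cpumask_copy(r->cpus, cpumask_of(cpu));  /* start with cpu granularity */
    cpumask_set_cpu(cpu, &demo_res_mask);    /* one bit per resource */

    return r;
}

static void demo_res_free(struct demo_res *r)
{
    cpumask_clear_cpu(r->master_cpu, &demo_res_mask);
    free_cpumask_var(r->cpus);               /* mirrors sched_res_free() */
    xfree(r);
}

As in cpu_schedule_up(), the mask is allocated (with the error path freeing
the partially constructed resource) before the resource is published via
set_sched_res(), and it is freed only from the RCU callback after the cpu has
been cleared from sched_res_mask.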