@@ -1824,9 +1824,28 @@ int vcpu_affinity_domctl(struct domain *d, uint32_t cmd,
return ret;
}
-void domain_update_node_affinity(struct domain *d)
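+/* Allocate both scratch affinity masks; on failure nothing stays allocated. */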
+bool alloc_affinity_masks(struct affinity_masks *affinity)
{
- cpumask_var_t dom_cpumask, dom_cpumask_soft;
+ if ( !alloc_cpumask_var(&affinity->hard) )
+ return false;
+ if ( !alloc_cpumask_var(&affinity->soft) )
+ {
+ free_cpumask_var(affinity->hard);
+ return false;
+ }
+
+ return true;
+}
+
+void free_affinity_masks(struct affinity_masks *affinity)
+{
+ free_cpumask_var(affinity->soft);
+ free_cpumask_var(affinity->hard);
+}
+
+void domain_update_node_aff(struct domain *d, struct affinity_masks *affinity)
+{
+ struct affinity_masks masks;
cpumask_t *dom_affinity;
const cpumask_t *online;
struct sched_unit *unit;
@@ -1836,14 +1855,16 @@ void domain_update_node_affinity(struct domain *d)
if ( !d->vcpu || !d->vcpu[0] )
return;
- if ( !zalloc_cpumask_var(&dom_cpumask) )
- return;
- if ( !zalloc_cpumask_var(&dom_cpumask_soft) )
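+ /* No masks were handed in by the caller: use local ones for this call only. */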
+ if ( !affinity )
{
- free_cpumask_var(dom_cpumask);
- return;
+ affinity = &masks;
+ if ( !alloc_affinity_masks(affinity) )
+ return;
}
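+ /* The masks may hold stale data (the caller can reuse them), so clear them first. */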
+ cpumask_clear(affinity->hard);
+ cpumask_clear(affinity->soft);
+
online = cpupool_domain_master_cpumask(d);
spin_lock(&d->node_affinity_lock);
@@ -1864,22 +1885,21 @@ void domain_update_node_affinity(struct domain *d)
*/
for_each_sched_unit ( d, unit )
{
- cpumask_or(dom_cpumask, dom_cpumask, unit->cpu_hard_affinity);
- cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
- unit->cpu_soft_affinity);
+ cpumask_or(affinity->hard, affinity->hard, unit->cpu_hard_affinity);
+ cpumask_or(affinity->soft, affinity->soft, unit->cpu_soft_affinity);
}
/* Filter out non-online cpus */
- cpumask_and(dom_cpumask, dom_cpumask, online);
- ASSERT(!cpumask_empty(dom_cpumask));
+ cpumask_and(affinity->hard, affinity->hard, online);
+ ASSERT(!cpumask_empty(affinity->hard));
/* And compute the intersection between hard, online and soft */
- cpumask_and(dom_cpumask_soft, dom_cpumask_soft, dom_cpumask);
+ cpumask_and(affinity->soft, affinity->soft, affinity->hard);
/*
* If not empty, the intersection of hard, soft and online is the
* narrowest set we want. If empty, we fall back to hard&online.
*/
- dom_affinity = cpumask_empty(dom_cpumask_soft) ?
- dom_cpumask : dom_cpumask_soft;
+ dom_affinity = cpumask_empty(affinity->soft) ? affinity->hard
+ : affinity->soft;
nodes_clear(d->node_affinity);
for_each_cpu ( cpu, dom_affinity )
@@ -1888,8 +1908,8 @@ void domain_update_node_affinity(struct domain *d)
spin_unlock(&d->node_affinity_lock);
- free_cpumask_var(dom_cpumask_soft);
- free_cpumask_var(dom_cpumask);
+ if ( affinity == &masks )
+ free_affinity_masks(affinity);
}
typedef long ret_t;
@@ -410,6 +410,25 @@ int cpupool_move_domain(struct domain *d, struct cpupool *c)
return ret;
}
+/* Update the node affinities of all domains in a cpupool. */
+static void cpupool_update_node_affinity(const struct cpupool *c)
+{
+ struct affinity_masks masks;
+ struct domain *d;
+
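+ /* Allocate the scratch masks once and reuse them for each domain in the pool. */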
+ if ( !alloc_affinity_masks(&masks) )
+ return;
+
+ rcu_read_lock(&domlist_read_lock);
+
+ for_each_domain_in_cpupool(d, c)
+ domain_update_node_aff(d, &masks);
+
+ rcu_read_unlock(&domlist_read_lock);
+
+ free_affinity_masks(&masks);
+}
+
/*
* assign a specific cpu to a cpupool
* cpupool_lock must be held
@@ -417,7 +436,6 @@ int cpupool_move_domain(struct domain *d, struct cpupool *c)
static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
{
int ret;
- struct domain *d;
const cpumask_t *cpus;
cpus = sched_get_opt_cpumask(c->gran, cpu);
@@ -442,12 +460,7 @@ static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
rcu_read_unlock(&sched_res_rculock);
- rcu_read_lock(&domlist_read_lock);
- for_each_domain_in_cpupool(d, c)
- {
- domain_update_node_affinity(d);
- }
- rcu_read_unlock(&domlist_read_lock);
+ cpupool_update_node_affinity(c);
return 0;
}
@@ -456,18 +469,14 @@ static int cpupool_unassign_cpu_finish(struct cpupool *c)
{
int cpu = cpupool_moving_cpu;
const cpumask_t *cpus;
- struct domain *d;
int ret;
if ( c != cpupool_cpu_moving )
return -EADDRNOTAVAIL;
- /*
- * We need this for scanning the domain list, both in
- * cpu_disable_scheduler(), and at the bottom of this function.
- */
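+ /* The domain list lock is now only needed around cpu_disable_scheduler(). */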
rcu_read_lock(&domlist_read_lock);
ret = cpu_disable_scheduler(cpu);
+ rcu_read_unlock(&domlist_read_lock);
rcu_read_lock(&sched_res_rculock);
cpus = get_sched_res(cpu)->cpus;
@@ -494,11 +503,7 @@ static int cpupool_unassign_cpu_finish(struct cpupool *c)
}
rcu_read_unlock(&sched_res_rculock);
- for_each_domain_in_cpupool(d, c)
- {
- domain_update_node_affinity(d);
- }
- rcu_read_unlock(&domlist_read_lock);
+ cpupool_update_node_affinity(c);
return ret;
}
@@ -593,6 +593,13 @@ affinity_balance_cpumask(const struct sched_unit *unit, int step,
cpumask_copy(mask, unit->cpu_hard_affinity);
}
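+/* Scratch cpumasks used when (re)computing a domain's node affinity. */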
+struct affinity_masks {
+ cpumask_var_t hard;
+ cpumask_var_t soft;
+};
+
+bool alloc_affinity_masks(struct affinity_masks *affinity);
+void free_affinity_masks(struct affinity_masks *affinity);
void sched_rm_cpu(unsigned int cpu);
const cpumask_t *sched_get_opt_cpumask(enum sched_gran opt, unsigned int cpu);
void schedule_dump(struct cpupool *c);
@@ -666,8 +666,15 @@ static inline void get_knownalive_domain(struct domain *d)
ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
}
+struct affinity_masks;
+
int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity);
-void domain_update_node_affinity(struct domain *d);
+void domain_update_node_aff(struct domain *d, struct affinity_masks *affinity);
+
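+/* Passing NULL lets domain_update_node_aff() allocate its own scratch masks. */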
+static inline void domain_update_node_affinity(struct domain *d)
+{
+ domain_update_node_aff(d, NULL);
+}
/*
* To be implemented by each architecture, sanity checking the configuration