diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1252,7 +1252,7 @@ struct task_struct {
#endif
#ifdef CONFIG_NUMA
/* Protected by alloc_lock: */
- struct mempolicy *mempolicy;
+ struct mempolicy __rcu *mempolicy;
short il_prev;
short pref_node_fork;
#endif
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -894,8 +894,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
goto out;
	}

- old = current->mempolicy;
- current->mempolicy = new;
+ old = rcu_replace_pointer(current->mempolicy, new, true);
if (new && new->mode == MPOL_INTERLEAVE)
current->il_prev = MAX_NUMNODES-1;
task_unlock(current);
@@ -999,7 +998,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
if (err < 0)
goto out;
*policy = err;
- } else if (pol == current->mempolicy &&
+ } else if (pol == rcu_access_pointer(current->mempolicy) &&
pol->mode == MPOL_INTERLEAVE) {
*policy = next_node_in(current->il_prev, pol->nodes);
} else {
@@ -2065,7 +2064,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;

- if (!(mask && current->mempolicy))
+ if (!(mask && rcu_access_pointer(current->mempolicy)))
		return false;

task_lock(current);
@@ -2426,7 +2425,7 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
		return ERR_PTR(-ENOMEM);

/* task's mempolicy is protected by alloc_lock */
- if (old == current->mempolicy) {
+ if (old == rcu_access_pointer(current->mempolicy)) {
task_lock(current);
*new = *old;
task_unlock(current);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3052,7 +3052,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
nid_alloc = nid_here = numa_mem_id();
if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
nid_alloc = cpuset_slab_spread_node();
- else if (current->mempolicy)
+ else if (rcu_access_pointer(current->mempolicy))
nid_alloc = mempolicy_slab_node();
if (nid_alloc != nid_here)
return ____cache_alloc_node(cachep, flags, nid_alloc);
@@ -3188,7 +3188,8 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
	int slab_node = numa_mem_id();

if (nodeid == NUMA_NO_NODE) {
- if (current->mempolicy || cpuset_do_slab_mem_spread()) {
+ if (rcu_access_pointer(current->mempolicy) ||
+ cpuset_do_slab_mem_spread()) {
objp = alternate_node_alloc(cachep, flags);
if (objp)
goto out;
task->mempolicy is protected by task_lock() in the slow path, but for
performance the hot path takes no lock and holds no reference. That
makes it difficult for other processes to adjust a task's mempolicy
safely. For these reasons, add the __rcu annotation to the task
mempolicy. There is no need to add RCU protection to the vma mempolicy,
which is already protected by mmap_lock.

Suggested-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Zhongkun He <hezhongkun.hzk@bytedance.com>
---
 include/linux/sched.h | 2 +-
 mm/mempolicy.c        | 9 ++++-----
 mm/slab.c             | 5 +++--
 3 files changed, 8 insertions(+), 8 deletions(-)
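For reference, a minimal sketch (illustration only, not part of this
patch) of the read side that the __rcu annotation enables.
task_policy_mode() is a hypothetical helper name, and the sketch
assumes the mempolicy object is only freed after an RCU grace period,
which a full series would have to guarantee (e.g. by deferring the
final mpol_put()):

#include <linux/mempolicy.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical: sample another task's policy mode without task_lock(). */
static unsigned short task_policy_mode(struct task_struct *p)
{
	struct mempolicy *pol;
	unsigned short mode = MPOL_DEFAULT;

	rcu_read_lock();
	/* Paired with rcu_replace_pointer() in do_set_mempolicy(). */
	pol = rcu_dereference(p->mempolicy);
	if (pol)
		mode = pol->mode;
	rcu_read_unlock();

	return mode;
}

Note that the third argument of rcu_replace_pointer() is a lockdep
condition; since do_set_mempolicy() runs under task_lock(current),
lockdep_is_held(&current->alloc_lock) could be passed instead of the
unconditional true to keep lockdep checking on the update side.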