@@ -1489,7 +1489,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	struct mempolicy *new;
+	struct mempolicy *new, *old;
 	unsigned long vmstart;
 	unsigned long vmend;
 	unsigned long end;
@@ -1521,31 +1521,27 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
 		return 0;
 	mmap_write_lock(mm);
 	for_each_vma_range(vmi, vma, end) {
-		vmstart = max(start, vma->vm_start);
-		vmend = min(end, vma->vm_end);
-		new = mpol_dup(vma_policy(vma));
-		if (IS_ERR(new)) {
-			err = PTR_ERR(new);
-			break;
-		}
-		/*
-		 * Only update home node if there is an existing vma policy
-		 */
-		if (!new)
-			continue;
-
 		/*
 		 * If any vma in the range got policy other than MPOL_BIND
 		 * or MPOL_PREFERRED_MANY we return error. We don't reset
 		 * the home node for vmas we already updated before.
 		 */
-		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
-			mpol_put(new);
+		old = vma_policy(vma);
+		if (!old)
+			continue;
+		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
 			err = -EOPNOTSUPP;
 			break;
 		}
+		new = mpol_dup(old);
+		if (IS_ERR(new)) {
+			err = PTR_ERR(new);
+			break;
+		}
 		new->home_node = home_node;
+		vmstart = max(start, vma->vm_start);
+		vmend = min(end, vma->vm_end);
 		err = mbind_range(mm, vmstart, vmend, new);
 		mpol_put(new);
 		if (err)
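
The user-visible behaviour this hunk preserves: set_mempolicy_home_node() only acts on ranges whose existing VMA policy is MPOL_BIND or MPOL_PREFERRED_MANY, and fails with EOPNOTSUPP otherwise. Below is a minimal userspace sketch of that call path; it is not part of the patch, and it assumes an x86_64 machine with libnuma headers (link with -lnuma), hard-coding syscall number 450 only as a fallback for older headers. The choice of a 2 MiB mapping and node 0 is illustrative.

/* Sketch only: exercise set_mempolicy_home_node() on an MPOL_BIND range. */
#define _GNU_SOURCE
#include <numaif.h>		/* mbind(), MPOL_BIND; link with -lnuma */
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

#ifndef __NR_set_mempolicy_home_node
#define __NR_set_mempolicy_home_node 450	/* x86_64 value; adjust for other arches */
#endif

int main(void)
{
	size_t len = 2UL << 20;
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* Give the range a policy the syscall accepts (MPOL_BIND). */
	if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask), 0))
		perror("mbind");
	/* Set the home node; this would fail with EOPNOTSUPP if the VMA
	 * policy were, say, MPOL_INTERLEAVE, or be skipped with no policy. */
	if (syscall(__NR_set_mempolicy_home_node, (unsigned long)p, len, 0UL, 0UL))
		perror("set_mempolicy_home_node");
	return 0;
}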