@@ -451,6 +451,7 @@ struct vm_fault {
* page table to avoid allocation from
* atomic context.
*/
+ struct mm_lock_range *range; /* MM read lock range. */
};
/* page entry size for vm->huge_fault() */
@@ -3831,6 +3831,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
.vma = vma,
.address = haddr,
.flags = flags,
+ .range = mm_coarse_lock_range(),
/*
* Hard to debug if it ends up being
* used by a callee that assumes
@@ -900,6 +900,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
.flags = FAULT_FLAG_ALLOW_RETRY,
.pmd = pmd,
.pgoff = linear_page_index(vma, address),
+ .range = mm_coarse_lock_range(),
};
/* we only decide to swapin, if there is enough young ptes */
@@ -4047,6 +4047,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
.flags = flags,
.pgoff = linear_page_index(vma, address),
.gfp_mask = __get_fault_gfp_mask(vma),
+ .range = mm_coarse_lock_range(),
};
unsigned int dirty = flags & FAULT_FLAG_WRITE;
struct mm_struct *mm = vma->vm_mm;
Add a range field to struct vm_fault, carrying the mmap_sem read lock
range that was locked for the given fault. Fault handlers that release
the mmap_sem should pass this locked range when doing so.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 include/linux/mm.h | 1 +
 mm/hugetlb.c       | 1 +
 mm/khugepaged.c    | 1 +
 mm/memory.c        | 1 +
 4 files changed, 4 insertions(+)
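For context, here is a minimal sketch of how a fault handler that drops the
mmap read lock might consume the new field. The helper name
mm_read_range_unlock() and the function example_drop_mmap_for_wait() are
illustrative assumptions based on the range locking series, not part of this
patch, and may differ from the real API:

/* Illustrative only -- not part of this patch. */
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Drop the mmap read lock for the range this fault holds, wait for the
 * page, and return VM_FAULT_RETRY so the fault is retried from scratch.
 * mm_read_range_unlock() is an assumed helper name.
 */
static vm_fault_t example_drop_mmap_for_wait(struct vm_fault *vmf,
					     struct page *page)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	if (!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))
		return 0;	/* caller must keep mmap_sem held */

	get_page(page);
	/* Release only the range this fault had locked. */
	mm_read_range_unlock(mm, vmf->range);	/* assumed API */
	wait_on_page_locked(page);
	put_page(page);
	return VM_FAULT_RETRY;
}

With the coarse range returned by mm_coarse_lock_range(), this behaves like
today's whole-mmap_sem drop; a finer-grained range would later allow other
faults outside the range to proceed concurrently.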