@@ -506,6 +506,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
  * not backed by VMAs. Because 'unusual' entries may be walked this function
  * will also not lock the PTEs for the pte_entry() callback. This is useful for
  * walking the kernel pages tables or page tables for firmware.
+ *
+ * Either mm or pgd may be NULL, but not both.
  */
 int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 			  unsigned long end, const struct mm_walk_ops *ops,
@@ -520,10 +522,11 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 		.no_vma		= true
 	};
 
-	if (start >= end || !walk.mm)
+	if (start >= end || (!walk.mm && !walk.pgd))
 		return -EINVAL;
 
-	mmap_assert_locked(walk.mm);
+	if (walk.mm)
+		mmap_assert_locked(walk.mm);
 
 	return walk_pgd_range(start, end, &walk);
 }
Since e47690d756a7 ("x86: mm: avoid allocating struct mm_struct on the
stack") a pgd can be passed to walk_page_range_novma(). When a pgd is
given, no place in the pagewalk code uses walk.mm anymore, so permit
passing a NULL mm instead. It is up to the caller to ensure proper
locking on the pgd in this case.

Signed-off-by: Rolf Eike Beer <eb@emlix.com>
---
 mm/pagewalk.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
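For illustration only (not part of the patch): a minimal, hypothetical
caller that walks page tables with no mm_struct behind them, passing a
pgd directly together with a NULL mm. The fw_pgd pointer, the range,
and the callback names are invented for this sketch; since there is no
mm, serializing against concurrent changes to the tables is entirely
the caller's responsibility.

#include <linux/pagewalk.h>
#include <linux/printk.h>

/*
 * Log each PTE in the range. Note that in the no-VMA walk the PTEs are
 * not locked, so the entry is only read, never modified.
 */
static int sketch_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	pr_info("pte at %#lx: %#llx\n", addr,
		(unsigned long long)pte_val(*pte));
	return 0;
}

static const struct mm_walk_ops sketch_ops = {
	.pte_entry	= sketch_pte_entry,
};

/*
 * fw_pgd is a hypothetical pgd of firmware-provided page tables. With
 * this patch applied, mm may simply be NULL when a pgd is supplied.
 */
static void sketch_dump_range(pgd_t *fw_pgd, unsigned long start,
			      unsigned long end)
{
	walk_page_range_novma(NULL, start, end, &sketch_ops, fw_pgd, NULL);
}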