diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -88,7 +88,8 @@ int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, unsigned int flags);
+ unsigned long address, unsigned int flags,
+ struct mm_lock_range *range);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
@@ -307,7 +308,7 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
- unsigned int flags)
+ unsigned int flags, struct mm_lock_range *range)
{
BUG();
return 0;
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1460,8 +1460,15 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
-extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags);
+extern vm_fault_t handle_mm_fault_range(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ struct mm_lock_range *range);
+static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags)
+{
+ return handle_mm_fault_range(vma, address, flags,
+ mm_coarse_lock_range());
+}
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3788,7 +3788,8 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
struct vm_area_struct *vma,
struct address_space *mapping, pgoff_t idx,
- unsigned long address, pte_t *ptep, unsigned int flags)
+ unsigned long address, pte_t *ptep, unsigned int flags,
+ struct mm_lock_range *range)
{
struct hstate *h = hstate_vma(vma);
vm_fault_t ret = VM_FAULT_SIGBUS;
@@ -3831,7 +3832,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
.vma = vma,
.address = haddr,
.flags = flags,
- .range = mm_coarse_lock_range(),
+ .range = range,
/*
* Hard to debug if it ends up being
* used by a callee that assumes
@@ -3997,7 +3998,8 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
#endif

vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
+ unsigned long address, unsigned int flags,
+ struct mm_lock_range *range)
{
pte_t *ptep, entry;
spinlock_t *ptl;
@@ -4039,7 +4041,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,

entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
- ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+ ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
+ flags, range);
goto out_mutex;
}

@@ -4348,7 +4351,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
FAULT_FLAG_ALLOW_RETRY);
fault_flags |= FAULT_FLAG_TRIED;
}
- ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
+ ret = hugetlb_fault(mm, vma, vaddr, fault_flags,
+ mm_coarse_lock_range());
if (ret & VM_FAULT_ERROR) {
err = vm_fault_to_errno(ret, flags);
remainder = 0;
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4039,7 +4039,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
+ unsigned long address, unsigned int flags,
+ struct mm_lock_range *range)
{
struct vm_fault vmf = {
.vma = vma,
@@ -4047,7 +4048,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
.flags = flags,
.pgoff = linear_page_index(vma, address),
.gfp_mask = __get_fault_gfp_mask(vma),
- .range = mm_coarse_lock_range(),
+ .range = range,
};
unsigned int dirty = flags & FAULT_FLAG_WRITE;
struct mm_struct *mm = vma->vm_mm;
@@ -4134,8 +4135,9 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
-vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags)
+vm_fault_t handle_mm_fault_range(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ struct mm_lock_range *range)
{
vm_fault_t ret;

@@ -4160,9 +4162,9 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
mem_cgroup_enter_user_fault();

if (unlikely(is_vm_hugetlb_page(vma)))
- ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
+ ret = hugetlb_fault(vma->vm_mm, vma, address, flags, range);
else
- ret = __handle_mm_fault(vma, address, flags);
+	ret = __handle_mm_fault(vma, address, flags, range);

if (flags & FAULT_FLAG_USER) {
mem_cgroup_exit_user_fault();
@@ -4178,7 +4180,7 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,

return ret;
}
-EXPORT_SYMBOL_GPL(handle_mm_fault);
+EXPORT_SYMBOL_GPL(handle_mm_fault_range);

#ifndef __PAGETABLE_P4D_FOLDED
/*
This change adds a new handle_mm_fault_range() function, which behaves
like handle_mm_fault() but takes an explicit MM lock range.
handle_mm_fault() remains an inline wrapper that passes the default
coarse locking range.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 include/linux/hugetlb.h |  5 +++--
 include/linux/mm.h      | 11 +++++++++--
 mm/hugetlb.c            | 14 +++++++++-----
 mm/memory.c             | 16 +++++++++-------
 4 files changed, 30 insertions(+), 16 deletions(-)
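As a usage illustration (not part of the patch): a caller that already
tracks a fine-grained lock range can pass it through explicitly, while
existing callers keep going through the unchanged handle_mm_fault()
wrapper and so keep today's coarse behavior. The sketch below is
minimal and hedged: fault_one_page() is made up for this example, and
mm_init_lock_range() / mm_read_range_lock() / mm_read_range_unlock()
are assumed names for the range-lock primitives introduced earlier in
this series, not interfaces defined by this patch.

/*
 * Sketch only, not part of this patch. Assumes the mm_lock_range
 * primitives from earlier in this series; the helper names are
 * hypothetical.
 */
static vm_fault_t fault_one_page(struct vm_area_struct *vma,
				 unsigned long address, unsigned int flags)
{
	struct mm_lock_range range;
	vm_fault_t ret;

	/* Cover just the page containing the faulting address. */
	mm_init_lock_range(&range, address & PAGE_MASK,
			   (address & PAGE_MASK) + PAGE_SIZE);

	mm_read_range_lock(vma->vm_mm, &range);
	/* Same semantics as handle_mm_fault(), but with an explicit range. */
	ret = handle_mm_fault_range(vma, address, flags, &range);
	mm_read_range_unlock(vma->vm_mm, &range);

	return ret;
}

Callers that do not care stay on handle_mm_fault(), whose inline body
simply forwards mm_coarse_lock_range(), so call sites can be converted
to explicit ranges incrementally.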