@@ -2329,27 +2329,6 @@ EXPORT_SYMBOL(generic_file_read_iter);
#ifdef CONFIG_MMU
#define MMAP_LOTSAMISS (100)
-static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
- struct file *fpin)
-{
- int flags = vmf->flags;
-
- if (fpin)
- return fpin;
-
- /*
- * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
- * anything, so we only pin the file and drop the mmap_sem if only
- * FAULT_FLAG_ALLOW_RETRY is set.
- */
- if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
- FAULT_FLAG_ALLOW_RETRY) {
- fpin = get_file(vmf->vma->vm_file);
- up_read(&vmf->vma->vm_mm->mmap_sem);
- }
- return fpin;
-}
-
/*
* lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
* @vmf - the vm_fault for this fault.
@@ -362,6 +362,27 @@ vma_address(struct page *page, struct vm
return max(start, vma->vm_start);
}
+static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
+ struct file *fpin)
+{
+ int flags = vmf->flags;
+
+ if (fpin)
+ return fpin;
+
+ /*
+ * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
+ * anything, so we only pin the file and drop the mmap_sem if only
+ * FAULT_FLAG_ALLOW_RETRY is set.
+ */
+ if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
+ FAULT_FLAG_ALLOW_RETRY) {
+ fpin = get_file(vmf->vma->vm_file);
+ up_read(&vmf->vma->vm_mm->mmap_sem);
+ }
+ return fpin;
+}
+
#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
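
With the helper moved to mm/internal.h, mm/memory.c can reuse the same pin-the-file-and-drop-mmap_sem dance that mm/filemap.c already relies on. A hedged sketch of the contract a caller has to follow (do_blocking_step() is a made-up name standing in for any fault-path step that may sleep; the real caller added by this patch is fault_dirty_shared_page() below):

static vm_fault_t do_blocking_step(struct vm_fault *vmf)
{
	struct file *fpin;

	/* Pin vma->vm_file and drop mmap_sem, if the fault flags permit. */
	fpin = maybe_unlock_mmap_for_io(vmf, NULL);

	/* ... sleep or wait for IO here, without mmap_sem when fpin is set ... */

	if (fpin) {
		/* mmap_sem is already gone: drop the pin and ask for a retry. */
		fput(fpin);
		return VM_FAULT_RETRY;
	}

	/* The flags did not allow dropping the lock, so we blocked under it. */
	return 0;
}
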
@@ -2289,10 +2289,11 @@ static vm_fault_t do_page_mkwrite(struct
*
* The function expects the page to be locked and unlocks it.
*/
-static void fault_dirty_shared_page(struct vm_area_struct *vma,
- struct page *page)
+static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping;
+ struct page *page = vmf->page;
bool dirtied;
bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
@@ -2307,16 +2308,30 @@ static void fault_dirty_shared_page(stru
mapping = page_rmapping(page);
unlock_page(page);
+ if (!page_mkwrite)
+ file_update_time(vma->vm_file);
+
+ /*
+ * Throttle page dirtying rate down to writeback speed.
+ *
+ * mapping may be NULL here because some device drivers do not
+ * set page.mapping but still dirty their pages
+ *
+ * Drop the mmap_sem before waiting on IO, if we can. The file
+ * is pinning the mapping, as per above.
+ */
if ((dirtied || page_mkwrite) && mapping) {
- /*
- * Some device drivers do not set page.mapping
- * but still dirty their pages
- */
+ struct file *fpin;
+
+ fpin = maybe_unlock_mmap_for_io(vmf, NULL);
balance_dirty_pages_ratelimited(mapping);
+ if (fpin) {
+ fput(fpin);
+ return VM_FAULT_RETRY;
+ }
}
- if (!page_mkwrite)
- file_update_time(vma->vm_file);
+ return 0;
}
/*
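
Note that fault_dirty_shared_page() now has exactly two outcomes, decided by the fault flags via maybe_unlock_mmap_for_io(): either it throttles while still holding mmap_sem and returns 0, or it drops the lock first and returns VM_FAULT_RETRY. A small illustration-only helper (not part of the patch) that mirrors the flag check above:

/*
 * Illustration only, not in the patch: would the lock be dropped for a
 * given set of fault flags?  Mirrors the check in
 * maybe_unlock_mmap_for_io().
 */
static inline bool would_drop_mmap_sem(unsigned int flags)
{
	/* Drop only when a retry is allowed AND the caller can wait. */
	return (flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
		FAULT_FLAG_ALLOW_RETRY;
}

So callers that fault with FAULT_FLAG_RETRY_NOWAIT set (such as get_user_pages() users asking for non-blocking behaviour) keep throttling synchronously under mmap_sem, as before.
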
@@ -2571,6 +2586,7 @@ static vm_fault_t wp_page_shared(struct
__releases(vmf->ptl)
{
struct vm_area_struct *vma = vmf->vma;
+ vm_fault_t ret = VM_FAULT_WRITE;
get_page(vmf->page);
@@ -2594,10 +2610,10 @@ static vm_fault_t wp_page_shared(struct
wp_page_reuse(vmf);
lock_page(vmf->page);
}
- fault_dirty_shared_page(vma, vmf->page);
+ ret |= fault_dirty_shared_page(vmf);
put_page(vmf->page);
- return VM_FAULT_WRITE;
+ return ret;
}
/*
@@ -3641,7 +3657,7 @@ static vm_fault_t do_shared_fault(struct
return ret;
}
- fault_dirty_shared_page(vma, vmf->page);
+ ret |= fault_dirty_shared_page(vmf);
return ret;
}
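
These last two hunks just OR the new return value into what the callers already report, so a write fault that had to throttle after dropping the lock comes back from handle_mm_fault() with VM_FAULT_RETRY set (together with VM_FAULT_WRITE in the wp_page_shared() case). The consumer of that bit is the arch page-fault handler. A rough, hedged sketch of that consumer, simplified from memory of the x86 handler of roughly this era (error paths, signal checks and the NOWAIT case elided; not part of this patch):

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	/* ... access checks elided ... */

	fault = handle_mm_fault(vma, address, flags);

	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * mmap_sem was already dropped inside handle_mm_fault()
		 * (here: by maybe_unlock_mmap_for_io() before throttling in
		 * balance_dirty_pages_ratelimited()).  Retry exactly once.
		 */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	up_read(&mm->mmap_sem);
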