--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1460,6 +1460,15 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
+extern vm_fault_t __prepare_mm_fault(struct vm_area_struct *vma,
+		unsigned int flags);
+static inline vm_fault_t prepare_mm_fault(struct vm_area_struct *vma,
+		unsigned int flags)
+{
+	if (likely(vma->anon_vma))
+		return 0;
+	return __prepare_mm_fault(vma, flags);
+}
 extern vm_fault_t handle_mm_fault_range(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags,
 		struct mm_lock_range *range);
@@ -1477,6 +1486,11 @@ void unmap_mapping_pages(struct address_space *mapping,
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
+static inline vm_fault_t prepare_mm_fault(struct vm_area_struct *vma,
+		unsigned int flags)
+{
+	return 0;
+}
 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags)
 {
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4129,6 +4129,32 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	return handle_pte_fault(&vmf);
 }
 
+vm_fault_t __prepare_mm_fault(struct vm_area_struct *vma, unsigned int flags)
+{
+	vm_fault_t ret = 0;
+
+	if (vma_is_anonymous(vma) ||
+	    ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) ||
+	    (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_MAYSHARE))) {
+		if (flags & FAULT_FLAG_USER)
+			mem_cgroup_enter_user_fault();
+		if (unlikely(__anon_vma_prepare(vma)))
+			ret = VM_FAULT_OOM;
+		if (flags & FAULT_FLAG_USER) {
+			mem_cgroup_exit_user_fault();
+			/*
+			 * The task may have entered a memcg OOM situation but
+			 * if the allocation error was handled gracefully (no
+			 * VM_FAULT_OOM), there is no need to kill anything.
+			 * Just clean up the OOM state peacefully.
+			 */
+			if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+				mem_cgroup_oom_synchronize(false);
+		}
+	}
+	return ret;
+}
+
 /*
  * By the time we get here, we already hold the mm semaphore
  *
Add a prepare_mm_fault() function, which may allocate an anon_vma if
required for the incoming fault. This is necessary because the anon_vma
must be allocated on the vma of record, while in the range-locked case
the fault itself operates on a pseudo-vma.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 include/linux/mm.h | 14 ++++++++++++++
 mm/memory.c        | 26 ++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)
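
For illustration only (not part of the patch): a sketch of the intended
call pattern, based on the changelog above. The do_range_locked_fault()
wrapper and the on-stack pseudo-vma copy are hypothetical stand-ins for
the rest of the series; only prepare_mm_fault() and
handle_mm_fault_range() are introduced by this patch.

/*
 * Hypothetical caller, assuming a range-locked fault path that faults
 * against a private on-stack copy of the vma, as described above.
 */
static vm_fault_t do_range_locked_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags,
		struct mm_lock_range *range)
{
	struct vm_area_struct pvma;
	vm_fault_t ret;

	/*
	 * Allocate the anon_vma, if one is needed, on the vma of
	 * record. An anon_vma attached to the stack copy below would
	 * be lost when the copy goes away.
	 */
	ret = prepare_mm_fault(vma, flags);
	if (ret)
		return ret;

	pvma = *vma;	/* pseudo-vma: the fault operates on the copy */
	return handle_mm_fault_range(&pvma, address, flags, range);
}

Because prepare_mm_fault() is an inline test of vma->anon_vma with an
out-of-line slow path, the common case (anon_vma already allocated)
adds essentially no overhead to the fault path.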