From patchwork Tue Sep 27 16:08:09 2016
X-Patchwork-Submitter: Jan Kara
X-Patchwork-Id: 9352245
From: Jan Kara
To: linux-mm@kvack.org
Cc: linux-fsdevel@vger.kernel.org, linux-nvdimm@lists.01.org,
	Dan Williams, Ross Zwisler, "Kirill A. Shutemov", Jan Kara
Subject: [PATCH 05/20] mm: Trim __do_fault() arguments
Date: Tue, 27 Sep 2016 18:08:09 +0200
Message-Id: <1474992504-20133-6-git-send-email-jack@suse.cz>
X-Mailer: git-send-email 2.6.6
In-Reply-To: <1474992504-20133-1-git-send-email-jack@suse.cz>
References: <1474992504-20133-1-git-send-email-jack@suse.cz>

Use the vm_fault structure to pass cow_page, page, and entry in and out
of the function. That reduces the number of __do_fault() arguments from
4 to 1.

Signed-off-by: Jan Kara
Reviewed-by: Ross Zwisler
---
 mm/memory.c | 53 +++++++++++++++++++++++------------------------------
 1 file changed, 23 insertions(+), 30 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index b7f1f535e079..ba7760fb7db2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2844,26 +2844,22 @@ oom:
  * released depending on flags and vma->vm_ops->fault() return value.
  * See filemap_fault() and __lock_page_retry().
 */
-static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
-		struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	int ret;
 
-	vmf->cow_page = cow_page;
-
 	ret = vma->vm_ops->fault(vma, vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED) {
-		*entry = vmf->entry;
+	if (ret & VM_FAULT_DAX_LOCKED)
 		return ret;
-	}
 
 	if (unlikely(PageHWPoison(vmf->page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf->page);
 		put_page(vmf->page);
+		vmf->page = NULL;
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2872,7 +2868,6 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-	*page = vmf->page;
 	return ret;
 }
 
@@ -3169,7 +3164,6 @@ out:
 static int do_read_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	int ret = 0;
 
 	/*
@@ -3183,24 +3177,23 @@ static int do_read_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-	unlock_page(fault_page);
+	unlock_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		put_page(fault_page);
+		put_page(vmf->page);
 	return ret;
 }
 
 static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page, *new_page;
-	void *fault_entry;
+	struct page *new_page;
 	struct mem_cgroup *memcg;
 	int ret;
 
@@ -3217,20 +3210,21 @@ static int do_cow_fault(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
+	vmf->cow_page = new_page;
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
 	if (!(ret & VM_FAULT_DAX_LOCKED))
-		copy_user_highpage(new_page, fault_page, vmf->address, vma);
+		copy_user_highpage(new_page, vmf->page, vmf->address, vma);
 	__SetPageUptodate(new_page);
 
 	ret |= alloc_set_pte(vmf, memcg, new_page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 	} else {
 		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
 	}
@@ -3246,12 +3240,11 @@ uncharge_out:
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	struct address_space *mapping;
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3260,26 +3253,26 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * about to become writable
 	 */
 	if (vma->vm_ops->page_mkwrite) {
-		unlock_page(fault_page);
-		tmp = do_page_mkwrite(vma, fault_page, vmf->address);
+		unlock_page(vmf->page);
+		tmp = do_page_mkwrite(vma, vmf->page, vmf->address);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(fault_page);
+			put_page(vmf->page);
 			return tmp;
 		}
 	}
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 					VM_FAULT_RETRY))) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 		return ret;
 	}
 
-	if (set_page_dirty(fault_page))
+	if (set_page_dirty(vmf->page))
 		dirtied = 1;
 	/*
 	 * Take a local copy of the address_space - page.mapping may be zeroed
@@ -3287,8 +3280,8 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
 	 * release semantics to prevent the compiler from undoing this copying.
 	 */
-	mapping = page_rmapping(fault_page);
-	unlock_page(fault_page);
+	mapping = page_rmapping(vmf->page);
+	unlock_page(vmf->page);
 	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
 		/*
 		 * Some device drivers do not set page.mapping but still
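
[Editor's illustration, not part of the patch.] The pattern the patch adopts
is a single context struct that carries both inputs (cow_page) and outputs
(page, entry) across the call, replacing extra arguments and out-parameters.
The following is a minimal, userspace-compilable sketch of that calling
convention; struct page, struct vm_fault, and fake_do_fault() below are
simplified stand-ins for illustration, not the kernel's definitions.

	#include <stdio.h>

	struct page { const char *name; };	/* stand-in for struct page */

	/* Stand-in for struct vm_fault: one context struct carries
	 * inputs and outputs across the call. */
	struct vm_fault {
		struct page *cow_page;	/* in:  preallocated COW page, or NULL */
		struct page *page;	/* out: page the fault handler returned */
	};

	/* After the patch, the fault helper needs only the context struct. */
	static int fake_do_fault(struct vm_fault *vmf)
	{
		static struct page faulted = { "faulted page" };

		/* A handler reads inputs (vmf->cow_page) and writes
		 * outputs (vmf->page) through the same struct. */
		vmf->page = &faulted;
		return 0;
	}

	int main(void)
	{
		struct page cow = { "cow page" };
		struct vm_fault vmf = { .cow_page = &cow, .page = NULL };

		/* Caller: fill in the inputs, call with one argument... */
		if (fake_do_fault(&vmf) == 0)
			/* ...then read the results back out of the struct. */
			printf("fault returned %s (cow input: %s)\n",
			       vmf.page->name, vmf.cow_page->name);
		return 0;
	}

One consequence visible in the real patch: since callers now read vmf->page
rather than a local fault_page, __do_fault() clears vmf->page in the
PageHWPoison error path after put_page(), so no caller can dereference the
stale pointer.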