From patchwork Tue Dec 6 16:03:46 2016
X-Patchwork-Submitter: Kevin Brodsky
X-Patchwork-Id: 9462875
From: Kevin Brodsky
To: linux-arm-kernel@lists.infradead.org
Cc: Jisheng Zhang, Catalin Marinas, Kevin Brodsky, Nathan Lynch,
 Will Deacon, Christopher Covington, Dmitry Safonov
Subject: [RFC PATCH v3 04/11] arm64: Refactor vDSO init/setup
Date: Tue, 6 Dec 2016 16:03:46 +0000
Message-Id: <20161206160353.14581-5-kevin.brodsky@arm.com>
In-Reply-To: <20161206160353.14581-1-kevin.brodsky@arm.com>
References: <20161206160353.14581-1-kevin.brodsky@arm.com>

Move the logic for setting up mappings and pages for the vDSO into
static functions. This makes the vDSO setup code more consistent with
the compat side and will allow it to be reused for the future compat
vDSO.
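For illustration only (this snippet is not part of the patch), the
refactored helpers are meant to let a future compat vDSO init mirror the
native one. The vdso32_* names below are assumptions about such a
follow-up, not code from this series:

/*
 * Hypothetical sketch of the intended compat reuse. vdso32_start,
 * vdso32_end and vdso32_mappings are assumed names, not introduced
 * by this patch.
 */
static struct vdso_mappings vdso32_mappings __ro_after_init;

static int __init vdso32_init(void)
{
	extern char vdso32_start, vdso32_end;

	return vdso_mappings_init("vdso32", &vdso32_start, &vdso32_end,
				  &vdso32_mappings);
}
arch_initcall(vdso32_init);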
Cc: Will Deacon
Cc: Catalin Marinas
Cc: Nathan Lynch
Cc: Christopher Covington
Cc: Dmitry Safonov
Cc: Jisheng Zhang
Signed-off-by: Kevin Brodsky
---
 arch/arm64/kernel/vdso.c | 170 +++++++++++++++++++++++++++--------------------
 1 file changed, 99 insertions(+), 71 deletions(-)

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index d189e5be5039..4d3048619be8 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -39,8 +39,10 @@
 #include
 #include

-extern char vdso_start, vdso_end;
-static unsigned long vdso_pages __ro_after_init;
+struct vdso_mappings {
+	unsigned long num_code_pages;
+	struct vm_special_mapping data_mapping, code_mapping;
+};

 /*
  * The vDSO data page.
@@ -51,6 +53,93 @@ static union {
 } vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = &vdso_data_store.data;

+static int __init vdso_mappings_init(const char *name,
+				     const char *code_start,
+				     const char *code_end,
+				     struct vdso_mappings *mappings)
+{
+	unsigned long i, num_code_pages;
+	struct page **pages;
+
+	if (memcmp(code_start, "\177ELF", 4)) {
+		pr_err("%s is not a valid ELF object!\n", name);
+		return -EINVAL;
+	}
+
+	num_code_pages = (code_end - code_start) >> PAGE_SHIFT;
+	pr_info("%s: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		name, num_code_pages + 1, num_code_pages, code_start, 1L,
+		vdso_data);
+
+	/*
+	 * Allocate space for storing pointers to the vDSO code pages + the
+	 * data page. The pointers must have the same lifetime as the mappings,
+	 * which are static, so there is no need to keep track of the pointer
+	 * array to free it.
+	 */
+	pages = kmalloc_array(num_code_pages + 1, sizeof(struct page *),
+			      GFP_KERNEL);
+	if (pages == NULL)
+		return -ENOMEM;
+
+	/* Grab the vDSO data page */
+	pages[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
+
+	/* Grab the vDSO code pages */
+	for (i = 0; i < num_code_pages; i++)
+		pages[i + 1] = pfn_to_page(PHYS_PFN(__pa(code_start)) + i);
+
+	/* Populate the special mapping structures */
+	mappings->data_mapping = (struct vm_special_mapping) {
+		.name	= "[vvar]",
+		.pages	= &pages[0],
+	};
+
+	mappings->code_mapping = (struct vm_special_mapping) {
+		.name	= "[vdso]",
+		.pages	= &pages[1],
+	};
+
+	mappings->num_code_pages = num_code_pages;
+	return 0;
+}
+
+static int vdso_setup(struct mm_struct *mm,
+		      const struct vdso_mappings *mappings)
+{
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+	void *ret;
+
+	vdso_text_len = mappings->num_code_pages << PAGE_SHIFT;
+	/* Be sure to map the data page */
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+
+	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+	if (IS_ERR_VALUE(vdso_base)) {
+		ret = ERR_PTR(vdso_base);
+		goto out;
+	}
+
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
+				       &mappings->data_mapping);
+	if (IS_ERR(ret))
+		goto out;
+
+	vdso_base += PAGE_SIZE;
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &mappings->code_mapping);
+	if (IS_ERR(ret))
+		goto out;
+
+	mm->context.vdso = (void *)vdso_base;
+
+out:
+	return PTR_ERR_OR_ZERO(ret);
+}
+
 #ifdef CONFIG_COMPAT

 /* sigreturn trampolines page */
@@ -175,90 +264,29 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)

 #endif /* CONFIG_COMPAT */

-static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
-	{
-		.name	= "[vvar]",
-	},
-	{
-		.name	= "[vdso]",
-	},
-};
+static struct vdso_mappings vdso_mappings __ro_after_init;

 static int __init vdso_init(void)
 {
-	int i;
-	struct page **vdso_pagelist;
-
-	if (memcmp(&vdso_start, "\177ELF", 4)) {
-		pr_err("vDSO is not a valid ELF object!\n");
-		return -EINVAL;
-	}
+	extern char vdso_start, vdso_end;

-	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
-		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
-
-	/* Allocate the vDSO pagelist, plus a page for the data. */
-	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
-				GFP_KERNEL);
-	if (vdso_pagelist == NULL)
-		return -ENOMEM;
-
-	/* Grab the vDSO data page. */
-	vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
-
-	/* Grab the vDSO code pages. */
-	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
-
-	vdso_spec[0].pages = &vdso_pagelist[0];
-	vdso_spec[1].pages = &vdso_pagelist[1];
-
-	return 0;
+	return vdso_mappings_init("vdso", &vdso_start, &vdso_end,
+				  &vdso_mappings);
 }
 arch_initcall(vdso_init);

-int arch_setup_additional_pages(struct linux_binprm *bprm,
-				int uses_interp)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
-	void *ret;
-
-	vdso_text_len = vdso_pages << PAGE_SHIFT;
-	/* Be sure to map the data page */
-	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+	int ret;

 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;

-	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
-	if (IS_ERR_VALUE(vdso_base)) {
-		ret = ERR_PTR(vdso_base);
-		goto up_fail;
-	}
-	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				       VM_READ|VM_MAYREAD,
-				       &vdso_spec[0]);
-	if (IS_ERR(ret))
-		goto up_fail;
-
-	vdso_base += PAGE_SIZE;
-	mm->context.vdso = (void *)vdso_base;
-	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-				       VM_READ|VM_EXEC|
-				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				       &vdso_spec[1]);
-	if (IS_ERR(ret))
-		goto up_fail;
+	ret = vdso_setup(mm, &vdso_mappings);

 	up_write(&mm->mmap_sem);
-	return 0;
-
-up_fail:
-	mm->context.vdso = NULL;
-	up_write(&mm->mmap_sem);
-	return PTR_ERR(ret);
+	return ret;
 }

 /*
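As a usage sketch (again hypothetical, building on the vdso32_mappings
assumption above), the compat side could then install its vDSO with the
same vdso_setup() helper:

/* Hypothetical compat variant reusing vdso_setup(); not in this patch. */
int aarch32_setup_vdso_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = vdso_setup(mm, &vdso32_mappings);

	up_write(&mm->mmap_sem);
	return ret;
}

Note that vdso_setup() only assigns mm->context.vdso once both mappings
have been installed, so the old up_fail path (resetting
mm->context.vdso to NULL on failure) is no longer needed.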