From patchwork Tue Mar 1 10:53:10 2022
X-Patchwork-Submitter: Christoph Hellwig
X-Patchwork-Id: 12764620
From: Christoph Hellwig
To: iommu@lists.linux-foundation.org
Cc: x86@kernel.org, Anshuman Khandual, Tom Lendacky,
 Konrad Rzeszutek Wilk, Stefano Stabellini, Boris Ostrovsky,
 Juergen Gross, Joerg Roedel, David Woodhouse, Lu Baolu, Robin Murphy,
 linux-arm-kernel@lists.infradead.org, xen-devel@lists.xenproject.org,
 linux-ia64@vger.kernel.org, linux-mips@vger.kernel.org,
 linuxppc-dev@lists.ozlabs.org, linux-riscv@lists.infradead.org,
 linux-s390@vger.kernel.org, linux-hyperv@vger.kernel.org,
 tboot-devel@lists.sourceforge.net, linux-pci@vger.kernel.org
Subject: [PATCH 11/12] swiotlb: merge swiotlb-xen initialization into swiotlb
Date: Tue, 1 Mar 2022 12:53:10 +0200
Message-Id: <20220301105311.885699-12-hch@lst.de>
In-Reply-To: <20220301105311.885699-1-hch@lst.de>
References: <20220301105311.885699-1-hch@lst.de>

Allow passing a remap argument to the swiotlb initialization functions
to handle the Xen/x86 remap case.  ARM/ARM64 never did any remapping
from xen_swiotlb_fixup, so we don't even need that quirk.
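For readers skimming the diff, the new interface has the following shape
(signatures as added to include/linux/swiotlb.h below; behavior per the
kernel/dma/swiotlb.c hunks):

	int swiotlb_init_late(size_t size, gfp_t gfp_mask,
			      int (*remap)(void *tlb, unsigned long nslabs));
	void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
			      int (*remap)(void *tlb, unsigned long nslabs));

A non-NULL remap callback is invoked on the freshly allocated bounce
buffer; if it fails, the buffer is freed and the allocation retried with
half the number of slabs, down to a 2MB minimum.  swiotlb_init() keeps
its old behavior by forwarding a NULL remap.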
Signed-off-by: Christoph Hellwig
Tested-by: Stefano Stabellini
Signed-off-by: Stefano Stabellini
---
 arch/arm/xen/mm.c               |  23 +++---
 arch/x86/include/asm/xen/page.h |   5 --
 arch/x86/kernel/pci-dma.c       |  19 +++--
 arch/x86/pci/sta2x11-fixup.c    |   2 +-
 drivers/xen/swiotlb-xen.c       | 128 +------------------------------
 include/linux/swiotlb.h         |   7 +-
 include/xen/arm/page.h          |   1 -
 include/xen/swiotlb-xen.h       |   8 +-
 kernel/dma/swiotlb.c            | 120 +++++++++++++++---------------
 9 files changed, 96 insertions(+), 217 deletions(-)

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index a7e54a087b802..58b40f87617d3 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -23,22 +23,20 @@
 #include
 #include

-unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+static gfp_t xen_swiotlb_gfp(void)
 {
 	phys_addr_t base;
-	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
 	u64 i;

 	for_each_mem_range(i, &base, NULL) {
 		if (base < (phys_addr_t)0xffffffff) {
 			if (IS_ENABLED(CONFIG_ZONE_DMA32))
-				flags |= __GFP_DMA32;
-			else
-				flags |= __GFP_DMA;
-			break;
+				return __GFP_DMA32;
+			return __GFP_DMA;
 		}
 	}
-	return __get_free_pages(flags, order);
+
+	return GFP_KERNEL;
 }

 static bool hypercall_cflush = false;
@@ -143,10 +141,15 @@ static int __init xen_mm_init(void)
 	if (!xen_swiotlb_detect())
 		return 0;

-	rc = xen_swiotlb_init();
 	/* we can work with the default swiotlb */
-	if (rc < 0 && rc != -EEXIST)
-		return rc;
+	if (!io_tlb_default_mem.nslabs) {
+		if (!xen_initial_domain())
+			return -EINVAL;
+		rc = swiotlb_init_late(swiotlb_size_or_default(),
+				       xen_swiotlb_gfp(), NULL);
+		if (rc < 0)
+			return rc;
+	}

 	cflush.op = 0;
 	cflush.a.dev_bus_addr = 0;
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index e989bc2269f54..1fc67df500145 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -357,9 +357,4 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
 	return false;
 }

-static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
-{
-	return __get_free_pages(__GFP_NOWARN, order);
-}
-
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index e0def4b1c3181..2f2c468acb955 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -71,15 +71,12 @@ static inline void __init pci_swiotlb_detect(void)
 #endif /* CONFIG_SWIOTLB */

 #ifdef CONFIG_SWIOTLB_XEN
-static bool xen_swiotlb;
-
 static void __init pci_xen_swiotlb_init(void)
 {
 	if (!xen_initial_domain() && !x86_swiotlb_enable)
 		return;
 	x86_swiotlb_enable = false;
-	xen_swiotlb = true;
-	xen_swiotlb_init_early();
+	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
 	dma_ops = &xen_swiotlb_dma_ops;
 	if (IS_ENABLED(CONFIG_PCI))
 		pci_request_acs();
@@ -87,14 +84,16 @@ static void __init pci_xen_swiotlb_init(void)

 int pci_xen_swiotlb_init_late(void)
 {
-	int rc;
-
-	if (xen_swiotlb)
+	if (dma_ops == &xen_swiotlb_dma_ops)
 		return 0;

-	rc = xen_swiotlb_init();
-	if (rc)
-		return rc;
+	/* we can work with the default swiotlb */
+	if (!io_tlb_default_mem.nslabs) {
+		int rc = swiotlb_init_late(swiotlb_size_or_default(),
+					   GFP_KERNEL, xen_swiotlb_fixup);
+		if (rc < 0)
+			return rc;
+	}

 	/* XXX: this switches the dma ops under live devices!
 	 */
 	dma_ops = &xen_swiotlb_dma_ops;
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index c7e6faf59a861..7368afc039987 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
 		int size = STA2X11_SWIOTLB_SIZE;
 		/* First instance: register your own swiotlb area */
 		dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
-		if (swiotlb_init_late(size, GFP_DMA))
+		if (swiotlb_init_late(size, GFP_DMA, NULL))
 			dev_emerg(&pdev->dev, "init swiotlb failed\n");
 	}
 	list_add(&instance->list, &sta2x11_instance_list);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index c2da3eb4826e8..df8085b50df10 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -104,7 +104,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 	return 0;
 }

-static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
 	int rc;
 	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -130,132 +130,6 @@ static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 	return 0;
 }

-enum xen_swiotlb_err {
-	XEN_SWIOTLB_UNKNOWN = 0,
-	XEN_SWIOTLB_ENOMEM,
-	XEN_SWIOTLB_EFIXUP
-};
-
-static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
-{
-	switch (err) {
-	case XEN_SWIOTLB_ENOMEM:
-		return "Cannot allocate Xen-SWIOTLB buffer\n";
-	case XEN_SWIOTLB_EFIXUP:
-		return "Failed to get contiguous memory for DMA from Xen!\n"\
-		       "You either: don't have the permissions, do not have"\
-		       " enough free memory under 4GB, or the hypervisor memory"\
-		       " is too fragmented!";
-	default:
-		break;
-	}
-	return "";
-}
-
-int xen_swiotlb_init(void)
-{
-	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
-	unsigned long bytes = swiotlb_size_or_default();
-	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
-	unsigned int order, repeat = 3;
-	int rc = -ENOMEM;
-	char *start;
-
-	if (io_tlb_default_mem.nslabs) {
-		pr_warn("swiotlb buffer already initialized\n");
-		return -EEXIST;
-	}
-
-retry:
-	m_ret = XEN_SWIOTLB_ENOMEM;
-	order = get_order(bytes);
-
-	/*
-	 * Get IO TLB memory from any location.
-	 */
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		start = (void *)xen_get_swiotlb_free_pages(order);
-		if (start)
-			break;
-		order--;
-	}
-	if (!start)
-		goto exit;
-	if (order != get_order(bytes)) {
-		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
-			(PAGE_SIZE << order) >> 20);
-		nslabs = SLABS_PER_PAGE << order;
-		bytes = nslabs << IO_TLB_SHIFT;
-	}
-
-	/*
-	 * And replace that memory with pages under 4GB.
-	 */
-	rc = xen_swiotlb_fixup(start, nslabs);
-	if (rc) {
-		free_pages((unsigned long)start, order);
-		m_ret = XEN_SWIOTLB_EFIXUP;
-		goto error;
-	}
-	rc = swiotlb_late_init_with_tbl(start, nslabs);
-	if (rc)
-		return rc;
-	return 0;
-error:
-	if (nslabs > 1024 && repeat--) {
-		/* Min is 2MB */
-		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
-		bytes = nslabs << IO_TLB_SHIFT;
-		pr_info("Lowering to %luMB\n", bytes >> 20);
-		goto retry;
-	}
-exit:
-	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
-	return rc;
-}
-
-#ifdef CONFIG_X86
-void __init xen_swiotlb_init_early(void)
-{
-	unsigned long bytes = swiotlb_size_or_default();
-	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
-	unsigned int repeat = 3;
-	char *start;
-	int rc;
-
-retry:
-	/*
-	 * Get IO TLB memory from any location.
-	 */
-	start = memblock_alloc(PAGE_ALIGN(bytes),
-			       IO_TLB_SEGSIZE << IO_TLB_SHIFT);
-	if (!start)
-		panic("%s: Failed to allocate %lu bytes\n",
-		      __func__, PAGE_ALIGN(bytes));
-
-	/*
-	 * And replace that memory with pages under 4GB.
-	 */
-	rc = xen_swiotlb_fixup(start, nslabs);
-	if (rc) {
-		memblock_free(start, PAGE_ALIGN(bytes));
-		if (nslabs > 1024 && repeat--) {
-			/* Min is 2MB */
-			nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
-			bytes = nslabs << IO_TLB_SHIFT;
-			pr_info("Lowering to %luMB\n", bytes >> 20);
-			goto retry;
-		}
-		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
-	}
-
-	if (swiotlb_init_with_tbl(start, nslabs, SWIOTLB_VERBOSE))
-		panic("Cannot allocate SWIOTLB buffer");
-}
-#endif /* CONFIG_X86 */
-
 static void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags,
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index ee655f2e4d28b..919cf82ed978e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -34,10 +34,11 @@ struct scatterlist;
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)

-int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags);
 unsigned long swiotlb_size_or_default(void);
-extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-int swiotlb_init_late(size_t size, gfp_t gfp_mask);
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+	int (*remap)(void *tlb, unsigned long nslabs));
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+	int (*remap)(void *tlb, unsigned long nslabs));
 extern void __init swiotlb_update_mem_attributes(void);

 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
index ac1b654705631..7e199c6656b90 100644
--- a/include/xen/arm/page.h
+++ b/include/xen/arm/page.h
@@ -115,6 +115,5 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 bool xen_arch_need_swiotlb(struct device *dev,
 			   phys_addr_t phys,
 			   dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);

 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index b3e647f86e3e2..590ceb923f0c8 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -10,8 +10,12 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
 void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
 			     size_t size, enum dma_data_direction dir);

-int xen_swiotlb_init(void);
-void __init xen_swiotlb_init_early(void);
+#ifdef CONFIG_SWIOTLB_XEN
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
+#else
+#define xen_swiotlb_fixup NULL
+#endif
+
 extern const struct dma_map_ops xen_swiotlb_dma_ops;

 #endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 77cf73dc20a78..128363dc9b5bb 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -234,40 +234,17 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	return;
 }

-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
-		unsigned int flags)
-{
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
-	size_t alloc_size;
-
-	if (swiotlb_force_disable)
-		return 0;
-
-	/* protect against double initialization */
-	if (WARN_ON_ONCE(mem->nslabs))
-		return -ENOMEM;
-
-	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
-	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
-	if (!mem->slots)
-		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
-		      __func__, alloc_size, PAGE_SIZE);
-
-	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-	mem->force_bounce = flags & SWIOTLB_FORCE;
-
-	if (flags & SWIOTLB_VERBOSE)
-		swiotlb_print_info();
-	return 0;
-}
-
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
-	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
+	struct io_tlb_mem *mem = &io_tlb_default_mem;
+	unsigned long nslabs = default_nslabs;
+	size_t alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
+	size_t bytes;
 	void *tlb;

 	if (!addressing_limit && !swiotlb_force_bounce)
@@ -275,23 +252,48 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 	if (swiotlb_force_disable)
 		return;

+	/* protect against double initialization */
+	if (WARN_ON_ONCE(mem->nslabs))
+		return;
+
 	/*
 	 * By default allocate the bonuce buffer memory from low memory.
 	 */
+retry:
+	bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
 	if (flags & SWIOTLB_ANY)
 		tlb = memblock_alloc(bytes, PAGE_SIZE);
 	else
 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 	if (!tlb)
-		goto fail;
-	if (swiotlb_init_with_tbl(tlb, default_nslabs, flags))
-		goto fail_free_mem;
-	return;
+		panic("%s: failed to allocate tlb structure\n", __func__);
+
+	if (remap && remap(tlb, nslabs) < 0) {
+		memblock_free(tlb, PAGE_ALIGN(bytes));
+
+		/* Min is 2MB */
+		if (nslabs <= 1024)
+			panic("%s: Failed to remap %zu bytes\n",
+			      __func__, bytes);
+		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+		goto retry;
+	}
+
+	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
+	if (!mem->slots)
+		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+		      __func__, alloc_size, PAGE_SIZE);

-fail_free_mem:
-	memblock_free(tlb, bytes);
-fail:
-	pr_warn("Cannot allocate buffer");
+	swiotlb_init_io_tlb_mem(mem, __pa(tlb), default_nslabs, false);
+	mem->force_bounce = flags & SWIOTLB_FORCE;
+
+	if (flags & SWIOTLB_VERBOSE)
+		swiotlb_print_info();
+}
+
+void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+{
+	return swiotlb_init_remap(addressing_limit, flags, NULL);
 }

 /*
@@ -299,8 +301,10 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
  * initialize the swiotlb later using the slab allocator if needed.
  * This should be just like above, but with some error catching.
  */
-int swiotlb_init_late(size_t size, gfp_t gfp_mask)
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
+	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
 	unsigned long bytes;
 	unsigned char *vstart = NULL;
@@ -310,9 +314,14 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 	if (swiotlb_force_disable)
 		return 0;

+	/* protect against double initialization */
+	if (WARN_ON_ONCE(mem->nslabs))
+		return -ENOMEM;
+
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
+retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
 	bytes = nslabs << IO_TLB_SHIFT;
@@ -333,33 +342,28 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 			(PAGE_SIZE << order) >> 20);
 		nslabs = SLABS_PER_PAGE << order;
 	}
-	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
-	if (rc)
-		free_pages((unsigned long)vstart, order);
-
-	return rc;
-}
-
-int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
-{
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
-	unsigned long bytes = nslabs << IO_TLB_SHIFT;

-	if (swiotlb_force_disable)
-		return 0;
+	if (remap)
+		rc = remap(vstart, nslabs);
+	if (rc) {
+		free_pages((unsigned long)vstart, order);

-	/* protect against double initialization */
-	if (WARN_ON_ONCE(mem->nslabs))
-		return -ENOMEM;
+		/* Min is 2MB */
+		if (nslabs <= 1024)
+			return rc;
+		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+		goto retry;
+	}

 	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 		get_order(array_size(sizeof(*mem->slots), nslabs)));
-	if (!mem->slots)
+	if (!mem->slots) {
+		free_pages((unsigned long)vstart, order);
 		return -ENOMEM;
+	}

-	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
-	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
+	set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT);
+	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);

 	swiotlb_print_info();
 	return 0;
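
For illustration, a late caller could use the new remap argument the same
way pci_xen_swiotlb_init_late() does above.  This is a hypothetical sketch,
not part of the patch; my_remap and my_swiotlb_setup are invented names:

	#include <linux/swiotlb.h>

	/*
	 * Hypothetical remap callback: post-process the freshly allocated
	 * bounce buffer, e.g. exchange it for memory below 4GB the way
	 * xen_swiotlb_fixup() does.  Returning a negative errno makes
	 * swiotlb_init_late() free the buffer and retry with half as many
	 * slabs, down to the 2MB minimum.
	 */
	static int my_remap(void *tlb, unsigned long nslabs)
	{
		return 0;
	}

	static int my_swiotlb_setup(void)
	{
		/* only initialize if no default bounce buffer exists yet */
		if (!io_tlb_default_mem.nslabs)
			return swiotlb_init_late(swiotlb_size_or_default(),
						 GFP_KERNEL, my_remap);
		return 0;
	}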