| Message ID | 3-v3-1893cd8b9369+1925-mlx5_arm_wc_jgg@nvidia.com (mailing list archive) |
|---|---|
| State | Accepted |
| Delegated to: | Jason Gunthorpe |
| Headers | show |
| Series | Fix mlx5 write combining support on new ARM64 cores (expand) |
On Thu, Apr 11, 2024, at 18:46, Jason Gunthorpe wrote:
> Complete switching the __iowriteXX_copy() routines over to use #define and
> arch provided inline/macro functions instead of weak symbols.
>
> S390 has an implementation that simply calls another memcpy
> function. Inline this so the callers don't have to do two jumps.
>
> Acked-by: Niklas Schnelle <schnelle@linux.ibm.com>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>  arch/s390/include/asm/io.h | 7 +++++++
>  arch/s390/pci/pci.c        | 6 ------
>  include/linux/io.h         | 3 +++
>  lib/iomap_copy.c           | 7 +++----
>  4 files changed, 13 insertions(+), 10 deletions(-)

For the common code bits:

Acked-by: Arnd Bergmann <arnd@arndb.de>

> -void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
> -                                            const void *from,
> -                                            size_t count)
> +#ifndef __iowrite64_copy
> +void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
> {

I'm always happy to see __weak functions get cleaned up.

     Arnd
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 00704fc8a54b30..0fbc992d7a5ea7 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -81,6 +81,13 @@ static inline void __iowrite32_copy(void __iomem *to, const void *from,
 }
 #define __iowrite32_copy __iowrite32_copy
 
+static inline void __iowrite64_copy(void __iomem *to, const void *from,
+				    size_t count)
+{
+	zpci_memcpy_toio(to, from, count * 8);
+}
+#define __iowrite64_copy __iowrite64_copy
+
 #endif /* CONFIG_PCI */
 
 #include <asm-generic/io.h>
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 26afde0d1ed34c..0de0f6e405b51e 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -250,12 +250,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 	return 0;
 }
 
-/* combine single writes by using store-block insn */
-void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
-{
-	zpci_memcpy_toio(to, from, count * 8);
-}
-
 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 			   unsigned long prot)
 {
diff --git a/include/linux/io.h b/include/linux/io.h
index ce86120ce9d526..42e132808f0035 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -21,7 +21,10 @@ void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
 #endif
 void __ioread32_copy(void *to, const void __iomem *from, size_t count);
+
+#ifndef __iowrite64_copy
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
+#endif
 
 #ifdef CONFIG_MMU
 int ioremap_page_range(unsigned long addr, unsigned long end,
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 8ddcbb53507dfe..2fd5712fb7c02b 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -60,9 +60,8 @@ EXPORT_SYMBOL_GPL(__ioread32_copy);
  * time. Order of access is not guaranteed, nor is a memory barrier
  * performed afterwards.
  */
-void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
-					    const void *from,
-					    size_t count)
+#ifndef __iowrite64_copy
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 {
 #ifdef CONFIG_64BIT
 	u64 __iomem *dst = to;
@@ -75,5 +74,5 @@ void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
 	__iowrite32_copy(to, from, count * 2);
 #endif
 }
-
 EXPORT_SYMBOL_GPL(__iowrite64_copy);
+#endif