| Message ID | 20190826121944.515-7-hch@lst.de (mailing list archive) |
|---|---|
| State | Superseded |
| Series | [01/11] xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintainance |
+ Boris, Juergen

On Mon, 26 Aug 2019, Christoph Hellwig wrote:
> x86 currently calls alloc_pages, but using dma-direct works as well
> there, with the added benefit of using the CMA pool if available.
> The biggest advantage is of course to remove a pointless bit of
> architecture specific code.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/x86/include/asm/xen/page-coherent.h | 16 ----------------
>  drivers/xen/swiotlb-xen.c                |  7 +++----
>  include/xen/arm/page-coherent.h          | 12 ------------
>  3 files changed, 3 insertions(+), 32 deletions(-)
>
> diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
> index 116777e7f387..8ee33c5edded 100644
> --- a/arch/x86/include/asm/xen/page-coherent.h
> +++ b/arch/x86/include/asm/xen/page-coherent.h
> @@ -5,22 +5,6 @@
>  #include <asm/page.h>
>  #include <linux/dma-mapping.h>
>
> -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
> -		dma_addr_t *dma_handle, gfp_t flags,
> -		unsigned long attrs)
> -{
> -	void *vstart = (void*)__get_free_pages(flags, get_order(size));
> -	*dma_handle = virt_to_phys(vstart);

This is where we need Boris and Juergen's opinion. From an ARM POV it
looks OK.

> -	return vstart;
> -}
> -
> -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
> -		void *cpu_addr, dma_addr_t dma_handle,
> -		unsigned long attrs)
> -{
> -	free_pages((unsigned long) cpu_addr, get_order(size));
> -}
> -
>  static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
> 	     dma_addr_t dev_addr, unsigned long offset, size_t size,
> 	     enum dma_data_direction dir, unsigned long attrs) { }
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index b8808677ae1d..f9dd4cb6e4b3 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -299,8 +299,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
> 	 * address. In fact on ARM virt_to_phys only works for kernel direct
> 	 * mapped RAM memory. Also see comment below.
> 	 */
> -	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
> -
> +	ret = dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
> 	if (!ret)
> 		return ret;
>
> @@ -319,7 +318,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
> 	else {
> 		if (xen_create_contiguous_region(phys, order,
> 				fls64(dma_mask), dma_handle) != 0) {
> -			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
> +			dma_direct_free(hwdev, size, ret, (dma_addr_t)phys, attrs);
> 			return NULL;
> 		}
> 		SetPageXenRemapped(virt_to_page(ret));
> @@ -351,7 +350,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
> 	    TestClearPageXenRemapped(virt_to_page(vaddr)))
> 		xen_destroy_contiguous_region(phys, order);
>
> -	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
> +	dma_direct_free(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
> }
>
> /*
> diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
> index a840d6949a87..0e244f4fec1a 100644
> --- a/include/xen/arm/page-coherent.h
> +++ b/include/xen/arm/page-coherent.h
> @@ -16,18 +16,6 @@ void __xen_dma_sync_single_for_cpu(struct device *hwdev,
>  void __xen_dma_sync_single_for_device(struct device *hwdev,
> 	dma_addr_t handle, size_t size, enum dma_data_direction dir);
>
> -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
> -	dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
> -{
> -	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
> -}
> -
> -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
> -	void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
> -{
> -	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
> -}
> -
>  static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
> 	dma_addr_t handle, size_t size, enum dma_data_direction dir)
> {
> --
> 2.20.1
>
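The inline question above concerns the x86 side of the swap: the removed x86 helper handed out direct-mapped pages from __get_free_pages() and derived the DMA handle with virt_to_phys(), and swiotlb-xen continues to call virt_to_phys() on the returned buffer later in xen_swiotlb_alloc_coherent(). The kernel-style sketch below is not part of the series; it only spells out, assuming ~v5.3 dma-direct behaviour on x86 (memory from the page allocator or CMA, hence in the kernel direct map), the property Boris and Juergen are being asked to confirm. The helper name is hypothetical; dma_direct_alloc(), virt_to_phys() and dma_to_phys() are the real interfaces used or implied by the patch. The full diff follows below.

/*
 * Illustrative only -- not part of the patch. Hypothetical wrapper used
 * to frame the question raised in the review comment above.
 */
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>

static void *xen_alloc_coherent_sketch(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	void *vaddr = dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);

	/*
	 * Assumption under review: on x86 the buffer comes from the page
	 * allocator or CMA and is therefore in the kernel direct map, so
	 * virt_to_phys() on it is valid and consistent with *dma_handle,
	 * exactly as with the removed __get_free_pages()-based helper.
	 */
	if (vaddr)
		WARN_ON_ONCE(virt_to_phys(vaddr) != dma_to_phys(hwdev, *dma_handle));
	return vaddr;
}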
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
index 116777e7f387..8ee33c5edded 100644
--- a/arch/x86/include/asm/xen/page-coherent.h
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -5,22 +5,6 @@
 #include <asm/page.h>
 #include <linux/dma-mapping.h>
 
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags,
-		unsigned long attrs)
-{
-	void *vstart = (void*)__get_free_pages(flags, get_order(size));
-	*dma_handle = virt_to_phys(vstart);
-	return vstart;
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle,
-		unsigned long attrs)
-{
-	free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs) { }
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index b8808677ae1d..f9dd4cb6e4b3 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -299,8 +299,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
-	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
-
+	ret = dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
 	if (!ret)
 		return ret;
 
@@ -319,7 +318,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	else {
 		if (xen_create_contiguous_region(phys, order,
 				fls64(dma_mask), dma_handle) != 0) {
-			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
+			dma_direct_free(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
 		SetPageXenRemapped(virt_to_page(ret));
@@ -351,7 +350,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	    TestClearPageXenRemapped(virt_to_page(vaddr)))
 		xen_destroy_contiguous_region(phys, order);
 
-	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
+	dma_direct_free(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 
 /*
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index a840d6949a87..0e244f4fec1a 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -16,18 +16,6 @@ void __xen_dma_sync_single_for_cpu(struct device *hwdev,
 void __xen_dma_sync_single_for_device(struct device *hwdev,
	dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-	void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
	dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
x86 currently calls alloc_pages, but using dma-direct works as well
there, with the added benefit of using the CMA pool if available.
The biggest advantage is of course to remove a pointless bit of
architecture specific code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/x86/include/asm/xen/page-coherent.h | 16 ----------------
 drivers/xen/swiotlb-xen.c                |  7 +++----
 include/xen/arm/page-coherent.h          | 12 ------------
 3 files changed, 3 insertions(+), 32 deletions(-)
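The commit message's main functional argument is that dma-direct can satisfy the allocation from the CMA pool before falling back to the normal page allocator, which the open-coded __get_free_pages() helper never did. The sketch below is a deliberately simplified, assumed view of that ~v5.3 allocation order (error handling, GFP/zone fixups and the non-coherent/remap paths are omitted); it is not the actual dma_direct_alloc() implementation.

/* Simplified, assumed sketch of the dma-direct allocation order referred
 * to in the commit message; not the real implementation. */
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static void *sketch_dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	unsigned int order = get_order(size);
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	/* The benefit named in the commit message: CMA is tried first. */
	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		/* Fallback comparable to the removed __get_free_pages() path. */
		page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	memset(page_address(page), 0, size);
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return page_address(page);
}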