@@ -586,7 +586,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
struct page *page;
void *ptr = NULL;
- page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
+ page = dma_alloc_from_contiguous(dev, count, order, gfp);
if (!page)
return NULL;
@@ -1291,8 +1291,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
unsigned long order = get_order(size);
struct page *page;
- page = dma_alloc_from_contiguous(dev, count, order,
- gfp & __GFP_NOWARN);
+ page = dma_alloc_from_contiguous(dev, count, order, gfp);
if (!page)
goto error;
@@ -159,7 +159,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
struct page *page;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), gfp & __GFP_NOWARN);
+ get_order(size), gfp);
if (!page)
return NULL;
@@ -157,7 +157,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (gfpflags_allow_blocking(flag))
page = dma_alloc_from_contiguous(dev, count, get_order(size),
- flag & __GFP_NOWARN);
+ flag);
if (!page)
page = alloc_pages(flag | __GFP_ZERO, get_order(size));
@@ -2692,7 +2692,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
return NULL;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flag & __GFP_NOWARN);
+ get_order(size), flag);
if (!page)
return NULL;
}
@@ -3791,8 +3791,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
if (gfpflags_allow_blocking(flags)) {
unsigned int count = size >> PAGE_SHIFT;
- page = dma_alloc_from_contiguous(dev, count, order,
- flags & __GFP_NOWARN);
+ page = dma_alloc_from_contiguous(dev, count, order, flags);
if (page && iommu_no_mapping(dev) &&
page_to_phys(page) + size > dev->coherent_dma_mask) {
dma_release_from_contiguous(dev, page, count);
@@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
}
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, bool no_warn);
+ unsigned int order, gfp_t gfp_mask);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count);
@@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
static inline
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, bool no_warn)
+ unsigned int order, gfp_t gfp_mask)
{
return NULL;
}
@@ -182,7 +182,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
* @dev: Pointer to device for which the allocation is performed.
* @count: Requested number of pages.
* @align: Requested alignment of pages (in PAGE_SIZE order).
- * @no_warn: Avoid printing message about failed allocation.
+ * @gfp_mask: GFP flags to use for this allocation.
*
* This function allocates memory buffer for specified device. It uses
* device specific contiguous memory area if available or the default
@@ -190,12 +190,13 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
* function.
*/
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int align, bool no_warn)
+ unsigned int align, gfp_t gfp_mask)
{
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
- return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
+ return cma_alloc(dev_get_cma_area(dev), count, align,
+ gfp_mask & __GFP_NOWARN);
}
/**
@@ -111,8 +111,7 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
again:
/* CMA can be used only in the context which permits sleeping */
if (gfpflags_allow_blocking(gfp)) {
- page = dma_alloc_from_contiguous(dev, count, page_order,
- gfp & __GFP_NOWARN);
+ page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_release_from_contiguous(dev, page, count);
page = NULL;
@@ -115,7 +115,7 @@ int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
if (dev_get_cma_area(NULL))
page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order, false);
+ pool_size_order, gfp);
else
page = alloc_pages(gfp, pool_size_order);
if (!page)
This reverts commit d834c5ab83febf9624ad3b16c3c348aa1e02014c.

Commit d834c5ab83fe ("kernel/dma: remove unsupported gfp_mask parameter
from dma_alloc_from_contiguous()") attempts to make it clearer that the
CMA allocator doesn't support all of the standard GFP flags by removing
that parameter from cma_alloc(). Unfortunately, this doesn't make it
much clearer what CMA supports, as exemplified by the ARM DMA layer
issue, which simply masks away the GFP_NOIO flag when calling the CMA
allocator, letting it assume the mask is always the hardcoded
GFP_KERNEL.

The removal of the gfp_mask parameter also doesn't make it any easier
to eventually support these flags in the CMA allocator, as the rest of
this series attempts to do. Instead, this patch and the next one
restore that parameter.

CC: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
[Fix new callers]
---
 arch/arm/mm/dma-mapping.c      | 5 ++---
 arch/arm64/mm/dma-mapping.c    | 2 +-
 arch/xtensa/kernel/pci-dma.c   | 2 +-
 drivers/iommu/amd_iommu.c      | 2 +-
 drivers/iommu/intel-iommu.c    | 3 +--
 include/linux/dma-contiguous.h | 4 ++--
 kernel/dma/contiguous.c        | 7 ++++---
 kernel/dma/direct.c            | 3 +--
 kernel/dma/remap.c             | 2 +-
 9 files changed, 14 insertions(+), 16 deletions(-)
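To make the information loss concrete, here is a minimal userspace
sketch, not kernel code: cma_alloc_stub() is a hypothetical stand-in,
and the flag bit values are illustrative rather than the real
include/linux/gfp.h definitions (only their composition, e.g.
GFP_NOIO = __GFP_RECLAIM, mirrors the kernel's). It shows what happens
when a caller's gfp mask is collapsed into the single no_warn bool that
the pre-revert API accepts:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

/* Illustrative bit values only; not the real include/linux/gfp.h ones. */
#define __GFP_RECLAIM	0x10u
#define __GFP_IO	0x40u
#define __GFP_FS	0x80u
#define __GFP_NOWARN	0x200u
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOIO	(__GFP_RECLAIM)	/* reclaim ok, but no I/O or FS */

/* Stand-in for the pre-revert cma_alloc(): only a bool arrives. */
static void cma_alloc_stub(bool no_warn)
{
	/* The caller's allocation context is gone; GFP_KERNEL is assumed. */
	printf("cma_alloc: no_warn=%d, context assumed GFP_KERNEL\n",
	       no_warn);
}

int main(void)
{
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;

	/*
	 * This mirrors the callers changed above: masking with
	 * __GFP_NOWARN leaves one surviving bit, so the GFP_NOIO
	 * restriction never reaches the allocator.
	 */
	cma_alloc_stub(gfp & __GFP_NOWARN);
	return 0;
}

Everything except the __GFP_NOWARN bit is discarded before the
allocator sees it, which is why restoring the full gfp_t parameter is a
prerequisite for honoring contexts such as GFP_NOIO in CMA.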