@@ -351,10 +351,21 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */

+#if IS_ENABLED(CONFIG_HAS_DMA) && IS_ENABLED(CONFIG_CGROUP_DMEM)
+struct dmem_cgroup_region *
+dma_get_dmem_cgroup_region(struct device *dev);
+#else
+static inline struct dmem_cgroup_region *
+dma_get_dmem_cgroup_region(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
@@ -608,10 +608,26 @@ static int __init dma_init_dmem_cgroup(void)
default_dmem_cgroup_region = region;
return 0;
}
core_initcall(dma_init_dmem_cgroup);
+
+struct dmem_cgroup_region *
+dma_get_dmem_cgroup_region(struct device *dev)
+{
+	struct dmem_cgroup_region *region;
+
+	region = dma_coherent_get_dmem_cgroup_region(dev);
+	if (region)
+		return region;
+
+	if (dma_alloc_direct(dev, get_dma_ops(dev)))
+		return dma_direct_get_dmem_cgroup_region(dev);
+
+	return default_dmem_cgroup_region;
+}
+EXPORT_SYMBOL(dma_get_dmem_cgroup_region);
#endif

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
Consumers of the DMA API will have to know which DMA region their device
allocates from in order to charge the memory allocation to the right one.
Let's provide an accessor for that region.

Signed-off-by: Maxime Ripard <mripard@kernel.org>
---
 include/linux/dma-mapping.h | 11 +++++++++++
 kernel/dma/mapping.c        | 16 ++++++++++++++++
 2 files changed, 27 insertions(+)
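As a usage illustration (not part of the patch): a consumer would resolve
the region once and charge its allocations against it. This is a minimal
sketch assuming the dmem cgroup charge API from include/linux/cgroup_dmem.h
(dmem_cgroup_try_charge(), dmem_cgroup_uncharge(),
dmem_cgroup_pool_state_put()); my_buffer, my_buffer_alloc() and
my_buffer_free() are made-up names:

/*
 * Illustration only -- not part of this patch. Charges a DMA allocation
 * to the dmem cgroup region the device allocates from. The charge API is
 * assumed from include/linux/cgroup_dmem.h; my_buffer is a made-up type.
 */
#include <linux/cgroup_dmem.h>
#include <linux/dma-mapping.h>

struct my_buffer {
	struct dmem_cgroup_pool_state *pool;
	void *vaddr;
	dma_addr_t dma;
	size_t size;
};

static int my_buffer_alloc(struct device *dev, struct my_buffer *buf,
			   size_t size)
{
	struct dmem_cgroup_region *region;
	struct dmem_cgroup_pool_state *limit_pool = NULL;
	int ret;

	buf->pool = NULL;

	/* Resolve the region this device's allocations come from. */
	region = dma_get_dmem_cgroup_region(dev);
	if (region) {
		/* Charge up front so we fail before allocating. */
		ret = dmem_cgroup_try_charge(region, size, &buf->pool,
					     &limit_pool);
		if (ret) {
			/*
			 * On failure, limit_pool (if set) names the cgroup
			 * that hit its limit; a real consumer could use it
			 * to drive eviction before retrying.
			 */
			if (limit_pool)
				dmem_cgroup_pool_state_put(limit_pool);
			return ret;
		}
	}

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma, GFP_KERNEL);
	if (!buf->vaddr) {
		if (buf->pool)
			dmem_cgroup_uncharge(buf->pool, size);
		return -ENOMEM;
	}

	buf->size = size;
	return 0;
}

static void my_buffer_free(struct device *dev, struct my_buffer *buf)
{
	dma_free_coherent(dev, buf->size, buf->vaddr, buf->dma);
	if (buf->pool)
		dmem_cgroup_uncharge(buf->pool, buf->size);
}

Since dma_get_dmem_cgroup_region() mirrors the paths dma_alloc_attrs() can
take (per-device coherent area, dma-direct, or the default region), the
charge lands in the same region the memory is actually allocated from.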