
[v2,3/4] dma-buf: add export symbol for dma-heap

Message ID 20201201175144.3996569-4-minchan@kernel.org (mailing list archive)
State New, archived
Series Chunk Heap Support on DMA-HEAP

Commit Message

Minchan Kim Dec. 1, 2020, 5:51 p.m. UTC
From: Hyesoo Yu <hyesoo.yu@samsung.com>

The heaps can be built as modules, so the functions needed to register
a dma-heap must be exported. A modular dma-heap may also allocate from
and release to a CMA area, but the CMA functions are not currently
exported. Export them for the following patches.
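
A minimal sketch (not part of this patch) of how a modular heap driver
would lean on these symbols; the example_* names are hypothetical, the
header locations are assumed from the current tree, and the dma-buf
export path is elided:

#include <linux/cma.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>	/* dma_contiguous_default_area (assumed) */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>

static int example_heap_allocate(struct dma_heap *heap, unsigned long len,
				 unsigned long fd_flags,
				 unsigned long heap_flags)
{
	struct cma *cma = dma_heap_get_drvdata(heap);	/* exported by this patch */
	unsigned long nr_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	struct page *pages;

	pages = cma_alloc(cma, nr_pages, 0, false);	/* exported by this patch */
	if (!pages)
		return -ENOMEM;

	/*
	 * A real driver would wrap the pages in a dma-buf and return the
	 * exported fd; that part is elided, so hand the memory back.
	 */
	cma_release(cma, pages, nr_pages);		/* exported by this patch */
	return -EOPNOTSUPP;
}

static const struct dma_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
};

static int __init example_heap_init(void)
{
	struct dma_heap_export_info exp_info = {
		.name = "example-cma",
		.ops = &example_heap_ops,
		/* default CMA area: needs the variable exported by this patch */
		.priv = dma_contiguous_default_area,
	};
	struct dma_heap *heap;

	heap = dma_heap_add(&exp_info);			/* exported by this patch */
	return PTR_ERR_OR_ZERO(heap);
}
module_init(example_heap_init);
MODULE_LICENSE("GPL");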

Signed-off-by: Hyesoo Yu <hyesoo.yu@samsung.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 drivers/dma-buf/dma-heap.c | 2 ++
 kernel/dma/contiguous.c    | 1 +
 mm/cma.c                   | 5 +++++
 3 files changed, 8 insertions(+)

Comments

Christoph Hellwig Dec. 2, 2020, 1:51 p.m. UTC | #1
On Tue, Dec 01, 2020 at 09:51:43AM -0800, Minchan Kim wrote:
> From: Hyesoo Yu <hyesoo.yu@samsung.com>
> 
> The heaps can be built as modules, so the functions needed to register
> a dma-heap must be exported. A modular dma-heap may also allocate from
> and release to a CMA area, but the CMA functions are not currently
> exported. Export them for the following patches.
> 
> Signed-off-by: Hyesoo Yu <hyesoo.yu@samsung.com>
> Signed-off-by: Minchan Kim <minchan@kernel.org>

NAK.  I really don't think any module has business gaining that
kind of low-level access to the various symbols.  I think in general
your heap can just be built-in and we should be fine.

Patch

diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index afd22c9dbdcf..cc6339cbca09 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -189,6 +189,7 @@  void *dma_heap_get_drvdata(struct dma_heap *heap)
 {
 	return heap->priv;
 }
+EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);
 
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 {
@@ -272,6 +273,7 @@  struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 	kfree(heap);
 	return err_ret;
 }
+EXPORT_SYMBOL_GPL(dma_heap_add);
 
 static char *dma_heap_devnode(struct device *dev, umode_t *mode)
 {
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 3d63d91cba5c..7e9777119b29 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -58,6 +58,7 @@ 
 #endif
 
 struct cma *dma_contiguous_default_area;
+EXPORT_SYMBOL_GPL(dma_contiguous_default_area);
 
 /*
  * Default global CMA area size can be defined in kernel's .config.
diff --git a/mm/cma.c b/mm/cma.c
index 4459045fa717..d39cb7066b9e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -33,6 +33,7 @@ 
 #include <linux/io.h>
 #include <linux/kmemleak.h>
 #include <linux/swap.h>
+#include <linux/module.h>
 #include <trace/events/cma.h>
 
 #include "cma.h"
@@ -54,6 +55,7 @@  const char *cma_get_name(const struct cma *cma)
 {
 	return cma->name;
 }
+EXPORT_SYMBOL_GPL(cma_get_name);
 
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
 					     unsigned int align_order)
@@ -499,6 +501,7 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
+EXPORT_SYMBOL_GPL(cma_alloc);
 
 /*
  * cma_alloc_bulk() - allocate high order bulk pages from contiguous area with
@@ -609,6 +612,7 @@  int cma_alloc_bulk(struct cma *cma, unsigned int align, bool fast,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cma_alloc_bulk);
 
 /**
  * cma_release() - release allocated pages
@@ -642,6 +646,7 @@  bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(cma_release);
 
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 {