diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -189,6 +189,7 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
 {
 	return heap->priv;
 }
+EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);
 
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 {
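With dma_heap_get_drvdata() exported, a heap built as a module can recover the private pointer it registered through exp_info.priv from inside its ops callbacks. A minimal sketch, assuming hypothetical my_heap_* names and context structure; the allocate prototype follows struct dma_heap_ops:

#include <linux/types.h>
#include <linux/err.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>

/* Hypothetical per-heap state, registered as exp_info.priv below. */
struct my_heap_ctx {
	phys_addr_t base;
	size_t size;
};

static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
					unsigned long len,
					unsigned long fd_flags,
					unsigned long heap_flags)
{
	/* The export under review: recover the driver-private context. */
	struct my_heap_ctx *ctx = dma_heap_get_drvdata(heap);

	if (len > ctx->size)
		return ERR_PTR(-ENOMEM);
	/* ... carve a buffer out of [ctx->base, ctx->base + ctx->size)
	 * and wrap it in a dma_buf ... */
	return ERR_PTR(-ENOMEM);	/* placeholder for the real path */
}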
@@ -272,6 +273,7 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 	kfree(heap);
 	return err_ret;
 }
+EXPORT_SYMBOL_GPL(dma_heap_add);
 
 static char *dma_heap_devnode(struct device *dev, umode_t *mode)
 {
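dma_heap_add() is the registration entry point, so this export is what actually makes a modular heap possible. Continuing the hypothetical sketch above:

#include <linux/module.h>
#include <linux/err.h>
#include <linux/dma-heap.h>

static struct my_heap_ctx my_ctx;
static struct dma_heap *my_heap;

static const struct dma_heap_ops my_heap_ops = {
	.allocate = my_heap_allocate,
};

static int __init my_heap_init(void)
{
	struct dma_heap_export_info exp_info = {
		.name = "my_heap",	/* surfaces as /dev/dma_heap/my_heap */
		.ops  = &my_heap_ops,
		.priv = &my_ctx,	/* what dma_heap_get_drvdata() returns */
	};

	my_heap = dma_heap_add(&exp_info);
	return PTR_ERR_OR_ZERO(my_heap);
}
module_init(my_heap_init);
MODULE_LICENSE("GPL");

Note that no unregistration counterpart is exported here, so such a module cannot be unloaded safely once the heap is visible to userspace.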
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -58,6 +58,7 @@
 #endif
 
 struct cma *dma_contiguous_default_area;
+EXPORT_SYMBOL_GPL(dma_contiguous_default_area);
 
 /*
  * Default global CMA area size can be defined in kernel's .config.
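dma_contiguous_default_area is the struct cma * behind the global default CMA region (reserved via the CONFIG_CMA_SIZE_* options or the cma= boot parameter). Exporting the variable lets a module feed that area straight into the cma_* calls exported further down. A sketch; note the declaration lives in <linux/dma-contiguous.h> on older trees and moved into <linux/dma-map-ops.h> around the 5.10 header reshuffle:

#include <linux/cma.h>
#include <linux/dma-map-ops.h>	/* <linux/dma-contiguous.h> on older kernels */

static struct cma *default_area_or_null(void)
{
	/* NULL when no global area was reserved at boot, so callers
	 * must be prepared to fall back to the page allocator. */
	return dma_contiguous_default_area;
}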
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -33,6 +33,7 @@
 #include <linux/io.h>
 #include <linux/kmemleak.h>
 #include <linux/swap.h>
+#include <linux/module.h>
 #include <trace/events/cma.h>
 
 #include "cma.h"
@@ -54,6 +55,7 @@ const char *cma_get_name(const struct cma *cma)
 {
 	return cma->name;
 }
+EXPORT_SYMBOL_GPL(cma_get_name);
 
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
 					     unsigned int align_order)
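struct cma is opaque outside mm/, so cma_get_name() is the only way for a module to label an area in diagnostics, e.g.:

#include <linux/cma.h>
#include <linux/printk.h>

static void report_area(const struct cma *cma)
{
	pr_info("using CMA area \"%s\"\n", cma_get_name(cma));
}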
@@ -499,6 +501,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
+EXPORT_SYMBOL_GPL(cma_alloc);
 
 /*
  * cma_alloc_bulk() - allocate high order bulk pages from contiguous area with
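Together with cma_release() (exported two hunks below), this gives modules direct use of the area allocator. A sketch, assuming the four-argument cma_alloc() of this era whose final parameter, truncated in the hunk header above, is bool no_warn; count is in pages and align is a page order:

#include <linux/cma.h>
#include <linux/mm.h>

/* 16 pages, aligned to a 16-page (order-4) boundary; no_warn = false
 * keeps the allocation-failure warning enabled. */
static struct page *grab_buffer(struct cma *cma)
{
	return cma_alloc(cma, 16, 4, false);
}

static void drop_buffer(struct cma *cma, struct page *pages)
{
	/* cma_release() returns false if the range does not belong to
	 * this area; warn so misuse is visible. */
	WARN_ON(!cma_release(cma, pages, 16));
}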
@@ -609,6 +612,7 @@ int cma_alloc_bulk(struct cma *cma, unsigned int align, bool fast,
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cma_alloc_bulk);
 
 /**
 * cma_release() - release allocated pages
@@ -642,6 +646,7 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
 	return true;
 }
+EXPORT_SYMBOL_GPL(cma_release);
 
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 {
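The cma_for_each_area() iterator in the trailing context pairs naturally with these exports: instead of hard-coding dma_contiguous_default_area, a module can walk the registered areas and select one by name. A sketch; find_cma_by_name() is a hypothetical helper, and note that this diff does not export cma_for_each_area() itself, so a modular caller would need that export as well:

#include <linux/cma.h>
#include <linux/string.h>

struct cma_match {
	const char *name;
	struct cma *found;
};

static int match_cma(struct cma *cma, void *data)
{
	struct cma_match *m = data;

	if (strcmp(cma_get_name(cma), m->name))
		return 0;	/* keep walking */
	m->found = cma;
	return 1;		/* non-zero return stops the iteration */
}

/* Hypothetical helper: resolve a registered CMA area by name. */
static struct cma *find_cma_by_name(const char *name)
{
	struct cma_match m = { .name = name, .found = NULL };

	cma_for_each_area(match_cma, &m);
	return m.found;
}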