Now that we have a DMEM region per CMA region, we can track the
allocations of the CMA heap through DMEM.

Signed-off-by: Maxime Ripard <mripard@kernel.org>
---
 drivers/dma-buf/heaps/cma_heap.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -7,10 +7,11 @@
*
* Also utilizing parts of Andrew Davis' SRAM heap:
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*/
+#include <linux/cgroup_dmem.h>
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
@@ -276,23 +277,31 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
unsigned long len,
u32 fd_flags,
u64 heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
+ struct dmem_cgroup_pool_state *pool;
struct cma_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
size_t size = PAGE_ALIGN(len);
pgoff_t pagecount = size >> PAGE_SHIFT;
unsigned long align = get_order(size);
struct page *cma_pages;
struct dma_buf *dmabuf;
int ret = -ENOMEM;
pgoff_t pg;

+ ret = dmem_cgroup_try_charge(cma_get_dmem_cgroup_region(cma_heap->cma),
+ size, &pool, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer)
- return ERR_PTR(-ENOMEM);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto uncharge_cgroup;
+ }

INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
buffer->len = size;
@@ -348,18 +357,23 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
goto free_pages;
}
+
+ dmabuf->cgroup_pool = pool;
+
return dmabuf;

free_pages:
kfree(buffer->pages);
free_cma:
cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
kfree(buffer);
+uncharge_cgroup:
+ dmem_cgroup_uncharge(pool, size);

return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
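A note on the accounting pattern, for reviewers unfamiliar with the dmem
controller: the allocation path above charges the calling task's pool
before any CMA memory is reserved, stores the returned pool reference in
the dma_buf, and balances every successful dmem_cgroup_try_charge() with
exactly one dmem_cgroup_uncharge() of the same page-aligned size. A
minimal sketch of that pairing follows; my_buffer and my_reserve() are
hypothetical stand-ins for an exporter's own types, not kernel APIs.

#include <linux/cgroup_dmem.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_buffer {
	struct dmem_cgroup_pool_state *pool;
	size_t size;
	void *vaddr;
};

/* Stand-in for the real backing allocator (e.g. cma_alloc()). */
static void *my_reserve(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}

static struct my_buffer *my_buffer_alloc(struct dmem_cgroup_region *region,
					 size_t size)
{
	struct my_buffer *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* Charge first: fails if the caller's cgroup is over its limit. */
	ret = dmem_cgroup_try_charge(region, size, &buf->pool, NULL);
	if (ret)
		goto err_free;

	buf->vaddr = my_reserve(size);
	if (!buf->vaddr) {
		ret = -ENOMEM;
		goto err_uncharge;
	}
	buf->size = size;

	return buf;

err_uncharge:
	/* Undo exactly the amount charged above. */
	dmem_cgroup_uncharge(buf->pool, size);
err_free:
	kfree(buf);
	return ERR_PTR(ret);
}

Charging before reserving means a task over its dmem limit never
transiently holds scarce CMA pages, at the cost of a charge/uncharge
round-trip on allocation failure.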