@@ -4,6 +4,8 @@
*/
#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
#include <drm/mediatek_drm.h>
#include <drm/drm.h>
@@ -102,6 +104,81 @@ struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
return ERR_PTR(ret);
}
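+/*
+ * mtk_gem_create_from_heap - create a GEM object backed by a dma-buf heap
+ * @dev: DRM device
+ * @heap: name of the dma-buf heap to allocate from
+ * @size: requested buffer size in bytes
+ *
+ * Allocate a dma-buf from the named heap, attach and map it for the display
+ * DMA device and wrap it in a mtk_gem_obj. Buffers coming from the secure
+ * heaps ("mtk_svp", "mtk_svp_cma") are flagged as secure and are never
+ * mapped into the kernel.
+ */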
+struct mtk_gem_obj *mtk_gem_create_from_heap(struct drm_device *dev,
+ const char *heap, size_t size)
+{
+ struct mtk_drm_private *priv = dev->dev_private;
+ struct mtk_gem_obj *mtk_gem;
+ struct drm_gem_object *obj;
+ struct dma_heap *dma_heap;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct iosys_map map = {};
+ int ret;
+
+ mtk_gem = mtk_gem_init(dev, size);
+ if (IS_ERR(mtk_gem))
+ return ERR_CAST(mtk_gem);
+
+ obj = &mtk_gem->base;
+
+ dma_heap = dma_heap_find(heap);
+ if (!dma_heap) {
+ DRM_ERROR("dma-heap \"%s\" not found\n", heap);
+ ret = -ENODEV;
+ goto err_gem_free;
+ }
+
+ dma_buf = dma_heap_buffer_alloc(dma_heap, size,
+ O_RDWR | O_CLOEXEC, DMA_HEAP_VALID_HEAP_FLAGS);
+ /* the heap reference is only needed for the allocation itself */
+ dma_heap_put(dma_heap);
+ if (IS_ERR(dma_buf)) {
+ DRM_ERROR("buffer allocation from heap \"%s\" failed\n", heap);
+ ret = PTR_ERR(dma_buf);
+ goto err_gem_free;
+ }
+
+ attach = dma_buf_attach(dma_buf, priv->dma_dev);
+ if (IS_ERR(attach)) {
+ DRM_ERROR("dma_buf_attach failed\n");
+ ret = PTR_ERR(attach);
+ goto err_buf_put;
+ }
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ DRM_ERROR("dma_buf_map_attachment failed\n");
+ ret = PTR_ERR(sgt);
+ goto err_buf_detach;
+ }
+ obj->import_attach = attach;
+ mtk_gem->dma_addr = sg_dma_address(sgt->sgl);
+ mtk_gem->sg = sgt;
+ mtk_gem->size = dma_buf->size;
+
+ if (!strcmp(heap, "mtk_svp") || !strcmp(heap, "mtk_svp_cma")) {
+ /* secure buffer can not be mapped */
+ mtk_gem->secure = true;
+ } else {
+ ret = dma_buf_vmap(dma_buf, &map);
+ mtk_gem->kvaddr = map.vaddr;
+ if (ret) {
+ DRM_ERROR("map failed, ret=%d\n", ret);
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+ mtk_gem->kvaddr = NULL;
+ }
+ }
+
+ return mtk_gem;
+
+err_buf_detach:
+ dma_buf_detach(dma_buf, attach);
+err_buf_put:
+ dma_buf_put(dma_buf);
+err_gem_free:
+ drm_gem_object_release(obj);
+ kfree(mtk_gem);
+ return ERR_PTR(ret);
+}
+
void mtk_gem_free_object(struct drm_gem_object *obj)
{
struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
@@ -229,7 +306,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);
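+ /*
+ * Imported buffers without struct page backing (e.g. from a secure
+ * dma-buf heap) cannot be CPU-mapped, so flag them as secure.
+ */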
+ mtk_gem->secure = !sg_page(sg->sgl);
mtk_gem->dma_addr = sg_dma_address(sg->sgl);
+ mtk_gem->size = attach->dmabuf->size;
mtk_gem->sg = sg;
return &mtk_gem->base;
@@ -304,7 +383,11 @@ int mtk_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_mtk_gem_create *args = data;
int ret;
- mtk_gem = mtk_gem_create(dev, args->size, false);
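+ /* encrypted buffers are allocated from the secure mtk_svp_cma dma-heap */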
+ if (args->flags & DRM_MTK_GEM_CREATE_ENCRYPTED)
+ mtk_gem = mtk_gem_create_from_heap(dev, "mtk_svp_cma", args->size);
+ else
+ mtk_gem = mtk_gem_create(dev, args->size, false);
+
if (IS_ERR(mtk_gem))
return PTR_ERR(mtk_gem);
@@ -27,9 +27,11 @@ struct mtk_gem_obj {
void *cookie;
void *kvaddr;
dma_addr_t dma_addr;
+ size_t size;
unsigned long dma_attrs;
struct sg_table *sg;
struct page **pages;
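+ /* buffer comes from a secure dma-buf heap and cannot be CPU-mapped */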
+ bool secure;
};
#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base)
@@ -39,6 +41,8 @@ struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size,
bool alloc_kmap);
int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+struct mtk_gem_obj *mtk_gem_create_from_heap(struct drm_device *dev,
+ const char *heap, size_t size);
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);