[v1,01/14] tee: flexible shared memory pool creation

Message ID 1506621851-6929-2-git-send-email-volodymyr_babchuk@epam.com (mailing list archive)
State New, archived

Commit Message

Volodymyr Babchuk Sept. 28, 2017, 6:03 p.m. UTC
From: Jens Wiklander <jens.wiklander@linaro.org>

Makes creation of shm pools more flexible by adding new, more primitive
functions to allocate a shm pool. This makes it easier to add
driver-specific shm pool management.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Signed-off-by: Volodymyr Babchuk <vlad.babchuk@gmail.com>
---
 drivers/tee/tee_private.h  |  57 +---------------
 drivers/tee/tee_shm.c      |   8 +--
 drivers/tee/tee_shm_pool.c | 165 ++++++++++++++++++++++++++++-----------------
 include/linux/tee_drv.h    |  91 +++++++++++++++++++++++++
 4 files changed, 199 insertions(+), 122 deletions(-)
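
A usage sketch, not part of the patch: a driver that wants its own layout of a
reserved shared memory region could combine the exported primitives added here
roughly as below. The my_drv_create_pool() name and the halving of the region
are made up for illustration (and assume the region spans an even number of
pages); tee_shm_pool_mgr_alloc_res_mem(), tee_shm_pool_alloc() and
tee_shm_pool_mgr_destroy() are the new API from this patch.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/tee_drv.h>

static struct tee_shm_pool *my_drv_create_pool(unsigned long vaddr,
					       phys_addr_t paddr,
					       size_t size)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	struct tee_shm_pool *pool;
	size_t half = size / 2;	/* assumed to stay page aligned */

	/* First half: driver private allocations, 8 byte aligned */
	priv_mgr = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, half, 3);
	if (IS_ERR(priv_mgr))
		return ERR_CAST(priv_mgr);

	/* Second half: dma-buf backed allocations, page aligned */
	dmabuf_mgr = tee_shm_pool_mgr_alloc_res_mem(vaddr + half, paddr + half,
						    half, PAGE_SHIFT);
	if (IS_ERR(dmabuf_mgr)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return ERR_CAST(dmabuf_mgr);
	}

	/* Combine the two managers into the pool handed to tee_device_alloc() */
	pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(pool)) {
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
		tee_shm_pool_mgr_destroy(priv_mgr);
	}
	return pool;
}

This is essentially what the reworked tee_shm_pool_alloc_res_mem() does for two
separate regions; a driver with a different backing store can instead supply
its own struct tee_shm_pool_mgr with tee_shm_pool_mgr_ops, including the new
destroy_poolmgr hook, and pass it to tee_shm_pool_alloc().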

Patch

diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 21cb6be..2bc2b5a 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -21,68 +21,15 @@ 
 #include <linux/mutex.h>
 #include <linux/types.h>
 
-struct tee_device;
-
-/**
- * struct tee_shm - shared memory object
- * @teedev:	device used to allocate the object
- * @ctx:	context using the object, if NULL the context is gone
- * @link	link element
- * @paddr:	physical address of the shared memory
- * @kaddr:	virtual address of the shared memory
- * @size:	size of shared memory
- * @dmabuf:	dmabuf used to for exporting to user space
- * @flags:	defined by TEE_SHM_* in tee_drv.h
- * @id:		unique id of a shared memory object on this device
- */
-struct tee_shm {
-	struct tee_device *teedev;
-	struct tee_context *ctx;
-	struct list_head link;
-	phys_addr_t paddr;
-	void *kaddr;
-	size_t size;
-	struct dma_buf *dmabuf;
-	u32 flags;
-	int id;
-};
-
-struct tee_shm_pool_mgr;
-
-/**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
- * @alloc:	called when allocating shared memory
- * @free:	called when freeing shared memory
- */
-struct tee_shm_pool_mgr_ops {
-	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
-		     size_t size);
-	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
-};
-
-/**
- * struct tee_shm_pool_mgr - shared memory manager
- * @ops:		operations
- * @private_data:	private data for the shared memory manager
- */
-struct tee_shm_pool_mgr {
-	const struct tee_shm_pool_mgr_ops *ops;
-	void *private_data;
-};
-
 /**
  * struct tee_shm_pool - shared memory pool
  * @private_mgr:	pool manager for shared memory only between kernel
  *			and secure world
  * @dma_buf_mgr:	pool manager for shared memory exported to user space
- * @destroy:		called when destroying the pool
- * @private_data:	private data for the pool
  */
 struct tee_shm_pool {
-	struct tee_shm_pool_mgr private_mgr;
-	struct tee_shm_pool_mgr dma_buf_mgr;
-	void (*destroy)(struct tee_shm_pool *pool);
-	void *private_data;
+	struct tee_shm_pool_mgr *private_mgr;
+	struct tee_shm_pool_mgr *dma_buf_mgr;
 };
 
 #define TEE_DEVICE_FLAG_REGISTERED	0x1
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 4bc7956..fdda89e 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -32,9 +32,9 @@  static void tee_shm_release(struct tee_shm *shm)
 	mutex_unlock(&teedev->mutex);
 
 	if (shm->flags & TEE_SHM_DMA_BUF)
-		poolm = &teedev->pool->dma_buf_mgr;
+		poolm = teedev->pool->dma_buf_mgr;
 	else
-		poolm = &teedev->pool->private_mgr;
+		poolm = teedev->pool->private_mgr;
 
 	poolm->ops->free(poolm, shm);
 	kfree(shm);
@@ -139,9 +139,9 @@  struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 	shm->teedev = teedev;
 	shm->ctx = ctx;
 	if (flags & TEE_SHM_DMA_BUF)
-		poolm = &teedev->pool->dma_buf_mgr;
+		poolm = teedev->pool->dma_buf_mgr;
 	else
-		poolm = &teedev->pool->private_mgr;
+		poolm = teedev->pool->private_mgr;
 
 	rc = poolm->ops->alloc(poolm, shm, size);
 	if (rc) {
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
index fb4f852..e6d4b9e 100644
--- a/drivers/tee/tee_shm_pool.c
+++ b/drivers/tee/tee_shm_pool.c
@@ -44,49 +44,18 @@  static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
 	shm->kaddr = NULL;
 }
 
+static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+	gen_pool_destroy(poolm->private_data);
+	kfree(poolm);
+}
+
 static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
 	.alloc = pool_op_gen_alloc,
 	.free = pool_op_gen_free,
+	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
 };
 
-static void pool_res_mem_destroy(struct tee_shm_pool *pool)
-{
-	gen_pool_destroy(pool->private_mgr.private_data);
-	gen_pool_destroy(pool->dma_buf_mgr.private_data);
-}
-
-static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
-				 struct tee_shm_pool_mem_info *info,
-				 int min_alloc_order)
-{
-	size_t page_mask = PAGE_SIZE - 1;
-	struct gen_pool *genpool = NULL;
-	int rc;
-
-	/*
-	 * Start and end must be page aligned
-	 */
-	if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
-	    (info->size & page_mask))
-		return -EINVAL;
-
-	genpool = gen_pool_create(min_alloc_order, -1);
-	if (!genpool)
-		return -ENOMEM;
-
-	gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
-	rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
-			       -1);
-	if (rc) {
-		gen_pool_destroy(genpool);
-		return rc;
-	}
-
-	mgr->private_data = genpool;
-	mgr->ops = &pool_ops_generic;
-	return 0;
-}
-
 /**
  * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
  * memory range
@@ -104,42 +73,109 @@  struct tee_shm_pool *
 tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
 			   struct tee_shm_pool_mem_info *dmabuf_info)
 {
-	struct tee_shm_pool *pool = NULL;
-	int ret;
-
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-	if (!pool) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
 
 	/*
 	 * Create the pool for driver private shared memory
 	 */
-	ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
-				    3 /* 8 byte aligned */);
-	if (ret)
-		goto err;
+	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
+					    priv_info->size,
+					    3 /* 8 byte aligned */);
+	if (IS_ERR(rc))
+		return rc;
+	priv_mgr = rc;
 
 	/*
 	 * Create the pool for dma_buf shared memory
 	 */
-	ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
-				    PAGE_SHIFT);
-	if (ret)
+	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
+					    dmabuf_info->paddr,
+					    dmabuf_info->size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
+
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+							phys_addr_t paddr,
+							size_t size,
+							int min_alloc_order)
+{
+	const size_t page_mask = PAGE_SIZE - 1;
+	struct tee_shm_pool_mgr *mgr;
+	int rc;
+
+	/* Start and end must be page aligned */
+	if (vaddr & page_mask || paddr & page_mask || size & page_mask)
+		return ERR_PTR(-EINVAL);
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return ERR_PTR(-ENOMEM);
+
+	mgr->private_data = gen_pool_create(min_alloc_order, -1);
+	if (!mgr->private_data) {
+		rc = -ENOMEM;
 		goto err;
+	}
 
-	pool->destroy = pool_res_mem_destroy;
-	return pool;
+	gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
+	rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
+	if (rc) {
+		gen_pool_destroy(mgr->private_data);
+		goto err;
+	}
+
+	mgr->ops = &pool_ops_generic;
+
+	return mgr;
 err:
-	if (ret == -ENOMEM)
-		pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
-	if (pool && pool->private_mgr.private_data)
-		gen_pool_destroy(pool->private_mgr.private_data);
-	kfree(pool);
-	return ERR_PTR(ret);
+	kfree(mgr);
+
+	return ERR_PTR(rc);
 }
-EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
+
+static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
+{
+	return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
+		mgr->ops->destroy_poolmgr;
+}
+
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+					struct tee_shm_pool_mgr *dmabuf_mgr)
+{
+	struct tee_shm_pool *pool;
+
+	if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
+		return ERR_PTR(-EINVAL);
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->private_mgr = priv_mgr;
+	pool->dma_buf_mgr = dmabuf_mgr;
+
+	return pool;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
 
 /**
  * tee_shm_pool_free() - Free a shared memory pool
@@ -150,7 +186,10 @@  EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
  */
 void tee_shm_pool_free(struct tee_shm_pool *pool)
 {
-	pool->destroy(pool);
+	if (pool->private_mgr)
+		tee_shm_pool_mgr_destroy(pool->private_mgr);
+	if (pool->dma_buf_mgr)
+		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
 	kfree(pool);
 }
 EXPORT_SYMBOL_GPL(tee_shm_pool_free);
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index cb889af..e9be4a4 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -150,6 +150,97 @@  int tee_device_register(struct tee_device *teedev);
 void tee_device_unregister(struct tee_device *teedev);
 
 /**
+ * struct tee_shm - shared memory object
+ * @teedev:	device used to allocate the object
+ * @ctx:	context using the object, if NULL the context is gone
+ * @link:	link element
+ * @paddr:	physical address of the shared memory
+ * @kaddr:	virtual address of the shared memory
+ * @size:	size of shared memory
+ * @offset:	offset of buffer in user space
+ * @pages:	locked pages from userspace
+ * @num_pages:	number of locked pages
+ * @dmabuf:	dmabuf used for exporting to user space
+ * @flags:	defined by TEE_SHM_* in tee_drv.h
+ * @id:		unique id of a shared memory object on this device
+ *
+ * This structure is only supposed to be accessed directly from the TEE
+ * subsystem and from drivers that implement their own shm pool manager.
+ */
+struct tee_shm {
+	struct tee_device *teedev;
+	struct tee_context *ctx;
+	struct list_head link;
+	phys_addr_t paddr;
+	void *kaddr;
+	size_t size;
+	unsigned int offset;
+	struct page **pages;
+	size_t num_pages;
+	struct dma_buf *dmabuf;
+	u32 flags;
+	int id;
+};
+
+/**
+ * struct tee_shm_pool_mgr - shared memory manager
+ * @ops:		operations
+ * @private_data:	private data for the shared memory manager
+ */
+struct tee_shm_pool_mgr {
+	const struct tee_shm_pool_mgr_ops *ops;
+	void *private_data;
+};
+
+/**
+ * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * @alloc:		called when allocating shared memory
+ * @free:		called when freeing shared memory
+ * @destroy_poolmgr:	called when destroying the pool manager
+ */
+struct tee_shm_pool_mgr_ops {
+	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
+		     size_t size);
+	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
+	void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
+};
+
+/**
+ * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
+ * @priv_mgr:	manager for driver private shared memory allocations
+ * @dmabuf_mgr:	manager for dma-buf shared memory allocations
+ *
+ * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * by @dmabuf_mgr, others will use the range provided by @priv_mgr.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+					struct tee_shm_pool_mgr *dmabuf_mgr);
+
+/**
+ * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
+ * memory
+ * @vaddr:	Virtual address of start of pool
+ * @paddr:	Physical address of start of pool
+ * @size:	Size in bytes of the pool
+ *
+ * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+							phys_addr_t paddr,
+							size_t size,
+							int min_alloc_order);
+
+/**
+ * tee_shm_pool_mgr_destroy() - Free a shared memory manager
+ */
+static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
+{
+	poolm->ops->destroy_poolmgr(poolm);
+}
+
+/**
  * struct tee_shm_pool_mem_info - holds information needed to create a shared
  * memory pool
  * @vaddr:	Virtual address of start of pool