[RFC,09/10] crypto: engine: permit to batch requests

Message ID 20200114135936.32422-10-clabbe.montjoie@gmail.com (mailing list archive)
State RFC
Delegated to: Herbert Xu
Series crypto: engine: permit to batch requests

Commit Message

Corentin Labbe Jan. 14, 2020, 1:59 p.m. UTC
Now that everything is ready, this patch makes it possible to choose the
number of requests to batch.

Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
---
 crypto/crypto_engine.c  | 32 +++++++++++++++++++++++++++-----
 include/crypto/engine.h |  2 ++
 2 files changed, 29 insertions(+), 5 deletions(-)
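
For illustration, a driver whose hardware can run several requests at once
could use the new helper roughly as below. This is only a sketch: the probe
function and the values 8 (batch size) and 64 (queue length) are made up for
the example; existing drivers keep calling crypto_engine_alloc_init() and get
the previous behaviour (batch of 1, CRYPTO_ENGINE_MAX_QLEN queue).

    #include <crypto/engine.h>
    #include <linux/platform_device.h>

    /* Hypothetical probe: batch up to 8 requests, queue up to 64. */
    static int mydrv_probe(struct platform_device *pdev)
    {
    	struct crypto_engine *engine;

    	/* rt = true: run the request pump as a realtime kthread, as before */
    	engine = crypto_engine_alloc_init2(&pdev->dev, true, 8, 64);
    	if (!engine)
    		return -ENOMEM;

    	return crypto_engine_start(engine);
    }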

Comments

Iuliana Prodan Jan. 16, 2020, 11:34 a.m. UTC | #1
On 1/14/2020 4:00 PM, Corentin Labbe wrote:
> Now that everything is ready, this patch makes it possible to choose the
> number of requests to batch.
> 
> Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
> ---
>   crypto/crypto_engine.c  | 32 +++++++++++++++++++++++++++-----
>   include/crypto/engine.h |  2 ++
>   2 files changed, 29 insertions(+), 5 deletions(-)
> 
> diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
> index e23a398ba330..e9cd9ec9a732 100644
> --- a/crypto/crypto_engine.c
> +++ b/crypto/crypto_engine.c
> @@ -114,6 +114,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>   	}
>   
>   	engine->ct = 0;
> +retry:
>   	/* Get the fist request from the engine queue to handle */
>   	backlog = crypto_get_backlog(&engine->queue);
>   	async_req = crypto_dequeue_request(&engine->queue);
> @@ -151,7 +152,10 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>   		}
>   		engine->cur_reqs[engine->ct].prepared = true;
>   	}
> -	engine->ct++;
> +	if (++engine->ct < engine->rmax && engine->queue.qlen > 0) {
> +		spin_lock_irqsave(&engine->queue_lock, flags);
> +		goto retry;
> +	}
>   	if (!enginectx->op.do_one_request) {
>   		dev_err(engine->dev, "failed to do request\n");
>   		ret = -EINVAL;
> @@ -393,15 +397,18 @@ int crypto_engine_stop(struct crypto_engine *engine)
>   EXPORT_SYMBOL_GPL(crypto_engine_stop);
>   
>   /**
> - * crypto_engine_alloc_init - allocate crypto hardware engine structure and
> + * crypto_engine_alloc_init2 - allocate crypto hardware engine structure and
>    * initialize it.
>    * @dev: the device attached with one hardware engine
>    * @rt: whether this queue is set to run as a realtime task
> + * @rmax: The number of request that the engine can batch in one
> + * @qlen: The size of the crypto queue
>    *
>    * This must be called from context that can sleep.
>    * Return: the crypto engine structure on success, else NULL.
>    */
> -struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
> +struct crypto_engine *crypto_engine_alloc_init2(struct device *dev, bool rt,
> +						int rmax, int qlen)

I think one _alloc_init function is enough, one that sets the size of the
crypto-engine queue (not hardcoded as it is now) and the number of requests
that the engine can execute in parallel.
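
A rough sketch of what that single helper could look like (parameter names
here are illustrative, not taken from the patch):

    /*
     * Sketch of the suggestion: one allocation helper that takes the
     * queue size and the number of requests run in parallel, instead
     * of adding a separate crypto_engine_alloc_init2().
     */
    struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt,
    					       int max_batch, int qlen);

    /* Existing callers would then pass the old defaults explicitly: */
    /* crypto_engine_alloc_init(dev, true, 1, CRYPTO_ENGINE_MAX_QLEN); */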

>   {
>   	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
>   	struct crypto_engine *engine;
> @@ -421,12 +428,12 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
>   	engine->priv_data = dev;
>   	snprintf(engine->name, sizeof(engine->name),
>   		 "%s-engine", dev_name(dev));
> -	engine->rmax = 1;
> +	engine->rmax = rmax;
>   	engine->cur_reqs = devm_kzalloc(dev, sizeof(struct cur_req) * engine->rmax, GFP_KERNEL);
>   	if (!engine->cur_reqs)
>   		return NULL;
>   
> -	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
> +	crypto_init_queue(&engine->queue, qlen);
>   	spin_lock_init(&engine->queue_lock);
>   
>   	engine->kworker = kthread_create_worker(0, "%s", engine->name);
> @@ -443,6 +450,21 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
>   
>   	return engine;
>   }
> +EXPORT_SYMBOL_GPL(crypto_engine_alloc_init2);
> +
> +/**
> + * crypto_engine_alloc_init - allocate crypto hardware engine structure and
> + * initialize it.
> + * @dev: the device attached with one hardware engine
> + * @rt: whether this queue is set to run as a realtime task
> + *
> + * This must be called from context that can sleep.
> + * Return: the crypto engine structure on success, else NULL.
> + */
> +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
> +{
> +	return crypto_engine_alloc_init2(dev, rt, 1, CRYPTO_ENGINE_MAX_QLEN);
> +}
>   EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
>   
>   /**
> diff --git a/include/crypto/engine.h b/include/crypto/engine.h
> index 55d3dbc2498c..fe0dfea8bf07 100644
> --- a/include/crypto/engine.h
> +++ b/include/crypto/engine.h
> @@ -115,6 +115,8 @@ void crypto_finalize_skcipher_request(struct crypto_engine *engine,
>   int crypto_engine_start(struct crypto_engine *engine);
>   int crypto_engine_stop(struct crypto_engine *engine);
>   struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
> +struct crypto_engine *crypto_engine_alloc_init2(struct device *dev, bool rt,
> +						int rmax, int qlen);
>   int crypto_engine_exit(struct crypto_engine *engine);
>   
>   #endif /* _CRYPTO_ENGINE_H */
>
Iuliana Prodan Jan. 17, 2020, 4:13 p.m. UTC | #2
On 1/14/2020 4:00 PM, Corentin Labbe wrote:
> Now that everything is ready, this patch makes it possible to choose the
> number of requests to batch.
> 
> Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
> ---
>   crypto/crypto_engine.c  | 32 +++++++++++++++++++++++++++-----
>   include/crypto/engine.h |  2 ++
>   2 files changed, 29 insertions(+), 5 deletions(-)
> 
> diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
> index e23a398ba330..e9cd9ec9a732 100644
> --- a/crypto/crypto_engine.c
> +++ b/crypto/crypto_engine.c
> @@ -114,6 +114,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>   	}
>   
>   	engine->ct = 0;
> +retry:
>   	/* Get the fist request from the engine queue to handle */
>   	backlog = crypto_get_backlog(&engine->queue);
>   	async_req = crypto_dequeue_request(&engine->queue);
> @@ -151,7 +152,10 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>   		}
>   		engine->cur_reqs[engine->ct].prepared = true;
>   	}
> -	engine->ct++;
> +	if (++engine->ct < engine->rmax && engine->queue.qlen > 0) {
This should be done in a critical section: engine->queue.qlen is read here
without holding queue_lock.

> +		spin_lock_irqsave(&engine->queue_lock, flags);
> +		goto retry;
> +	}
>   	if (!enginectx->op.do_one_request) {
>   		dev_err(engine->dev, "failed to do request\n");
>   		ret = -EINVAL;
> [...]
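
Regarding the locking concern above, one minimal way to keep the batching
check inside the critical section would be to take queue_lock before reading
qlen, for example (an illustrative sketch, not part of the posted patch):

    /*
     * Sketch only: decide whether to loop back for another request
     * while holding queue_lock, so engine->queue.qlen is not read
     * unlocked; the retry path then dequeues under the same lock.
     */
    if (++engine->ct < engine->rmax) {
    	spin_lock_irqsave(&engine->queue_lock, flags);
    	if (engine->queue.qlen > 0)
    		goto retry;
    	spin_unlock_irqrestore(&engine->queue_lock, flags);
    }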

Patch

diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index e23a398ba330..e9cd9ec9a732 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -114,6 +114,7 @@  static void crypto_pump_requests(struct crypto_engine *engine,
 	}
 
 	engine->ct = 0;
+retry:
 	/* Get the fist request from the engine queue to handle */
 	backlog = crypto_get_backlog(&engine->queue);
 	async_req = crypto_dequeue_request(&engine->queue);
@@ -151,7 +152,10 @@  static void crypto_pump_requests(struct crypto_engine *engine,
 		}
 		engine->cur_reqs[engine->ct].prepared = true;
 	}
-	engine->ct++;
+	if (++engine->ct < engine->rmax && engine->queue.qlen > 0) {
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		goto retry;
+	}
 	if (!enginectx->op.do_one_request) {
 		dev_err(engine->dev, "failed to do request\n");
 		ret = -EINVAL;
@@ -393,15 +397,18 @@  int crypto_engine_stop(struct crypto_engine *engine)
 EXPORT_SYMBOL_GPL(crypto_engine_stop);
 
 /**
- * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * crypto_engine_alloc_init2 - allocate crypto hardware engine structure and
  * initialize it.
  * @dev: the device attached with one hardware engine
  * @rt: whether this queue is set to run as a realtime task
+ * @rmax: The number of request that the engine can batch in one
+ * @qlen: The size of the crypto queue
  *
  * This must be called from context that can sleep.
  * Return: the crypto engine structure on success, else NULL.
  */
-struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+struct crypto_engine *crypto_engine_alloc_init2(struct device *dev, bool rt,
+						int rmax, int qlen)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 	struct crypto_engine *engine;
@@ -421,12 +428,12 @@  struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 	engine->priv_data = dev;
 	snprintf(engine->name, sizeof(engine->name),
 		 "%s-engine", dev_name(dev));
-	engine->rmax = 1;
+	engine->rmax = rmax;
 	engine->cur_reqs = devm_kzalloc(dev, sizeof(struct cur_req) * engine->rmax, GFP_KERNEL);
 	if (!engine->cur_reqs)
 		return NULL;
 
-	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+	crypto_init_queue(&engine->queue, qlen);
 	spin_lock_init(&engine->queue_lock);
 
 	engine->kworker = kthread_create_worker(0, "%s", engine->name);
@@ -443,6 +450,21 @@  struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 
 	return engine;
 }
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init2);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device attached with one hardware engine
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+	return crypto_engine_alloc_init2(dev, rt, 1, CRYPTO_ENGINE_MAX_QLEN);
+}
 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
 
 /**
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 55d3dbc2498c..fe0dfea8bf07 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -115,6 +115,8 @@  void crypto_finalize_skcipher_request(struct crypto_engine *engine,
 int crypto_engine_start(struct crypto_engine *engine);
 int crypto_engine_stop(struct crypto_engine *engine);
 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+struct crypto_engine *crypto_engine_alloc_init2(struct device *dev, bool rt,
+						int rmax, int qlen);
 int crypto_engine_exit(struct crypto_engine *engine);
 
 #endif /* _CRYPTO_ENGINE_H */