@@ -2134,6 +2134,12 @@ config CRYPTO_POOL
help
Per-CPU pool of crypto requests ready for usage in atomic contexts.
+config CRYPTO_POOL_DEFAULT_SCRATCH_SIZE
+ hex "Per-CPU default scratch area size"
+ depends on CRYPTO_POOL
+ default 0x100
+ range 0x100 0x10000
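+	help
+	  Default size of the per-CPU scratch area. Users of crypto_pool
+	  that need a bigger temporary buffer can grow it at run time
+	  with crypto_pool_reserve_scratch().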
+
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <crypto/pool.h>
+#include <linux/cpu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>
-static unsigned long scratch_size = DEFAULT_CRYPTO_POOL_SCRATCH_SZ;
+static unsigned long scratch_size = CONFIG_CRYPTO_POOL_DEFAULT_SCRATCH_SIZE;
static DEFINE_PER_CPU(void *, crypto_pool_scratch);
struct crypto_pool_entry {
@@ -19,28 +20,60 @@ struct crypto_pool_entry {
#define CPOOL_SIZE (PAGE_SIZE/sizeof(struct crypto_pool_entry))
static struct crypto_pool_entry cpool[CPOOL_SIZE];
-static int last_allocated;
+static unsigned int last_allocated;
static DEFINE_MUTEX(cpool_mutex);
-static int crypto_pool_scratch_alloc(void)
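+/*
+ * Runs on the target CPU via smp_call_function_single(): replaces the
+ * CPU's scratch buffer and frees the old one.
+ */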
+static void __set_scratch(void *scratch)
{
- int cpu;
+ kfree(this_cpu_read(crypto_pool_scratch));
+ this_cpu_write(crypto_pool_scratch, scratch);
+}
- lockdep_assert_held(&cpool_mutex);
+/**
+ * crypto_pool_reserve_scratch - re-allocate the per-CPU scratch buffers, slow-path
+ * @size: requested size for the scratch/temp buffer
+ *
+ * Only up-sizing is supported, for simplicity.
+ *
+ * Return: 0 on success, a negative errno on failure.
+ */
+int crypto_pool_reserve_scratch(unsigned long size)
+{
+ int cpu, err = 0;
+ mutex_lock(&cpool_mutex);
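+	/*
+	 * Nothing to do if the current buffers are big enough and every
+	 * possible CPU already has one allocated.
+	 */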
+ if (size <= scratch_size) {
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(crypto_pool_scratch, cpu))
+ continue;
+ goto allocate_scratch;
+ }
+ mutex_unlock(&cpool_mutex);
+ return 0;
+ }
+allocate_scratch:
+	size = max(size, scratch_size);
+ cpus_read_lock();
for_each_possible_cpu(cpu) {
- void *scratch = per_cpu(crypto_pool_scratch, cpu);
+ void *scratch;
- if (scratch)
- continue;
+ scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+ if (!scratch) {
+ err = -ENOMEM;
+ break;
+ }
- scratch = kmalloc_node(scratch_size, GFP_KERNEL,
- cpu_to_node(cpu));
- if (!scratch)
- return -ENOMEM;
- per_cpu(crypto_pool_scratch, cpu) = scratch;
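+		/* Offline CPU: nothing can race, replace the buffer directly */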
+ if (!cpu_online(cpu)) {
+ kfree(per_cpu(crypto_pool_scratch, cpu));
+ per_cpu(crypto_pool_scratch, cpu) = scratch;
+ continue;
+ }
+ err = smp_call_function_single(cpu, __set_scratch, scratch, 1);
+ if (err) {
+ kfree(scratch);
+ break;
+ }
}
- return 0;
+
+	cpus_read_unlock();
+	if (!err)
+		scratch_size = size;
+	mutex_unlock(&cpool_mutex);
+ return err;
}
static void crypto_pool_scratch_free(void)
@@ -139,10 +172,6 @@ int crypto_pool_alloc_ahash(const char *alg)
/* slow-path */
mutex_lock(&cpool_mutex);
- err = crypto_pool_scratch_alloc();
- if (err)
- goto out;
-
for (i = 0; i < last_allocated; i++) {
if (cpool[i].alg && !strcmp(cpool[i].alg, alg)) {
kref_get(&cpool[i].kref);
@@ -4,8 +4,6 @@
#include <crypto/hash.h>
-#define DEFAULT_CRYPTO_POOL_SCRATCH_SZ 128
-
struct crypto_pool {
void *scratch;
};
@@ -20,6 +18,7 @@ struct crypto_pool_ahash {
struct ahash_request *req;
};
+int crypto_pool_reserve_scratch(unsigned long size);
int crypto_pool_alloc_ahash(const char *alg);
void crypto_pool_add(unsigned int id);
void crypto_pool_release(unsigned int id);
Instead of a hardcoded build-time constant, reallocate the scratch area
when a user needs a bigger one. Different algorithms and different users
may need temporary per-CPU buffers of different sizes. Only up-sizing is
supported, for simplicity.

Signed-off-by: Dmitry Safonov <dima@arista.com>
---
 crypto/Kconfig        |  6 ++++
 crypto/crypto_pool.c  | 65 +++++++++++++++++++++++++++++++------------
 include/crypto/pool.h |  3 +-
 3 files changed, 54 insertions(+), 20 deletions(-)
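For illustration only, a sketch of how a hypothetical user would size the
scratch area before allocating a pool entry (the caller function is made
up, "hmac(sha256)" is just an example algorithm, and this assumes
crypto_pool_alloc_ahash() returns a pool id on success, as the header
suggests):

	/* Hypothetical caller, not part of this patch. */
	static int example_pool_user(void)
	{
		int id, err;

		/* Grow every per-CPU scratch buffer to at least 512 bytes. */
		err = crypto_pool_reserve_scratch(512);
		if (err)
			return err;

		id = crypto_pool_alloc_ahash("hmac(sha256)");
		if (id < 0)
			return id;

		/* ... use the pool from atomic context ... */

		crypto_pool_release(id);
		return 0;
	}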