@@ -535,6 +535,7 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @bklog: stored to determine if the request needs backlog
+ * @free: stored to determine if ahash_edesc needs to be freed
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* @sec4_sg: h/w link table
*/
@@ -543,6 +544,7 @@ struct ahash_edesc {
int src_nents;
int sec4_sg_bytes;
bool bklog;
+ bool free;
u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
struct sec4_sg_entry sec4_sg[];
};
@@ -603,7 +605,8 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
memcpy(req->result, state->caam_ctx, digestsize);
- kfree(edesc);
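+ /* edesc may live in the request ctx; free it only if heap-allocated */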
+ if (edesc->free)
+ kfree(edesc);
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
@@ -652,7 +655,8 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
scatterwalk_map_and_copy(state->buf, req->src,
req->nbytes - state->next_buflen,
@@ -703,17 +707,28 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
dma_addr_t sh_desc_dma)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct caam_hash_state *state = ahash_request_ctx_dma(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
-
- edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
- if (!edesc) {
- dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
- return NULL;
+ int edesc_size;
+
+ /* Check if there's enough space for the edesc saved in the req ctx */
+ edesc_size = sizeof(*edesc) + sg_size;
+ if (edesc_size > (crypto_ahash_reqsize(ahash) -
+ sizeof(struct caam_hash_state))) {
+ /* allocate space for base edesc and link tables */
+ edesc = kzalloc(edesc_size, flags);
+ if (!edesc)
+ return NULL;
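+ /* heap-allocated, so completion paths must kfree() it */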
+ edesc->free = true;
+ } else {
+ /* get address for base edesc and link tables */
+ edesc = (struct ahash_edesc *)((u8 *)state +
+ sizeof(struct caam_hash_state));
+ /* clear the base edesc */
+ memset(edesc, 0, sizeof(*edesc));
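+ /* edesc->free stays false: nothing to kfree() on completion */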
}
state->edesc = edesc;
@@ -778,7 +793,8 @@ static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
if (ret != -EINPROGRESS) {
ahash_unmap(jrdev, state->edesc, req, 0);
- kfree(state->edesc);
+ if (state->edesc->free)
+ kfree(state->edesc);
} else {
ret = 0;
}
@@ -813,7 +829,8 @@ static int ahash_enqueue_req(struct device *jrdev,
if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
}
return ret;
@@ -941,7 +958,8 @@ static int ahash_update_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1002,7 +1020,8 @@ static int ahash_final_ctx(struct ahash_request *req)
digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1076,7 +1095,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1125,7 +1145,8 @@ static int ahash_digest(struct ahash_request *req)
req->nbytes);
if (ret) {
ahash_unmap(jrdev, edesc, req, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1134,7 +1155,8 @@ static int ahash_digest(struct ahash_request *req)
ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
if (ret) {
ahash_unmap(jrdev, edesc, req, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return -ENOMEM;
}
@@ -1191,7 +1213,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
digestsize, DMA_FROM_DEVICE);
unmap:
ahash_unmap(jrdev, edesc, req, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return -ENOMEM;
}
@@ -1312,7 +1335,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1387,7 +1411,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
digestsize, DMA_FROM_DEVICE);
unmap:
ahash_unmap(jrdev, edesc, req, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return -ENOMEM;
}
@@ -1495,7 +1520,8 @@ static int ahash_update_first(struct ahash_request *req)
return ret;
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1782,6 +1808,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
sh_desc_update);
dma_addr_t dma_addr;
struct caam_drv_private *priv;
+ int extra_reqsize = 0;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1862,7 +1889,15 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->enginectx.op.do_one_request = ahash_do_one_req;
- crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
+ /* Compute extra space needed for base edesc and link tables */
+ extra_reqsize = sizeof(struct ahash_edesc) +
+ /* link tables for src:
+ * max 4 entries + max 2 for the remaining buf, rounded up to 8
+ */
+ (8 * sizeof(struct sec4_sg_entry));
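+ /* larger link tables fall back to kzalloc() in ahash_edesc_alloc() */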
+
+ crypto_ahash_set_reqsize_dma(ahash,
+ sizeof(struct caam_hash_state) + extra_reqsize);
/*
* For keyed hash algorithms shared descriptors
@@ -1937,7 +1972,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
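+ /*
+ * With the edesc embedded in the request context, the driver no
+ * longer allocates memory for requests whose src scatterlist has
+ * at most 4 entries, so CRYPTO_ALG_ALLOCATES_MEMORY can be dropped.
+ */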
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
t_alg->alg_type = template->alg_type;