[RFC,09/10] crypto: caam/qi2 - add ablkcipher algorithms

Message ID: 20170810174253.20951-10-horia.geanta@nxp.com

Commit Message

Horia Geanta Aug. 10, 2017, 5:42 p.m. UTC
Add support for submitting the following ablkcipher algorithms
via the DPSECI backend:
cbc({aes,des,des3_ede})
ctr(aes), rfc3686(ctr(aes))
xts(aes)

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
---
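Not part of the patch - a minimal usage sketch for reviewers, driving
cbc(aes) through the ablkcipher API as it stands today. cbc_aes_demo()
and demo_done() are made-up names and error handling is trimmed; note
that rfc3686(ctr(aes)) additionally expects the 4-byte nonce appended
to the key (see ablkcipher_setkey() below).

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/aes.h>

static void demo_done(struct crypto_async_request *areq, int err)
{
	/* backlogged requests complete once with -EINPROGRESS first */
	if (err != -EINPROGRESS)
		complete(areq->data);
}

static int cbc_aes_demo(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	u8 key[AES_MIN_KEY_SIZE] = { 0 };	/* demo key */
	u8 *buf = NULL, *iv = NULL;
	int ret;

	/* resolves to cbc-aes-caam-qi2 when this driver has top priority */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	/* data and IV must be DMA-able: the driver maps req->info directly */
	ret = -ENOMEM;
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	iv = kzalloc(crypto_ablkcipher_ivsize(tfm), GFP_KERNEL);
	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !iv || !req)
		goto out;

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					demo_done, &done);
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;	/* real code would propagate the job status */
	}
out:
	ablkcipher_request_free(req);
	kfree(iv);
	kfree(buf);
	crypto_free_ablkcipher(tfm);
	return ret;
}
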
 drivers/crypto/caam/Kconfig       |   1 +
 drivers/crypto/caam/caamalg_qi2.c | 816 ++++++++++++++++++++++++++++++++++++++
 drivers/crypto/caam/caamalg_qi2.h |  23 +-
 3 files changed, 839 insertions(+), 1 deletion(-)
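
Also for reviewers - how the DPAA2 frame list is laid out per request,
as derived from ablkcipher_edesc_alloc() in the patch:

/*
 * fd_flt[0] = output frame list entry: ciphertext, req->nbytes
 * fd_flt[1] = input frame list entry: IV || plaintext,
 *             ivsize + req->nbytes, FINAL bit set
 *
 * If the DMA-mapped IV happens to end exactly where a single-segment
 * source begins (iv_dma + ivsize == sg_dma_address(req->src)), the
 * input is passed as one contiguous buffer and no input S/G entries
 * are needed. Otherwise a QM S/G table is built:
 *   sgt[0]            = IV
 *   sgt[1..]          = source segments (last one marked final)
 *   sgt[dst_sg_idx..] = destination segments, only when the (distinct)
 *                       destination has more than one segment
 */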

Patch

diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index e45d39d9007e..eb202e59c4fa 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -159,6 +159,7 @@  config CRYPTO_DEV_FSL_DPAA2_CAAM
 	tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
 	depends on FSL_MC_DPIO
 	select CRYPTO_DEV_FSL_CAAM_COMMON
+	select CRYPTO_BLKCIPHER
 	select CRYPTO_AUTHENC
 	select CRYPTO_AEAD
 	---help---
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 9dc5e1184e80..f32c518bc680 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1047,6 +1047,457 @@  static int rfc4543_setkey(struct crypto_aead *aead,
 	return ret;
 }
 
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+	struct device *dev = ctx->dev;
+	struct caam_flc *flc;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc;
+	u32 ctx1_iv_off = 0;
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+
+	memcpy(ctx->key, key, keylen);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		keylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
+	ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, ctx->key_dma)) {
+		dev_err(dev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = ctx->key;
+	ctx->cdata.key_inline = true;
+
+	/* ablkcipher_encrypt shared descriptor */
+	flc = &ctx->flc[ENCRYPT];
+	desc = flc->sh_desc;
+
+	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
+				     is_rfc3686, ctx1_iv_off);
+
+	flc->flc[1] = desc_len(desc); /* SDL */
+	flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+				      desc_bytes(desc), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, flc->flc_dma)) {
+		dev_err(dev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+	/* ablkcipher_decrypt shared descriptor */
+	flc = &ctx->flc[DECRYPT];
+	desc = flc->sh_desc;
+
+	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
+				     is_rfc3686, ctx1_iv_off);
+
+	flc->flc[1] = desc_len(desc); /* SDL */
+	flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+				      desc_bytes(desc), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, flc->flc_dma)) {
+		dev_err(dev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+	/* ablkcipher_givencrypt shared descriptor */
+	flc = &ctx->flc[GIVENCRYPT];
+	desc = flc->sh_desc;
+
+	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
+					ivsize, is_rfc3686, ctx1_iv_off);
+
+	flc->flc[1] = desc_len(desc); /* SDL */
+	flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+				      desc_bytes(desc), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, flc->flc_dma)) {
+		dev_err(dev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *dev = ctx->dev;
+	struct caam_flc *flc;
+	u32 *desc;
+
+	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+		dev_err(dev, "key size mismatch\n");
+		crypto_ablkcipher_set_flags(ablkcipher,
+					    CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, ctx->key_dma)) {
+		dev_err(dev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = ctx->key;
+	ctx->cdata.key_inline = true;
+
+	/* xts_ablkcipher_encrypt shared descriptor */
+	flc = &ctx->flc[ENCRYPT];
+	desc = flc->sh_desc;
+	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+
+	flc->flc[1] = desc_len(desc); /* SDL */
+	flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+				      desc_bytes(desc), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, flc->flc_dma)) {
+		dev_err(dev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+	/* xts_ablkcipher_decrypt shared descriptor */
+	flc = &ctx->flc[DECRYPT];
+	desc = flc->sh_desc;
+
+	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+
+	flc->flc[1] = desc_len(desc); /* SDL */
+	flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+				      desc_bytes(desc), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, flc->flc_dma)) {
+		dev_err(dev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, bool encrypt)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_request *req_ctx = ablkcipher_request_ctx(req);
+	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *dev = ctx->dev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	bool in_contig;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int dst_sg_idx, qm_sg_ents;
+	struct dpaa2_sg_entry *sg_table;
+	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (unlikely(req->dst != req->src)) {
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(dev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(dev, "unable to map destination\n");
+			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(dev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, iv_dma)) {
+		dev_err(dev, "unable to map IV\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (mapped_src_nents == 1 &&
+	    iv_dma + ivsize == sg_dma_address(req->src)) {
+		in_contig = true;
+		qm_sg_ents = 0;
+	} else {
+		in_contig = false;
+		qm_sg_ents = 1 + mapped_src_nents;
+	}
+	dst_sg_idx = qm_sg_ents;
+
+	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+		dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
+			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* allocate space for base edesc and link tables */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (unlikely(!edesc)) {
+		dev_err(dev, "could not allocate extended descriptor\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	sg_table = &edesc->sgt[0];
+	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+	if (!in_contig) {
+		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	}
+
+	if (mapped_dst_nents > 1)
+		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+				 dst_sg_idx, 0);
+
+	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+		dev_err(dev, "unable to map S/G table\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+	dpaa2_fl_set_final(in_fle, true);
+	dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
+	dpaa2_fl_set_len(out_fle, req->nbytes);
+
+	if (!in_contig) {
+		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+	} else {
+		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(in_fle, iv_dma);
+	}
+
+	if (req->src == req->dst) {
+		if (!in_contig) {
+			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+			dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
+					  sizeof(*sg_table));
+		} else {
+			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
+		}
+	} else if (mapped_dst_nents > 1) {
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+				  sizeof(*sg_table));
+	} else {
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+	}
+
+	return edesc;
+}
+
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+	struct skcipher_givcrypt_request *greq)
+{
+	struct ablkcipher_request *req = &greq->creq;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_request *req_ctx = ablkcipher_request_ctx(req);
+	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *dev = ctx->dev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	bool out_contig;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct dpaa2_sg_entry *sg_table;
+	int dst_sg_idx, qm_sg_ents;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (unlikely(req->dst != req->src)) {
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(dev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(dev, "unable to map destination\n");
+			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(dev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		dst_nents = src_nents;
+		mapped_dst_nents = src_nents;
+	}
+
+	iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, iv_dma)) {
+		dev_err(dev, "unable to map IV\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	dst_sg_idx = qm_sg_ents;
+	if (mapped_dst_nents == 1 &&
+	    iv_dma + ivsize == sg_dma_address(req->dst)) {
+		out_contig = true;
+	} else {
+		out_contig = false;
+		qm_sg_ents += 1 + mapped_dst_nents;
+	}
+
+	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+		dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
+			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* allocate space for base edesc and link tables */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(dev, "could not allocate extended descriptor\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	sg_table = &edesc->sgt[0];
+	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+	if (mapped_src_nents > 1)
+		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+
+	if (!out_contig) {
+		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+				 dst_sg_idx + 1, 0);
+	}
+
+	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+		dev_err(dev, "unable to map S/G table\n");
+		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+	dpaa2_fl_set_final(in_fle, true);
+	dpaa2_fl_set_len(in_fle, req->nbytes);
+	dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
+
+	if (mapped_src_nents > 1) {
+		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+	} else {
+		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+	}
+
+	if (!out_contig) {
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+				  sizeof(*sg_table));
+	} else {
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+	}
+
+	return edesc;
+}
+
 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
 		       struct aead_request *req)
 {
@@ -1060,6 +1511,19 @@  static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }
 
+static void ablkcipher_unmap(struct device *dev,
+			     struct ablkcipher_edesc *edesc,
+			     struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct caam_request *caam_req = ablkcipher_request_ctx(req);
+
+	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, caam_req->op_type,
+		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
 static void aead_encrypt_done(void *cbk_ctx, u32 status)
 {
 	struct crypto_async_request *areq = cbk_ctx;
@@ -1189,6 +1653,133 @@  static int ipsec_gcm_decrypt(struct aead_request *req)
 	return aead_decrypt(req);
 }
 
+static void ablkcipher_done(void *cbk_ctx, u32 status)
+{
+	struct crypto_async_request *areq = cbk_ctx;
+	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
+	struct caam_request *req_ctx = to_caam_req(areq);
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct ablkcipher_edesc *edesc = req_ctx->edesc;
+	int ecode = 0;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+#ifdef DEBUG
+	dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+#endif
+
+	if (unlikely(status)) {
+		caam_qi2_strstatus(ctx->dev, status);
+		ecode = -EIO;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(ctx->dev, edesc, req);
+	qi_cache_free(edesc);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block. This is used e.g. by the CTS mode.
+	 */
+	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+				 ivsize, 0);
+
+	ablkcipher_request_complete(req, ecode);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct caam_request *caam_req = ablkcipher_request_ctx(req);
+	int ret;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, true);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	caam_req->flc = &ctx->flc[ENCRYPT];
+	caam_req->op_type = ENCRYPT;
+	caam_req->cbk = ablkcipher_done;
+	caam_req->ctx = &req->base;
+	caam_req->edesc = edesc;
+	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+	if (ret != -EINPROGRESS &&
+	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+		ablkcipher_unmap(ctx->dev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
+{
+	struct ablkcipher_request *req = &greq->creq;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct caam_request *caam_req = ablkcipher_request_ctx(req);
+	int ret;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_giv_edesc_alloc(greq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	caam_req->flc = &ctx->flc[GIVENCRYPT];
+	caam_req->op_type = GIVENCRYPT;
+	caam_req->cbk = ablkcipher_done;
+	caam_req->ctx = &req->base;
+	caam_req->edesc = edesc;
+	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+	if (ret != -EINPROGRESS &&
+	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+		ablkcipher_unmap(ctx->dev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct caam_request *caam_req = ablkcipher_request_ctx(req);
+	int ret;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, false);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	caam_req->flc = &ctx->flc[DECRYPT];
+	caam_req->op_type = DECRYPT;
+	caam_req->cbk = ablkcipher_done;
+	caam_req->ctx = &req->base;
+	caam_req->edesc = edesc;
+	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+	if (ret != -EINPROGRESS &&
+	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+		ablkcipher_unmap(ctx->dev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
 struct caam_crypto_alg {
 	struct list_head entry;
 	struct crypto_alg crypto_alg;
@@ -1213,6 +1804,15 @@  static int caam_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
+{
+	struct ablkcipher_tfm *ablkcipher_tfm =
+		crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
+
+	ablkcipher_tfm->reqsize = sizeof(struct caam_request);
+	return caam_cra_init(tfm);
+}
+
 static int caam_cra_init_aead(struct crypto_aead *tfm)
 {
 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
@@ -1239,11 +1839,135 @@  static void caam_exit_common(struct crypto_tfm *tfm)
 				 DMA_TO_DEVICE);
 }
 
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+	caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
 static void caam_cra_exit_aead(struct crypto_aead *tfm)
 {
 	caam_exit_common(crypto_aead_ctx(tfm));
 }
 
+#define template_ablkcipher	template_u.ablkcipher
+struct caam_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	u32 type;
+	union {
+		struct ablkcipher_alg ablkcipher;
+	} template_u;
+	u32 class1_alg_type;
+	u32 class2_alg_type;
+};
+
+static struct caam_alg_template driver_algs[] = {
+	/* ablkcipher descriptor */
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-caam-qi2",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-caam-qi2",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-caam-qi2",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-caam-qi2",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "chainiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "rfc3686(ctr(aes))",
+		.driver_name = "rfc3686-ctr-aes-caam-qi2",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-caam-qi2",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = xts_ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+	}
+};
+
 static struct caam_aead_alg driver_aeads[] = {
 	{
 		.aead = {
@@ -2406,6 +3130,48 @@  static struct caam_aead_alg driver_aeads[] = {
 	},
 };
 
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+					      *template)
+{
+	struct caam_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_exit = caam_cra_exit;
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct caam_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+			 template->type;
+	switch (template->type) {
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+		alg->cra_init = caam_cra_init_ablkcipher;
+		alg->cra_type = &crypto_givcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg->cra_init = caam_cra_init_ablkcipher;
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	}
+
+	t_alg->caam.class1_alg_type = template->class1_alg_type;
+	t_alg->caam.class2_alg_type = template->class2_alg_type;
+
+	return t_alg;
+}
+
 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 {
 	struct aead_alg *alg = &t_alg->aead;
@@ -2878,6 +3644,8 @@  static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
 	return 0;
 }
 
+static struct list_head alg_list;
+
 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
 {
 	struct device *dev;
@@ -2959,6 +3727,44 @@  static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
 	}
 
 	/* register crypto algorithms the device supports */
+	INIT_LIST_HEAD(&alg_list);
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_crypto_alg *t_alg;
+		struct caam_alg_template *alg = driver_algs + i;
+		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!priv->sec_attr.des_acc_num &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!priv->sec_attr.aes_acc_num &&
+		    (alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		t_alg = caam_alg_alloc(alg);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(dev, "%s alg allocation failed: %d\n",
+				 alg->driver_name, err);
+			continue;
+		}
+		t_alg->caam.dev = dev;
+
+		err = crypto_register_alg(&t_alg->crypto_alg);
+		if (err) {
+			dev_warn(dev, "%s alg registration failed: %d\n",
+				 t_alg->crypto_alg.cra_driver_name, err);
+			kfree(t_alg);
+			continue;
+		}
+
+		list_add_tail(&t_alg->entry, &alg_list);
+		registered = true;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 		struct caam_aead_alg *t_alg = driver_aeads + i;
 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
@@ -3034,6 +3840,16 @@  static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
 			crypto_unregister_aead(&t_alg->aead);
 	}
 
+	if (alg_list.next) {
+		struct caam_crypto_alg *t_alg, *n;
+
+		list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+			crypto_unregister_alg(&t_alg->crypto_alg);
+			list_del(&t_alg->entry);
+			kfree(t_alg);
+		}
+	}
+
 	dpaa2_dpseci_disable(priv);
 	dpaa2_dpseci_dpio_free(priv);
 	dpaa2_dpseci_free(priv);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index bfb3a6416f91..8840721595c8 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -170,6 +170,27 @@  struct aead_edesc {
 	struct dpaa2_sg_entry sgt[0];
 };
 
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int qm_sg_bytes;
+	dma_addr_t qm_sg_dma;
+#define CAAM_QI_MAX_ABLKCIPHER_SG					    \
+	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+	 sizeof(struct dpaa2_sg_entry))
+	struct dpaa2_sg_entry sgt[0];
+};
+
 /**
  * caam_flc - Flow Context (FLC)
  * @flc: Flow Context options
@@ -200,7 +221,7 @@  enum optype {
  * @op_type: operation type
  * @cbk: Callback function to invoke when job is completed
  * @ctx: arbit context attached with request by the application
- * @edesc: extended descriptor; points to aead_edesc
+ * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
  */
 struct caam_request {
 	struct dpaa2_fl_entry fd_flt[2];