Message ID: 2a6cc4637621d2ef0d84754817128c43795cb022.1482424395.git.cyrille.pitchen@atmel.com (mailing list archive)
State: New, archived
Hi Cyrille,

[auto build test WARNING on cryptodev/master]
[also build test WARNING on next-20161222]
[cannot apply to v4.9]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Cyrille-Pitchen/crypto-atmel-authenc-add-support-to-authenc-hmac-shaX-Y-aes-modes/20161223-015820
base:   https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git master
config: alpha-allyesconfig (attached as .config)
compiler: alpha-linux-gnu-gcc (Debian 6.1.1-9) 6.1.1 20160705
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=alpha

All warnings (new ones prefixed by >>):

   In file included from include/linux/printk.h:305:0,
                    from include/linux/kernel.h:13,
                    from drivers/crypto/atmel-sha.c:17:
   drivers/crypto/atmel-sha.c: In function 'atmel_sha_xmit_cpu':
>> drivers/crypto/atmel-sha.c:465:19: warning: format '%d' expects argument of type 'int', but argument 6 has type 'size_t {aka long unsigned int}' [-Wformat=]
     dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
                      ^
   include/linux/dynamic_debug.h:134:39: note: in definition of macro 'dynamic_dev_dbg'
     __dynamic_dev_dbg(&descriptor, dev, fmt, \
                                         ^~~
>> drivers/crypto/atmel-sha.c:465:2: note: in expansion of macro 'dev_dbg'
     dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
     ^~~~~~~
   drivers/crypto/atmel-sha.c: In function 'atmel_sha_xmit_pdc':
   drivers/crypto/atmel-sha.c:494:19: warning: format '%d' expects argument of type 'int', but argument 6 has type 'size_t {aka long unsigned int}' [-Wformat=]
     dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
                      ^
   include/linux/dynamic_debug.h:134:39: note: in definition of macro 'dynamic_dev_dbg'
     __dynamic_dev_dbg(&descriptor, dev, fmt, \
                                         ^~~
   drivers/crypto/atmel-sha.c:494:2: note: in expansion of macro 'dev_dbg'
     dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
     ^~~~~~~
   drivers/crypto/atmel-sha.c: In function 'atmel_sha_xmit_dma':
   drivers/crypto/atmel-sha.c:541:19: warning: format '%d' expects argument of type 'int', but argument 6 has type 'size_t {aka long unsigned int}' [-Wformat=]
     dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
                      ^
   include/linux/dynamic_debug.h:134:39: note: in definition of macro 'dynamic_dev_dbg'
     __dynamic_dev_dbg(&descriptor, dev, fmt, \
                                         ^~~
   drivers/crypto/atmel-sha.c:541:2: note: in expansion of macro 'dev_dbg'
     dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
     ^~~~~~~
   drivers/crypto/atmel-sha.c: In function 'atmel_sha_xmit_dma_map':
>> drivers/crypto/atmel-sha.c:620:26: warning: format '%u' expects argument of type 'unsigned int', but argument 3 has type 'size_t {aka long unsigned int}' [-Wformat=]
     dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
                      ^
   In file included from include/linux/printk.h:305:0,
                    from include/linux/kernel.h:13,
                    from drivers/crypto/atmel-sha.c:17:
   drivers/crypto/atmel-sha.c: In function 'atmel_sha_update_dma_slow':
   drivers/crypto/atmel-sha.c:641:19: warning: format '%u' expects argument of type 'unsigned int', but argument 4 has type 'size_t {aka long unsigned int}' [-Wformat=]
     dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
                      ^
   include/linux/dynamic_debug.h:134:39: note: in definition of macro 'dynamic_dev_dbg'
     __dynamic_dev_dbg(&descriptor, dev, fmt, \
                                         ^~~
   drivers/crypto/atmel-sha.c:641:2: note: in expansion of macro 'dev_dbg'
     dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
     ^~~~~~~
   drivers/crypto/atmel-sha.c: In function 'atmel_sha_update_dma_start':
   drivers/crypto/atmel-sha.c:669:19: warning: format '%u' expects argument of type 'unsigned int', but argument 6 has type 'size_t {aka long unsigned int}' [-Wformat=]
     dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
                      ^
   include/linux/dynamic_debug.h:134:39: note: in definition of macro 'dynamic_dev_dbg'
     __dynamic_dev_dbg(&descriptor, dev, fmt, \
                                         ^~~
   drivers/crypto/atmel-sha.c:669:2: note: in expansion of macro 'dev_dbg'
     dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
     ^~~~~~~
   drivers/crypto/atmel-sha.c:711:27: warning: format '%u' expects argument of type 'unsigned int', but argument 3 has type 'size_t {aka long unsigned int}' [-Wformat=]
      dev_err(dd->dev, "dma %u bytes error\n",
                       ^
   In file included from include/linux/printk.h:305:0,
                    from include/linux/kernel.h:13,
                    from drivers/crypto/atmel-sha.c:17:
   drivers/crypto/atmel-sha.c: In function 'atmel_sha_finish':
   drivers/crypto/atmel-sha.c:891:19: warning: format '%d' expects argument of type 'int', but argument 6 has type 'size_t {aka long unsigned int}' [-Wformat=]
     dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
                      ^
   include/linux/dynamic_debug.h:134:39: note: in definition of macro 'dynamic_dev_dbg'
     __dynamic_dev_dbg(&descriptor, dev, fmt, \
                                         ^~~
   drivers/crypto/atmel-sha.c:891:2: note: in expansion of macro 'dev_dbg'
     dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
     ^~~~~~~

vim +465 drivers/crypto/atmel-sha.c

ebc82efa Nicolas Royer    2012-07-01  459                        size_t length, int final)
ebc82efa Nicolas Royer    2012-07-01  460  {
ebc82efa Nicolas Royer    2012-07-01  461          struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
ebc82efa Nicolas Royer    2012-07-01  462          int count, len32;
ebc82efa Nicolas Royer    2012-07-01  463          const u32 *buffer = (const u32 *)buf;
ebc82efa Nicolas Royer    2012-07-01  464  
d4905b38 Nicolas Royer    2013-02-20 @465          dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
d4905b38 Nicolas Royer    2013-02-20  466                  ctx->digcnt[1], ctx->digcnt[0], length, final);
ebc82efa Nicolas Royer    2012-07-01  467  
ebc82efa Nicolas Royer    2012-07-01  468          atmel_sha_write_ctrl(dd, 0);
ebc82efa Nicolas Royer    2012-07-01  469  
ebc82efa Nicolas Royer    2012-07-01  470          /* should be non-zero before next lines to disable clocks later */
d4905b38 Nicolas Royer    2013-02-20  471          ctx->digcnt[0] += length;
d4905b38 Nicolas Royer    2013-02-20  472          if (ctx->digcnt[0] < length)
d4905b38 Nicolas Royer    2013-02-20  473                  ctx->digcnt[1]++;
ebc82efa Nicolas Royer    2012-07-01  474  
ebc82efa Nicolas Royer    2012-07-01  475          if (final)
ebc82efa Nicolas Royer    2012-07-01  476                  dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
ebc82efa Nicolas Royer    2012-07-01  477  
ebc82efa Nicolas Royer    2012-07-01  478          len32 = DIV_ROUND_UP(length, sizeof(u32));
ebc82efa Nicolas Royer    2012-07-01  479  
ebc82efa Nicolas Royer    2012-07-01  480          dd->flags |= SHA_FLAGS_CPU;
ebc82efa Nicolas Royer    2012-07-01  481  
ebc82efa Nicolas Royer    2012-07-01  482          for (count = 0; count < len32; count++)
ebc82efa Nicolas Royer    2012-07-01  483                  atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
ebc82efa Nicolas Royer    2012-07-01  484  
ebc82efa Nicolas Royer    2012-07-01  485          return -EINPROGRESS;
ebc82efa Nicolas Royer    2012-07-01  486  }
ebc82efa Nicolas Royer    2012-07-01  487  
ebc82efa Nicolas Royer    2012-07-01  488  static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
ebc82efa Nicolas Royer    2012-07-01  489                  size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
ebc82efa Nicolas Royer    2012-07-01  490  {
ebc82efa Nicolas Royer    2012-07-01  491          struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
ebc82efa Nicolas Royer    2012-07-01  492          int len32;
ebc82efa Nicolas Royer    2012-07-01  493  
d4905b38 Nicolas Royer    2013-02-20  494          dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
d4905b38 Nicolas Royer    2013-02-20  495                  ctx->digcnt[1], ctx->digcnt[0], length1, final);
ebc82efa Nicolas Royer    2012-07-01  496  
ebc82efa Nicolas Royer    2012-07-01  497          len32 = DIV_ROUND_UP(length1, sizeof(u32));
ebc82efa Nicolas Royer    2012-07-01  498          atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
ebc82efa Nicolas Royer    2012-07-01  499          atmel_sha_write(dd, SHA_TPR, dma_addr1);
ebc82efa Nicolas Royer    2012-07-01  500          atmel_sha_write(dd, SHA_TCR, len32);
ebc82efa Nicolas Royer    2012-07-01  501  
ebc82efa Nicolas Royer    2012-07-01  502          len32 = DIV_ROUND_UP(length2, sizeof(u32));
ebc82efa Nicolas Royer    2012-07-01  503          atmel_sha_write(dd, SHA_TNPR, dma_addr2);
ebc82efa Nicolas Royer    2012-07-01  504          atmel_sha_write(dd, SHA_TNCR, len32);
ebc82efa Nicolas Royer    2012-07-01  505  
ebc82efa Nicolas Royer    2012-07-01  506          atmel_sha_write_ctrl(dd, 1);
ebc82efa Nicolas Royer    2012-07-01  507  
ebc82efa Nicolas Royer    2012-07-01  508          /* should be non-zero before next lines to disable clocks later */
d4905b38 Nicolas Royer    2013-02-20  509          ctx->digcnt[0] += length1;
d4905b38 Nicolas Royer    2013-02-20  510          if (ctx->digcnt[0] < length1)
d4905b38 Nicolas Royer    2013-02-20  511                  ctx->digcnt[1]++;
ebc82efa Nicolas Royer    2012-07-01  512  
ebc82efa Nicolas Royer    2012-07-01  513          if (final)
ebc82efa Nicolas Royer    2012-07-01  514                  dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
ebc82efa Nicolas Royer    2012-07-01  515  
ebc82efa Nicolas Royer    2012-07-01  516          dd->flags |= SHA_FLAGS_DMA_ACTIVE;
ebc82efa Nicolas Royer    2012-07-01  517  
ebc82efa Nicolas Royer    2012-07-01  518          /* Start DMA transfer */
ebc82efa Nicolas Royer    2012-07-01  519          atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
ebc82efa Nicolas Royer    2012-07-01  520  
ebc82efa Nicolas Royer    2012-07-01  521          return -EINPROGRESS;
ebc82efa Nicolas Royer    2012-07-01  522  }
ebc82efa Nicolas Royer    2012-07-01  523  
d4905b38 Nicolas Royer    2013-02-20  524  static void atmel_sha_dma_callback(void *data)
d4905b38 Nicolas Royer    2013-02-20  525  {
d4905b38 Nicolas Royer    2013-02-20  526          struct atmel_sha_dev *dd = data;
d4905b38 Nicolas Royer    2013-02-20  527  
b48b114c Cyrille Pitchen  2016-12-22  528          dd->is_async = true;
b48b114c Cyrille Pitchen  2016-12-22  529  
d4905b38 Nicolas Royer    2013-02-20  530          /* dma_lch_in - completed - wait DATRDY */
d4905b38 Nicolas Royer    2013-02-20  531          atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
d4905b38 Nicolas Royer    2013-02-20  532  }
d4905b38 Nicolas Royer    2013-02-20  533  
d4905b38 Nicolas Royer    2013-02-20  534  static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
d4905b38 Nicolas Royer    2013-02-20  535                  size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
d4905b38 Nicolas Royer    2013-02-20  536  {
d4905b38 Nicolas Royer    2013-02-20  537          struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
d4905b38 Nicolas Royer    2013-02-20  538          struct dma_async_tx_descriptor *in_desc;
d4905b38 Nicolas Royer    2013-02-20  539          struct scatterlist sg[2];
d4905b38 Nicolas Royer    2013-02-20  540  
d4905b38 Nicolas Royer    2013-02-20 @541          dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
d4905b38 Nicolas Royer    2013-02-20  542                  ctx->digcnt[1], ctx->digcnt[0], length1, final);
d4905b38 Nicolas Royer    2013-02-20  543  
d4905b38 Nicolas Royer    2013-02-20  544          dd->dma_lch_in.dma_conf.src_maxburst = 16;
d4905b38 Nicolas Royer    2013-02-20  545          dd->dma_lch_in.dma_conf.dst_maxburst = 16;
d4905b38 Nicolas Royer    2013-02-20  546  
d4905b38 Nicolas Royer    2013-02-20  547          dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
d4905b38 Nicolas Royer    2013-02-20  548  
d4905b38 Nicolas Royer    2013-02-20  549          if (length2) {
d4905b38 Nicolas Royer    2013-02-20  550                  sg_init_table(sg, 2);
d4905b38 Nicolas Royer    2013-02-20  551                  sg_dma_address(&sg[0]) = dma_addr1;
d4905b38 Nicolas Royer    2013-02-20  552                  sg_dma_len(&sg[0]) = length1;
d4905b38 Nicolas Royer    2013-02-20  553                  sg_dma_address(&sg[1]) = dma_addr2;
d4905b38 Nicolas Royer    2013-02-20  554                  sg_dma_len(&sg[1]) = length2;
d4905b38 Nicolas Royer    2013-02-20  555                  in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
d4905b38 Nicolas Royer    2013-02-20  556                          DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
d4905b38 Nicolas Royer    2013-02-20  557          } else {
d4905b38 Nicolas Royer    2013-02-20  558                  sg_init_table(sg, 1);
d4905b38 Nicolas Royer    2013-02-20  559                  sg_dma_address(&sg[0]) = dma_addr1;
d4905b38 Nicolas Royer    2013-02-20  560                  sg_dma_len(&sg[0]) = length1;
d4905b38 Nicolas Royer    2013-02-20  561                  in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
d4905b38 Nicolas Royer    2013-02-20  562                          DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
d4905b38 Nicolas Royer    2013-02-20  563          }
d4905b38 Nicolas Royer    2013-02-20  564          if (!in_desc)
b48b114c Cyrille Pitchen  2016-12-22  565                  atmel_sha_complete(dd, -EINVAL);
d4905b38 Nicolas Royer    2013-02-20  566  
d4905b38 Nicolas Royer    2013-02-20  567          in_desc->callback = atmel_sha_dma_callback;
d4905b38 Nicolas Royer    2013-02-20  568          in_desc->callback_param = dd;
d4905b38 Nicolas Royer    2013-02-20  569  
d4905b38 Nicolas Royer    2013-02-20  570          atmel_sha_write_ctrl(dd, 1);
d4905b38 Nicolas Royer    2013-02-20  571  
d4905b38 Nicolas Royer    2013-02-20  572          /* should be non-zero before next lines to disable clocks later */
d4905b38 Nicolas Royer    2013-02-20  573          ctx->digcnt[0] += length1;
d4905b38 Nicolas Royer    2013-02-20  574          if (ctx->digcnt[0] < length1)
d4905b38 Nicolas Royer    2013-02-20  575                  ctx->digcnt[1]++;
d4905b38 Nicolas Royer    2013-02-20  576  
d4905b38 Nicolas Royer    2013-02-20  577          if (final)
d4905b38 Nicolas Royer    2013-02-20  578                  dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
d4905b38 Nicolas Royer    2013-02-20  579  
d4905b38 Nicolas Royer    2013-02-20  580          dd->flags |= SHA_FLAGS_DMA_ACTIVE;
d4905b38 Nicolas Royer    2013-02-20  581  
d4905b38 Nicolas Royer    2013-02-20  582          /* Start DMA transfer */
d4905b38 Nicolas Royer    2013-02-20  583          dmaengine_submit(in_desc);
d4905b38 Nicolas Royer    2013-02-20  584          dma_async_issue_pending(dd->dma_lch_in.chan);
d4905b38 Nicolas Royer    2013-02-20  585  
d4905b38 Nicolas Royer    2013-02-20  586          return -EINPROGRESS;
d4905b38 Nicolas Royer    2013-02-20  587  }
d4905b38 Nicolas Royer    2013-02-20  588  
d4905b38 Nicolas Royer    2013-02-20  589  static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
d4905b38 Nicolas Royer    2013-02-20  590                  size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
d4905b38 Nicolas Royer    2013-02-20  591  {
d4905b38 Nicolas Royer    2013-02-20  592          if (dd->caps.has_dma)
d4905b38 Nicolas Royer    2013-02-20  593                  return atmel_sha_xmit_dma(dd, dma_addr1, length1,
d4905b38 Nicolas Royer    2013-02-20  594                                  dma_addr2, length2, final);
d4905b38 Nicolas Royer    2013-02-20  595          else
d4905b38 Nicolas Royer    2013-02-20  596                  return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
d4905b38 Nicolas Royer    2013-02-20  597                                  dma_addr2, length2, final);
d4905b38 Nicolas Royer    2013-02-20  598  }
d4905b38 Nicolas Royer    2013-02-20  599  
ebc82efa Nicolas Royer    2012-07-01  600  static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
ebc82efa Nicolas Royer    2012-07-01  601  {
ebc82efa Nicolas Royer    2012-07-01  602          struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
ebc82efa Nicolas Royer    2012-07-01  603          int bufcnt;
ebc82efa Nicolas Royer    2012-07-01  604  
ebc82efa Nicolas Royer    2012-07-01  605          atmel_sha_append_sg(ctx);
ebc82efa Nicolas Royer    2012-07-01  606          atmel_sha_fill_padding(ctx, 0);
ebc82efa Nicolas Royer    2012-07-01  607          bufcnt = ctx->bufcnt;
ebc82efa Nicolas Royer    2012-07-01  608          ctx->bufcnt = 0;
ebc82efa Nicolas Royer    2012-07-01  609  
ebc82efa Nicolas Royer    2012-07-01  610          return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
ebc82efa Nicolas Royer    2012-07-01  611  }
ebc82efa Nicolas Royer    2012-07-01  612  
ebc82efa Nicolas Royer    2012-07-01  613  static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
ebc82efa Nicolas Royer    2012-07-01  614                                          struct atmel_sha_reqctx *ctx,
ebc82efa Nicolas Royer    2012-07-01  615                                          size_t length, int final)
ebc82efa Nicolas Royer    2012-07-01  616  {
ebc82efa Nicolas Royer    2012-07-01  617          ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
d4905b38 Nicolas Royer    2013-02-20  618                                  ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
ebc82efa Nicolas Royer    2012-07-01  619          if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
ebc82efa Nicolas Royer    2012-07-01 @620                  dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
d4905b38 Nicolas Royer    2013-02-20  621                                  ctx->block_size);
b48b114c Cyrille Pitchen  2016-12-22  622                  atmel_sha_complete(dd, -EINVAL);
ebc82efa Nicolas Royer    2012-07-01  623          }

:::::: The code at line 465 was first introduced by commit
:::::: d4905b38d1f6b60761a6fd16f45ebd1fac8b6e1f crypto: atmel-sha - add support for latest release of the IP (0x410)

:::::: TO: Nicolas Royer <nicolas@eukrea.com>
:::::: CC: Herbert Xu <herbert@gondor.apana.org.au>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
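All of the warnings in the report above share one root cause: 'length', 'bufcnt' and 'ctx->buflen + ctx->block_size' are size_t, which on 64-bit targets such as alpha is long unsigned int, so '%d'/'%u' mismatch it. A minimal sketch of the kind of fix the report calls for, using flagged line 465 as the example (the C99 'z' length modifier matches size_t; the surrounding code is unchanged):

        /* %zu matches size_t on every architecture; %d only works where
         * size_t happens to be int-sized. */
        dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zu, final: %d\n",
                ctx->digcnt[1], ctx->digcnt[0], length, final);

The same '%d'/'%u' to '%zu' change would apply to the dev_dbg()/dev_err() calls flagged at lines 494, 541, 620, 641, 669, 711 and 891.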
On Thursday, 22 December 2016, 17:38:00 CET, Cyrille Pitchen wrote:

Hi Cyrille,

> This patch allows combining the AES and SHA hardware accelerators on
> some Atmel SoCs. Doing so, AES blocks are only written to/read from the
> AES hardware. Those blocks are also transferred from the AES to the SHA
> accelerator internally, without additional accesses to the system buses.
>
> Hence, the AES and SHA accelerators work in parallel to process all the
> data blocks, instead of serializing the process by (de)crypting those
> blocks first, then authenticating them afterwards, as the generic
> crypto/authenc.c driver does.
>
> Of course, both the AES and SHA hardware accelerators need to be available
> before we can start to process the data blocks. Hence we use their crypto
> request queue to synchronize both drivers.
>
> Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
> ---
>  drivers/crypto/Kconfig          |  12 +
>  drivers/crypto/atmel-aes-regs.h |  16 ++
>  drivers/crypto/atmel-aes.c      | 471 +++++++++++++++++++++++++++++++++++++++-
>  drivers/crypto/atmel-authenc.h  |  64 ++++++
>  drivers/crypto/atmel-sha-regs.h |  14 ++
>  drivers/crypto/atmel-sha.c      | 344 +++++++++++++++++++++++++++--
>  6 files changed, 906 insertions(+), 15 deletions(-)
>  create mode 100644 drivers/crypto/atmel-authenc.h
>
> diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
> index 79564785ae30..719a868d8ea1 100644
> --- a/drivers/crypto/Kconfig
> +++ b/drivers/crypto/Kconfig
> @@ -415,6 +415,18 @@ config CRYPTO_DEV_BFIN_CRC
>  	  Newer Blackfin processors have CRC hardware. Select this if you
>  	  want to use the Blackfin CRC module.
>
> +config CRYPTO_DEV_ATMEL_AUTHENC
> +	tristate "Support for Atmel IPSEC/SSL hw accelerator"
> +	depends on (ARCH_AT91 && HAS_DMA) || COMPILE_TEST
> +	select CRYPTO_AUTHENC
> +	select CRYPTO_DEV_ATMEL_AES
> +	select CRYPTO_DEV_ATMEL_SHA
> +	help
> +	  Some Atmel processors can combine the AES and SHA hw accelerators
> +	  to enhance support of IPSEC/SSL.
> +	  Select this if you want to use the Atmel modules for
> +	  authenc(hmac(shaX),Y(cbc)) algorithms.
> +
>  config CRYPTO_DEV_ATMEL_AES
>  	tristate "Support for Atmel AES hw accelerator"
>  	depends on HAS_DMA
> diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
> index 0ec04407b533..7694679802b3 100644
> --- a/drivers/crypto/atmel-aes-regs.h
> +++ b/drivers/crypto/atmel-aes-regs.h
> @@ -68,6 +68,22 @@
>  #define AES_CTRR	0x98
>  #define AES_GCMHR(x)	(0x9c + ((x) * 0x04))
>
> +#define AES_EMR		0xb0
> +#define AES_EMR_APEN		BIT(0)	/* Auto Padding Enable */
> +#define AES_EMR_APM		BIT(1)	/* Auto Padding Mode */
> +#define AES_EMR_APM_IPSEC	0x0
> +#define AES_EMR_APM_SSL		BIT(1)
> +#define AES_EMR_PLIPEN		BIT(4)	/* PLIP Enable */
> +#define AES_EMR_PLIPD		BIT(5)	/* PLIP Decipher */
> +#define AES_EMR_PADLEN_MASK	(0xFu << 8)
> +#define AES_EMR_PADLEN_OFFSET	8
> +#define AES_EMR_PADLEN(padlen)	(((padlen) << AES_EMR_PADLEN_OFFSET) &\
> +				 AES_EMR_PADLEN_MASK)
> +#define AES_EMR_NHEAD_MASK	(0xFu << 16)
> +#define AES_EMR_NHEAD_OFFSET	16
> +#define AES_EMR_NHEAD(nhead)	(((nhead) << AES_EMR_NHEAD_OFFSET) &\
> +				 AES_EMR_NHEAD_MASK)
> +
>  #define AES_TWR(x)	(0xc0 + ((x) * 0x04))
>  #define AES_ALPHAR(x)	(0xd0 + ((x) * 0x04))
>
> diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
> index 9fd2f63b8bc0..3c651e0c3113 100644
> --- a/drivers/crypto/atmel-aes.c
> +++ b/drivers/crypto/atmel-aes.c
> @@ -41,6 +41,7 @@
>  #include <linux/platform_data/crypto-atmel.h>
>  #include <dt-bindings/dma/at91.h>
>  #include "atmel-aes-regs.h"
> +#include "atmel-authenc.h"
>
>  #define ATMEL_AES_PRIORITY	300
>
> @@ -78,6 +79,7 @@
>  #define AES_FLAGS_INIT		BIT(2)
>  #define AES_FLAGS_BUSY		BIT(3)
>  #define AES_FLAGS_DUMP_REG	BIT(4)
> +#define AES_FLAGS_OWN_SHA	BIT(5)
>
>  #define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)
>
> @@ -92,6 +94,7 @@ struct atmel_aes_caps {
>  	bool	has_ctr32;
>  	bool	has_gcm;
>  	bool	has_xts;
> +	bool	has_authenc;
>  	u32	max_burst_size;
>  };
>
> @@ -144,10 +147,31 @@ struct atmel_aes_xts_ctx {
>  	u32	key2[AES_KEYSIZE_256 / sizeof(u32)];
>  };
>
> +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
> +struct atmel_aes_authenc_ctx {
> +	struct atmel_aes_base_ctx	base;
> +	struct atmel_sha_authenc_ctx	*auth;
> +};
> +#endif
> +
>  struct atmel_aes_reqctx {
>  	unsigned long	mode;
>  };
>
> +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
> +struct atmel_aes_authenc_reqctx {
> +	struct atmel_aes_reqctx	base;
> +
> +	struct scatterlist	src[2];
> +	struct scatterlist	dst[2];
> +	size_t			textlen;
> +	u32			digest[SHA512_DIGEST_SIZE / sizeof(u32)];
> +
> +	/* auth_req MUST be placed last. */
> +	struct ahash_request	auth_req;
> +};
> +#endif
> +
>  struct atmel_aes_dma {
>  	struct dma_chan		*chan;
>  	struct scatterlist	*sg;
> @@ -291,6 +315,9 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
>  		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
>  		break;
>
> +	case AES_EMR:
> +		return "EMR";
> +
>  	case AES_TWR(0):
>  	case AES_TWR(1):
>  	case AES_TWR(2):
> @@ -463,8 +490,16 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
>  	return (dd->flags & AES_FLAGS_ENCRYPT);
>  }
>
> +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
> +static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
> +#endif
> +
>  static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
>  {
> +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
> +	atmel_aes_authenc_complete(dd, err);
> +#endif
> +
>  	clk_disable(dd->iclk);
>  	dd->flags &= ~AES_FLAGS_BUSY;
>
> @@ -1931,6 +1966,407 @@ static struct crypto_alg aes_xts_alg = {
>  	}
>  };
>
> +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
> +/* authenc aead functions */
> +
> +static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
> +static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
> +				  bool is_async);
> +static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
> +				      bool is_async);
> +static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
> +static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
> +				   bool is_async);
> +
> +static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
> +{
> +	struct aead_request *req = aead_request_cast(dd->areq);
> +	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
> +
> +	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
> +		atmel_sha_authenc_abort(&rctx->auth_req);
> +	dd->flags &= ~AES_FLAGS_OWN_SHA;
> +}
> +
> +static int atmel_aes_authenc_copy_assoc(struct aead_request *req)
> +{
> +	size_t buflen, assoclen = req->assoclen;
> +	off_t skip = 0;
> +	u8 buf[256];
> +
> +	while (assoclen) {
> +		buflen = min_t(size_t, assoclen, sizeof(buf));
> +
> +		if (sg_pcopy_to_buffer(req->src, sg_nents(req->src),
> +				       buf, buflen, skip) != buflen)
> +			return -EINVAL;
> +
> +		if (sg_pcopy_from_buffer(req->dst, sg_nents(req->dst),
> +					 buf, buflen, skip) != buflen)
> +			return -EINVAL;
> +
> +		skip += buflen;
> +		assoclen -= buflen;
> +	}

This seems to be a very expensive operation. Wouldn't it be easier, leaner
and with one less memcpy to use the approach of crypto_authenc_copy_assoc?

Instead of copying crypto_authenc_copy_assoc, what about carving the logic
in crypto/authenc.c out into generic aead helper code, as we need to add
that to other AEAD implementations?

> +
> +	return 0;
> +}
> +
> +static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
> +{
> +	struct aead_request *req = aead_request_cast(dd->areq);
> +	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> +	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
> +	int err;
> +
> +	atmel_aes_set_mode(dd, &rctx->base);
> +
> +	err = atmel_aes_hw_init(dd);
> +	if (err)
> +		return atmel_aes_complete(dd, err);
> +
> +	return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
> +					  atmel_aes_authenc_init, dd);
> +}
> +
> +static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
> +				  bool is_async)
> +{
> +	struct aead_request *req = aead_request_cast(dd->areq);
> +	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
> +
> +	if (is_async)
> +		dd->is_async = true;
> +	if (err)
> +		return atmel_aes_complete(dd, err);
> +
> +	/* If here, we've got the ownership of the SHA device. */
> +	dd->flags |= AES_FLAGS_OWN_SHA;
> +
> +	/* Configure the SHA device. */
> +	return atmel_sha_authenc_init(&rctx->auth_req,
> +				      req->src, req->assoclen,
> +				      rctx->textlen,
> +				      atmel_aes_authenc_transfer, dd);
> +}
> +
> +static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
> +				      bool is_async)
> +{
> +	struct aead_request *req = aead_request_cast(dd->areq);
> +	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
> +	bool enc = atmel_aes_is_encrypt(dd);
> +	struct scatterlist *src, *dst;
> +	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
> +	u32 emr;
> +
> +	if (is_async)
> +		dd->is_async = true;
> +	if (err)
> +		return atmel_aes_complete(dd, err);
> +
> +	/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
> +	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
> +	dst = src;
> +
> +	if (req->src != req->dst) {
> +		err = atmel_aes_authenc_copy_assoc(req);
> +		if (err)
> +			return atmel_aes_complete(dd, err);
> +
> +		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
> +	}
> +
> +	/* Configure the AES device. */
> +	memcpy(iv, req->iv, sizeof(iv));
> +
> +	/*
> +	 * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to
> +	 * 'true' even if the data transfer is actually performed by the CPU (so
> +	 * not by the DMA) because we must force the AES_MR_SMOD bitfield to the
> +	 * value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD
> +	 * must be set to *_MR_SMOD_IDATAR0.
> +	 */
> +	atmel_aes_write_ctrl(dd, true, iv);
> +	emr = AES_EMR_PLIPEN;
> +	if (!enc)
> +		emr |= AES_EMR_PLIPD;
> +	atmel_aes_write(dd, AES_EMR, emr);
> +
> +	/* Transfer data. */
> +	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
> +				   atmel_aes_authenc_digest);
> +}
> +
> +static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
> +{
> +	struct aead_request *req = aead_request_cast(dd->areq);
> +	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
> +
> +	/* atmel_sha_authenc_final() releases the SHA device. */
> +	dd->flags &= ~AES_FLAGS_OWN_SHA;
> +	return atmel_sha_authenc_final(&rctx->auth_req,
> +				       rctx->digest, sizeof(rctx->digest),
> +				       atmel_aes_authenc_final, dd);
> +}
> +
> +static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
> +				   bool is_async)
> +{
> +	struct aead_request *req = aead_request_cast(dd->areq);
> +	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> +	bool enc = atmel_aes_is_encrypt(dd);
> +	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
> +	u32 offs, authsize;
> +
> +	if (is_async)
> +		dd->is_async = true;
> +	if (err)
> +		goto complete;
> +
> +	offs = req->assoclen + rctx->textlen;
> +	authsize = crypto_aead_authsize(tfm);
> +	if (enc) {
> +		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
> +	} else {
> +		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
> +		if (crypto_memneq(idigest, odigest, authsize))
> +			err = -EBADMSG;
> +	}
> +
> +complete:
> +	return atmel_aes_complete(dd, err);
> +}
> +
> +static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
> +				    unsigned int keylen)
> +{
> +	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
> +	struct crypto_authenc_keys keys;
> +	u32 flags;
> +	int err;
> +
> +	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
> +		goto badkey;
> +
> +	if (keys.enckeylen > sizeof(ctx->base.key))
> +		goto badkey;
> +
> +	/* Save auth key. */
> +	flags = crypto_aead_get_flags(tfm);
> +	err = atmel_sha_authenc_setkey(ctx->auth,
> +				       keys.authkey, keys.authkeylen,
> +				       &flags);
> +	crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
> +	if (err)
> +		return err;
> +
> +	/* Save enc key. */
> +	ctx->base.keylen = keys.enckeylen;
> +	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);

memzero_explicit(keys) please

> +
> +	return 0;
> +
> +badkey:
> +	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
> +	return -EINVAL;
> +}
> +
> +static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
> +				      unsigned long auth_mode)
> +{
> +	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
> +	unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
> +
> +	ctx->auth = atmel_sha_authenc_spawn(auth_mode);
> +	if (IS_ERR(ctx->auth))
> +		return PTR_ERR(ctx->auth);
> +
> +	crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
> +				      auth_reqsize));
> +	ctx->base.start = atmel_aes_authenc_start;
> +
> +	return 0;
> +}
> +
> +static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
> +{
> +	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
> +}
> +
> +static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
> +{
> +	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
> +}
> +
> +static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
> +{
> +	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
> +}
> +
> +static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
> +{
> +	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
> +}
> +
> +static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
> +{
> +	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
> +}
> +
> +static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
> +{
> +	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
> +
> +	atmel_sha_authenc_free(ctx->auth);
> +}
> +
> +static int atmel_aes_authenc_crypt(struct aead_request *req,
> +				   unsigned long mode)
> +{
> +	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> +	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
> +	u32 authsize = crypto_aead_authsize(tfm);
> +	bool enc = (mode & AES_FLAGS_ENCRYPT);
> +	struct atmel_aes_dev *dd;
> +
> +	/* Compute text length. */
> +	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);

Is there somewhere a check that authsize is always < req->cryptlen (at least
it escaped me)? Note, this logic will be exposed to user space which may do
funky things.

> +
> +	/*
> +	 * Currently, empty messages are not supported yet:
> +	 * the SHA auto-padding can be used only on non-empty messages.
> +	 * Hence a special case needs to be implemented for empty message.
> +	 */
> +	if (!rctx->textlen && !req->assoclen)
> +		return -EINVAL;
> +
> +	rctx->base.mode = mode;
> +	ctx->block_size = AES_BLOCK_SIZE;
> +
> +	dd = atmel_aes_find_dev(ctx);
> +	if (!dd)
> +		return -ENODEV;
> +
> +	return atmel_aes_handle_queue(dd, &req->base);

Ciao
Stephan
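For reference, the crypto_authenc_copy_assoc() approach Stephan points to delegates the scatterlist-aware copy to the default null skcipher instead of bouncing through a stack buffer. A sketch from memory of the v4.9-era crypto/authenc.c code is below; ctx->null is assumed to hold the transform returned by crypto_get_default_null_skcipher(), so treat the details as approximate rather than authoritative:

static int crypto_authenc_copy_assoc(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
        SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);

        skcipher_request_set_tfm(skreq, ctx->null);
        skcipher_request_set_callback(skreq, aead_request_flags(req),
                                      NULL, NULL);
        /* "Encrypting" with ecb(cipher_null) copies assoclen bytes from
         * req->src to req->dst, walking both scatterlists directly and
         * thus avoiding an intermediate bounce buffer. */
        skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
                                   NULL);

        return crypto_skcipher_encrypt(skreq);
}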
Hi Stephan,

On 23/12/2016 at 12:34, Stephan Müller wrote:
> On Thursday, 22 December 2016, 17:38:00 CET, Cyrille Pitchen wrote:
>
> Hi Cyrille,
>
...
>> +static int atmel_aes_authenc_copy_assoc(struct aead_request *req)
>> +{
>> +	size_t buflen, assoclen = req->assoclen;
>> +	off_t skip = 0;
>> +	u8 buf[256];
>> +
>> +	while (assoclen) {
>> +		buflen = min_t(size_t, assoclen, sizeof(buf));
>> +
>> +		if (sg_pcopy_to_buffer(req->src, sg_nents(req->src),
>> +				       buf, buflen, skip) != buflen)
>> +			return -EINVAL;
>> +
>> +		if (sg_pcopy_from_buffer(req->dst, sg_nents(req->dst),
>> +					 buf, buflen, skip) != buflen)
>> +			return -EINVAL;
>> +
>> +		skip += buflen;
>> +		assoclen -= buflen;
>> +	}
>
> This seems to be a very expensive operation. Wouldn't it be easier, leaner
> and with one less memcpy to use the approach of crypto_authenc_copy_assoc?
>
> Instead of copying crypto_authenc_copy_assoc, what about carving the logic
> in crypto/authenc.c out into generic aead helper code, as we need to add
> that to other AEAD implementations?

Before writing this function, I checked how the crypto/authenc.c driver
handles the copy of the associated data, hence crypto_authenc_copy_assoc().

I have to admit I didn't perform any benchmark to compare the two
implementations, but I did try to understand how
crypto_authenc_copy_assoc() works. At first look, this function seems very
simple, but I guess all the black magic is hidden by the call of
crypto_skcipher_encrypt() on the default null transform, which is
implemented using the ecb(cipher_null) algorithm.

When I wrote my function, I thought that this ecb(cipher_null) algorithm was
implemented by combining crypto_ecb_crypt() from crypto/ecb.c with
null_crypt() from crypto/crypto_null.c. Hence I thought there would be a lot
of function call overhead to copy only a few bytes, but checking again I
realize that the ecb(cipher_null) algorithm is directly implemented by
skcipher_null_crypt(), still from crypto/crypto_null.c. So yes, maybe you're
right: it could be better to reuse what was done in
crypto_authenc_copy_assoc() from crypto/authenc.c.

This way we would need half as many memcpy() calls, hence I agree with you.

...

>> +	/* Save enc key. */
>> +	ctx->base.keylen = keys.enckeylen;
>> +	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);
>
> memzero_explicit(keys) please

good point :)

...

>> +	/* Compute text length. */
>> +	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);
>
> Is there somewhere a check that authsize is always < req->cryptlen (at
> least it escaped me)? Note, this logic will be exposed to user space
> which may do funky things.

I thought those two sizes were always set by the kernel only, but I admit I
didn't check my assumption. If you tell me they could be set directly from
userspace, then yes, I agree with you: I need to add a test.

...

> Ciao
> Stephan

Thanks for your review! :)

Best regards,

Cyrille
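The memzero_explicit() change agreed on above amounts to wiping the on-stack copy of the key material before atmel_aes_authenc_setkey() returns, on both the success and the error path. A hedged sketch of the intended change (only the tail of the function is shown; the rest is unchanged from the patch):

        /* Save enc key. */
        ctx->base.keylen = keys.enckeylen;
        memcpy(ctx->base.key, keys.enckey, keys.enckeylen);

        /* Wipe the stack copy of the authenc keys; memzero_explicit()
         * cannot be optimized away like a plain memset() could be. */
        memzero_explicit(&keys, sizeof(keys));
        return 0;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;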
On Monday, 9 January 2017, 19:24:12 CET, Cyrille Pitchen wrote:

Hi Cyrille,

> >> +static int atmel_aes_authenc_copy_assoc(struct aead_request *req)
> >> +{
> >> +	size_t buflen, assoclen = req->assoclen;
> >> +	off_t skip = 0;
> >> +	u8 buf[256];
> >> +
> >> +	while (assoclen) {
> >> +		buflen = min_t(size_t, assoclen, sizeof(buf));
> >> +
> >> +		if (sg_pcopy_to_buffer(req->src, sg_nents(req->src),
> >> +				       buf, buflen, skip) != buflen)
> >> +			return -EINVAL;
> >> +
> >> +		if (sg_pcopy_from_buffer(req->dst, sg_nents(req->dst),
> >> +					 buf, buflen, skip) != buflen)
> >> +			return -EINVAL;
> >> +
> >> +		skip += buflen;
> >> +		assoclen -= buflen;
> >> +	}
> >
> > This seems to be a very expensive operation. Wouldn't it be easier,
> > leaner and with one less memcpy to use the approach of
> > crypto_authenc_copy_assoc?
> >
> > Instead of copying crypto_authenc_copy_assoc, what about carving the
> > logic in crypto/authenc.c out into generic aead helper code, as we need
> > to add that to other AEAD implementations?
>
> Before writing this function, I checked how the crypto/authenc.c driver
> handles the copy of the associated data, hence crypto_authenc_copy_assoc().
>
> I have to admit I didn't perform any benchmark to compare the two
> implementations, but I did try to understand how
> crypto_authenc_copy_assoc() works. At first look, this function seems very
> simple, but I guess all the black magic is hidden by the call of
> crypto_skcipher_encrypt() on the default null transform, which is
> implemented using the ecb(cipher_null) algorithm.

The magic in the null cipher is that it not only performs a memcpy, but
iterates through the SGL and performs a memcpy on each part of the
source/destination SGL.

I will release a patch set later today -- the coding is completed, but
testing is still under way. That patch now allows you to make only one
function call without special init/deinit code.

> When I wrote my function, I thought that this ecb(cipher_null) algorithm
> was implemented by combining crypto_ecb_crypt() from crypto/ecb.c with
> null_crypt() from crypto/crypto_null.c. Hence I thought there would be a
> lot of function call overhead to copy only a few bytes, but checking again
> I realize that the ecb(cipher_null) algorithm is directly implemented by
> skcipher_null_crypt(), still from crypto/crypto_null.c. So yes, maybe
> you're right: it could be better to reuse what was done in
> crypto_authenc_copy_assoc() from crypto/authenc.c.
>
> This way we would need half as many memcpy() calls, hence I agree with you.

In addition to the extra memcpy, the patch I want to air shortly (and which
I hope is going to be accepted) should reduce the complexity of your code in
this corner.

...

> >> +static int atmel_aes_authenc_crypt(struct aead_request *req,
> >> +				   unsigned long mode)
> >> +{
> >> +	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
> >> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> >> +	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
> >> +	u32 authsize = crypto_aead_authsize(tfm);
> >> +	bool enc = (mode & AES_FLAGS_ENCRYPT);
> >> +	struct atmel_aes_dev *dd;
> >> +
> >> +	/* Compute text length. */
> >> +	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);
> >
> > Is there somewhere a check that authsize is always < req->cryptlen (at
> > least it escaped me)? Note, this logic will be exposed to user space
> > which may do funky things.
>
> I thought those two sizes were always set by the kernel only, but I admit
> I didn't check my assumption. If you tell me they could be set directly
> from userspace, then yes, I agree with you: I need to add a test.

Then I would like to ask you to add that check -- as this check is cheap, it
should not affect performance.

Ciao
Stephan
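The check requested above is a simple bounds test before the text length is derived. A hedged sketch of what the top of atmel_aes_authenc_crypt() would gain, assuming -EINVAL as the error code:

        /* On decryption the authentication tag is carved off the end of
         * req->cryptlen, so reject requests too short to contain it;
         * req->cryptlen is user-controllable, e.g. via AF_ALG. */
        if (!enc && req->cryptlen < authsize)
                return -EINVAL;

        /* Compute text length. */
        rctx->textlen = req->cryptlen - (enc ? 0 : authsize);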
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 79564785ae30..719a868d8ea1 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -415,6 +415,18 @@ config CRYPTO_DEV_BFIN_CRC Newer Blackfin processors have CRC hardware. Select this if you want to use the Blackfin CRC module. +config CRYPTO_DEV_ATMEL_AUTHENC + tristate "Support for Atmel IPSEC/SSL hw accelerator" + depends on (ARCH_AT91 && HAS_DMA) || COMPILE_TEST + select CRYPTO_AUTHENC + select CRYPTO_DEV_ATMEL_AES + select CRYPTO_DEV_ATMEL_SHA + help + Some Atmel processors can combine the AES and SHA hw accelerators + to enhance support of IPSEC/SSL. + Select this if you want to use the Atmel modules for + authenc(hmac(shaX),Y(cbc)) algorithms. + config CRYPTO_DEV_ATMEL_AES tristate "Support for Atmel AES hw accelerator" depends on HAS_DMA diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h index 0ec04407b533..7694679802b3 100644 --- a/drivers/crypto/atmel-aes-regs.h +++ b/drivers/crypto/atmel-aes-regs.h @@ -68,6 +68,22 @@ #define AES_CTRR 0x98 #define AES_GCMHR(x) (0x9c + ((x) * 0x04)) +#define AES_EMR 0xb0 +#define AES_EMR_APEN BIT(0) /* Auto Padding Enable */ +#define AES_EMR_APM BIT(1) /* Auto Padding Mode */ +#define AES_EMR_APM_IPSEC 0x0 +#define AES_EMR_APM_SSL BIT(1) +#define AES_EMR_PLIPEN BIT(4) /* PLIP Enable */ +#define AES_EMR_PLIPD BIT(5) /* PLIP Decipher */ +#define AES_EMR_PADLEN_MASK (0xFu << 8) +#define AES_EMR_PADLEN_OFFSET 8 +#define AES_EMR_PADLEN(padlen) (((padlen) << AES_EMR_PADLEN_OFFSET) &\ + AES_EMR_PADLEN_MASK) +#define AES_EMR_NHEAD_MASK (0xFu << 16) +#define AES_EMR_NHEAD_OFFSET 16 +#define AES_EMR_NHEAD(nhead) (((nhead) << AES_EMR_NHEAD_OFFSET) &\ + AES_EMR_NHEAD_MASK) + #define AES_TWR(x) (0xc0 + ((x) * 0x04)) #define AES_ALPHAR(x) (0xd0 + ((x) * 0x04)) diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 9fd2f63b8bc0..3c651e0c3113 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -41,6 +41,7 @@ #include <linux/platform_data/crypto-atmel.h> #include <dt-bindings/dma/at91.h> #include "atmel-aes-regs.h" +#include "atmel-authenc.h" #define ATMEL_AES_PRIORITY 300 @@ -78,6 +79,7 @@ #define AES_FLAGS_INIT BIT(2) #define AES_FLAGS_BUSY BIT(3) #define AES_FLAGS_DUMP_REG BIT(4) +#define AES_FLAGS_OWN_SHA BIT(5) #define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY) @@ -92,6 +94,7 @@ struct atmel_aes_caps { bool has_ctr32; bool has_gcm; bool has_xts; + bool has_authenc; u32 max_burst_size; }; @@ -144,10 +147,31 @@ struct atmel_aes_xts_ctx { u32 key2[AES_KEYSIZE_256 / sizeof(u32)]; }; +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +struct atmel_aes_authenc_ctx { + struct atmel_aes_base_ctx base; + struct atmel_sha_authenc_ctx *auth; +}; +#endif + struct atmel_aes_reqctx { unsigned long mode; }; +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +struct atmel_aes_authenc_reqctx { + struct atmel_aes_reqctx base; + + struct scatterlist src[2]; + struct scatterlist dst[2]; + size_t textlen; + u32 digest[SHA512_DIGEST_SIZE / sizeof(u32)]; + + /* auth_req MUST be place last. 
*/ + struct ahash_request auth_req; +}; +#endif + struct atmel_aes_dma { struct dma_chan *chan; struct scatterlist *sg; @@ -291,6 +315,9 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz) snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2); break; + case AES_EMR: + return "EMR"; + case AES_TWR(0): case AES_TWR(1): case AES_TWR(2): @@ -463,8 +490,16 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd) return (dd->flags & AES_FLAGS_ENCRYPT); } +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err); +#endif + static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) { +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC + atmel_aes_authenc_complete(dd, err); +#endif + clk_disable(dd->iclk); dd->flags &= ~AES_FLAGS_BUSY; @@ -1931,6 +1966,407 @@ static struct crypto_alg aes_xts_alg = { } }; +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +/* authenc aead functions */ + +static int atmel_aes_authenc_start(struct atmel_aes_dev *dd); +static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err, + bool is_async); +static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err, + bool is_async); +static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd); +static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err, + bool is_async); + +static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err) +{ + struct aead_request *req = aead_request_cast(dd->areq); + struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); + + if (err && (dd->flags & AES_FLAGS_OWN_SHA)) + atmel_sha_authenc_abort(&rctx->auth_req); + dd->flags &= ~AES_FLAGS_OWN_SHA; +} + +static int atmel_aes_authenc_copy_assoc(struct aead_request *req) +{ + size_t buflen, assoclen = req->assoclen; + off_t skip = 0; + u8 buf[256]; + + while (assoclen) { + buflen = min_t(size_t, assoclen, sizeof(buf)); + + if (sg_pcopy_to_buffer(req->src, sg_nents(req->src), + buf, buflen, skip) != buflen) + return -EINVAL; + + if (sg_pcopy_from_buffer(req->dst, sg_nents(req->dst), + buf, buflen, skip) != buflen) + return -EINVAL; + + skip += buflen; + assoclen -= buflen; + } + + return 0; +} + +static int atmel_aes_authenc_start(struct atmel_aes_dev *dd) +{ + struct aead_request *req = aead_request_cast(dd->areq); + struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); + int err; + + atmel_aes_set_mode(dd, &rctx->base); + + err = atmel_aes_hw_init(dd); + if (err) + return atmel_aes_complete(dd, err); + + return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth, + atmel_aes_authenc_init, dd); +} + +static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err, + bool is_async) +{ + struct aead_request *req = aead_request_cast(dd->areq); + struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); + + if (is_async) + dd->is_async = true; + if (err) + return atmel_aes_complete(dd, err); + + /* If here, we've got the ownership of the SHA device. */ + dd->flags |= AES_FLAGS_OWN_SHA; + + /* Configure the SHA device. 
*/ + return atmel_sha_authenc_init(&rctx->auth_req, + req->src, req->assoclen, + rctx->textlen, + atmel_aes_authenc_transfer, dd); +} + +static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err, + bool is_async) +{ + struct aead_request *req = aead_request_cast(dd->areq); + struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); + bool enc = atmel_aes_is_encrypt(dd); + struct scatterlist *src, *dst; + u32 iv[AES_BLOCK_SIZE / sizeof(u32)]; + u32 emr; + + if (is_async) + dd->is_async = true; + if (err) + return atmel_aes_complete(dd, err); + + /* Prepare src and dst scatter-lists to transfer cipher/plain texts. */ + src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); + dst = src; + + if (req->src != req->dst) { + err = atmel_aes_authenc_copy_assoc(req); + if (err) + return atmel_aes_complete(dd, err); + + dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); + } + + /* Configure the AES device. */ + memcpy(iv, req->iv, sizeof(iv)); + + /* + * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to + * 'true' even if the data transfer is actually performed by the CPU (so + * not by the DMA) because we must force the AES_MR_SMOD bitfield to the + * value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD + * must be set to *_MR_SMOD_IDATAR0. + */ + atmel_aes_write_ctrl(dd, true, iv); + emr = AES_EMR_PLIPEN; + if (!enc) + emr |= AES_EMR_PLIPD; + atmel_aes_write(dd, AES_EMR, emr); + + /* Transfer data. */ + return atmel_aes_dma_start(dd, src, dst, rctx->textlen, + atmel_aes_authenc_digest); +} + +static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd) +{ + struct aead_request *req = aead_request_cast(dd->areq); + struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); + + /* atmel_sha_authenc_final() releases the SHA device. */ + dd->flags &= ~AES_FLAGS_OWN_SHA; + return atmel_sha_authenc_final(&rctx->auth_req, + rctx->digest, sizeof(rctx->digest), + atmel_aes_authenc_final, dd); +} + +static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err, + bool is_async) +{ + struct aead_request *req = aead_request_cast(dd->areq); + struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + bool enc = atmel_aes_is_encrypt(dd); + u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest; + u32 offs, authsize; + + if (is_async) + dd->is_async = true; + if (err) + goto complete; + + offs = req->assoclen + rctx->textlen; + authsize = crypto_aead_authsize(tfm); + if (enc) { + scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1); + } else { + scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0); + if (crypto_memneq(idigest, odigest, authsize)) + err = -EBADMSG; + } + +complete: + return atmel_aes_complete(dd, err); +} + +static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_authenc_keys keys; + u32 flags; + int err; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + + if (keys.enckeylen > sizeof(ctx->base.key)) + goto badkey; + + /* Save auth key. */ + flags = crypto_aead_get_flags(tfm); + err = atmel_sha_authenc_setkey(ctx->auth, + keys.authkey, keys.authkeylen, + &flags); + crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK); + if (err) + return err; + + /* Save enc key. 
*/ + ctx->base.keylen = keys.enckeylen; + memcpy(ctx->base.key, keys.enckey, keys.enckeylen); + + return 0; + +badkey: + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm, + unsigned long auth_mode) +{ + struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); + unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize(); + + ctx->auth = atmel_sha_authenc_spawn(auth_mode); + if (IS_ERR(ctx->auth)) + return PTR_ERR(ctx->auth); + + crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) + + auth_reqsize)); + ctx->base.start = atmel_aes_authenc_start; + + return 0; +} + +static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm) +{ + return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1); +} + +static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm) +{ + return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224); +} + +static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm) +{ + return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256); +} + +static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm) +{ + return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384); +} + +static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm) +{ + return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512); +} + +static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm) +{ + struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); + + atmel_sha_authenc_free(ctx->auth); +} + +static int atmel_aes_authenc_crypt(struct aead_request *req, + unsigned long mode) +{ + struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm); + u32 authsize = crypto_aead_authsize(tfm); + bool enc = (mode & AES_FLAGS_ENCRYPT); + struct atmel_aes_dev *dd; + + /* Compute text length. */ + rctx->textlen = req->cryptlen - (enc ? 0 : authsize); + + /* + * Empty messages are not supported yet: + * the SHA auto-padding can be used only on non-empty messages. + * Hence a special case needs to be implemented for empty messages. 
+ */ + if (!rctx->textlen && !req->assoclen) + return -EINVAL; + + rctx->base.mode = mode; + ctx->block_size = AES_BLOCK_SIZE; + + dd = atmel_aes_find_dev(ctx); + if (!dd) + return -ENODEV; + + return atmel_aes_handle_queue(dd, &req->base); +} + +static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req) +{ + return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT); +} + +static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req) +{ + return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC); +} + +static struct aead_alg aes_authenc_algs[] = { +{ + .setkey = atmel_aes_authenc_setkey, + .encrypt = atmel_aes_authenc_cbc_aes_encrypt, + .decrypt = atmel_aes_authenc_cbc_aes_decrypt, + .init = atmel_aes_authenc_hmac_sha1_init_tfm, + .exit = atmel_aes_authenc_exit_tfm, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes", + .cra_priority = ATMEL_AES_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, +{ + .setkey = atmel_aes_authenc_setkey, + .encrypt = atmel_aes_authenc_cbc_aes_encrypt, + .decrypt = atmel_aes_authenc_cbc_aes_decrypt, + .init = atmel_aes_authenc_hmac_sha224_init_tfm, + .exit = atmel_aes_authenc_exit_tfm, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes", + .cra_priority = ATMEL_AES_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, +{ + .setkey = atmel_aes_authenc_setkey, + .encrypt = atmel_aes_authenc_cbc_aes_encrypt, + .decrypt = atmel_aes_authenc_cbc_aes_decrypt, + .init = atmel_aes_authenc_hmac_sha256_init_tfm, + .exit = atmel_aes_authenc_exit_tfm, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes", + .cra_priority = ATMEL_AES_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, +{ + .setkey = atmel_aes_authenc_setkey, + .encrypt = atmel_aes_authenc_cbc_aes_encrypt, + .decrypt = atmel_aes_authenc_cbc_aes_decrypt, + .init = atmel_aes_authenc_hmac_sha384_init_tfm, + .exit = atmel_aes_authenc_exit_tfm, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + + .base = { + .cra_name = "authenc(hmac(sha384),cbc(aes))", + .cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes", + .cra_priority = ATMEL_AES_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, +{ + .setkey = atmel_aes_authenc_setkey, + .encrypt = atmel_aes_authenc_cbc_aes_encrypt, + .decrypt = atmel_aes_authenc_cbc_aes_decrypt, + .init = atmel_aes_authenc_hmac_sha512_init_tfm, + .exit = atmel_aes_authenc_exit_tfm, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes", + .cra_priority = ATMEL_AES_PRIORITY, + 
.cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, +}; +#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */ /* Probe functions */ @@ -2040,6 +2476,12 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) { int i; +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC + if (dd->caps.has_authenc) + for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) + crypto_unregister_aead(&aes_authenc_algs[i]); +#endif + if (dd->caps.has_xts) crypto_unregister_alg(&aes_xts_alg); @@ -2081,8 +2523,25 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) goto err_aes_xts_alg; } +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC + if (dd->caps.has_authenc) { + for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) { + err = crypto_register_aead(&aes_authenc_algs[i]); + if (err) + goto err_aes_authenc_alg; + } + } +#endif + return 0; +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC + /* i = ARRAY_SIZE(aes_authenc_algs); */ +err_aes_authenc_alg: + for (j = 0; j < i; j++) + crypto_unregister_aead(&aes_authenc_algs[j]); + crypto_unregister_alg(&aes_xts_alg); +#endif err_aes_xts_alg: crypto_unregister_aead(&aes_gcm_alg); err_aes_gcm_alg: @@ -2103,6 +2562,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) dd->caps.has_ctr32 = 0; dd->caps.has_gcm = 0; dd->caps.has_xts = 0; + dd->caps.has_authenc = 0; dd->caps.max_burst_size = 1; /* keep only major version number */ @@ -2113,6 +2573,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) dd->caps.has_ctr32 = 1; dd->caps.has_gcm = 1; dd->caps.has_xts = 1; + dd->caps.has_authenc = 1; dd->caps.max_burst_size = 4; break; case 0x200: @@ -2271,6 +2732,13 @@ static int atmel_aes_probe(struct platform_device *pdev) atmel_aes_get_cap(aes_dd); +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC + if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) { + err = -EPROBE_DEFER; + goto iclk_unprepare; + } +#endif + err = atmel_aes_buff_init(aes_dd); if (err) goto err_aes_buff; @@ -2307,7 +2775,8 @@ static int atmel_aes_probe(struct platform_device *pdev) tasklet_kill(&aes_dd->done_task); tasklet_kill(&aes_dd->queue_task); aes_dd_err: - dev_err(dev, "initialization failed.\n"); + if (err != -EPROBE_DEFER) + dev_err(dev, "initialization failed.\n"); return err; } diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h new file mode 100644 index 000000000000..2a60d1224143 --- /dev/null +++ b/drivers/crypto/atmel-authenc.h @@ -0,0 +1,64 @@ +/* + * API for Atmel Secure Protocol Layers Improved Performances (SPLIP) + * + * Copyright (C) 2016 Atmel Corporation + * + * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. 
+ */ + +#ifndef __ATMEL_AUTHENC_H__ +#define __ATMEL_AUTHENC_H__ + +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC + +#include <crypto/authenc.h> +#include <crypto/hash.h> +#include <crypto/sha.h> +#include "atmel-sha-regs.h" + +struct atmel_aes_dev; +typedef int (*atmel_aes_authenc_fn_t)(struct atmel_aes_dev *, int, bool); + +struct atmel_sha_authenc_ctx; + +bool atmel_sha_authenc_is_ready(void); +unsigned int atmel_sha_authenc_get_reqsize(void); + +struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode); +void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth); +int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth, + const u8 *key, unsigned int keylen, + u32 *flags); + +int atmel_sha_authenc_schedule(struct ahash_request *req, + struct atmel_sha_authenc_ctx *auth, + atmel_aes_authenc_fn_t cb, + struct atmel_aes_dev *dd); +int atmel_sha_authenc_init(struct ahash_request *req, + struct scatterlist *assoc, unsigned int assoclen, + unsigned int textlen, + atmel_aes_authenc_fn_t cb, + struct atmel_aes_dev *dd); +int atmel_sha_authenc_final(struct ahash_request *req, + u32 *digest, unsigned int digestlen, + atmel_aes_authenc_fn_t cb, + struct atmel_aes_dev *dd); +void atmel_sha_authenc_abort(struct ahash_request *req); + +#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */ + +#endif /* __ATMEL_AUTHENC_H__ */ diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h index 1b9f3d33079e..1b0eba4a2706 100644 --- a/drivers/crypto/atmel-sha-regs.h +++ b/drivers/crypto/atmel-sha-regs.h @@ -29,6 +29,20 @@ #define SHA_MR_HMAC (1 << 11) #define SHA_MR_DUALBUFF (1 << 16) +#define SHA_FLAGS_ALGO_MASK SHA_MR_ALGO_MASK +#define SHA_FLAGS_SHA1 SHA_MR_ALGO_SHA1 +#define SHA_FLAGS_SHA256 SHA_MR_ALGO_SHA256 +#define SHA_FLAGS_SHA384 SHA_MR_ALGO_SHA384 +#define SHA_FLAGS_SHA512 SHA_MR_ALGO_SHA512 +#define SHA_FLAGS_SHA224 SHA_MR_ALGO_SHA224 +#define SHA_FLAGS_HMAC SHA_MR_HMAC +#define SHA_FLAGS_HMAC_SHA1 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA1) +#define SHA_FLAGS_HMAC_SHA256 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA256) +#define SHA_FLAGS_HMAC_SHA384 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA384) +#define SHA_FLAGS_HMAC_SHA512 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA512) +#define SHA_FLAGS_HMAC_SHA224 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA224) +#define SHA_FLAGS_MODE_MASK (SHA_FLAGS_HMAC | SHA_FLAGS_ALGO_MASK) + #define SHA_IER 0x10 #define SHA_IDR 0x14 #define SHA_IMR 0x18 diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 78c3c02e4483..cc5294dbead4 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -41,6 +41,7 @@ #include <crypto/internal/hash.h> #include <linux/platform_data/crypto-atmel.h> #include "atmel-sha-regs.h" +#include "atmel-authenc.h" /* SHA flags */ #define SHA_FLAGS_BUSY BIT(0) @@ -52,19 +53,6 @@ #define SHA_FLAGS_DMA_READY BIT(6) /* bits[11:8] are reserved. 
*/ -#define SHA_FLAGS_ALGO_MASK SHA_MR_ALGO_MASK -#define SHA_FLAGS_SHA1 SHA_MR_ALGO_SHA1 -#define SHA_FLAGS_SHA256 SHA_MR_ALGO_SHA256 -#define SHA_FLAGS_SHA384 SHA_MR_ALGO_SHA384 -#define SHA_FLAGS_SHA512 SHA_MR_ALGO_SHA512 -#define SHA_FLAGS_SHA224 SHA_MR_ALGO_SHA224 -#define SHA_FLAGS_HMAC SHA_MR_HMAC -#define SHA_FLAGS_HMAC_SHA1 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA1) -#define SHA_FLAGS_HMAC_SHA256 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA256) -#define SHA_FLAGS_HMAC_SHA384 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA384) -#define SHA_FLAGS_HMAC_SHA512 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA512) -#define SHA_FLAGS_HMAC_SHA224 (SHA_FLAGS_HMAC | SHA_FLAGS_SHA224) -#define SHA_FLAGS_MODE_MASK (SHA_FLAGS_HMAC | SHA_FLAGS_ALGO_MASK) #define SHA_FLAGS_FINUP BIT(16) #define SHA_FLAGS_SG BIT(17) @@ -156,6 +144,7 @@ struct atmel_sha_dev { struct crypto_queue queue; struct ahash_request *req; bool is_async; + bool force_complete; atmel_sha_fn_t resume; atmel_sha_fn_t cpu_transfer_complete; @@ -198,7 +187,7 @@ static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err) clk_disable(dd->iclk); - if (dd->is_async && req->base.complete) + if ((dd->is_async || dd->force_complete) && req->base.complete) req->base.complete(&req->base, err); /* handle new request */ @@ -992,6 +981,7 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, dd->req = ahash_request_cast(async_req); start_async = (dd->req != req); dd->is_async = start_async; + dd->force_complete = false; /* WARNING: ctx->start() MAY change dd->is_async. */ err = ctx->start(dd); @@ -2100,6 +2090,332 @@ static struct ahash_alg sha_hmac_algs[] = { }, }; +#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +/* authenc functions */ + +static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd); +static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd); +static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd); + + +struct atmel_sha_authenc_ctx { + struct crypto_ahash *tfm; +}; + +struct atmel_sha_authenc_reqctx { + struct atmel_sha_reqctx base; + + atmel_aes_authenc_fn_t cb; + struct atmel_aes_dev *aes_dev; + + /* _init() parameters. */ + struct scatterlist *assoc; + u32 assoclen; + u32 textlen; + + /* _final() parameters. */ + u32 *digest; + unsigned int digestlen; +}; + +static void atmel_sha_authenc_complete(struct crypto_async_request *areq, + int err) +{ + struct ahash_request *req = areq->data; + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + + authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async); +} + +static int atmel_sha_authenc_start(struct atmel_sha_dev *dd) +{ + struct ahash_request *req = dd->req; + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + int err; + + /* + * Force atmel_sha_complete() to call req->base.complete(), ie + * atmel_sha_authenc_complete(), which in turn calls authctx->cb(). 
+ */ + dd->force_complete = true; + + err = atmel_sha_hw_init(dd); + return authctx->cb(authctx->aes_dev, err, dd->is_async); +} + +bool atmel_sha_authenc_is_ready(void) +{ + struct atmel_sha_ctx dummy; + + dummy.dd = NULL; + return (atmel_sha_find_dev(&dummy) != NULL); +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready); + +unsigned int atmel_sha_authenc_get_reqsize(void) +{ + return sizeof(struct atmel_sha_authenc_reqctx); +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize); + +struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode) +{ + struct atmel_sha_authenc_ctx *auth; + struct crypto_ahash *tfm; + struct atmel_sha_ctx *tctx; + const char *name; + int err = -EINVAL; + + switch (mode & SHA_FLAGS_MODE_MASK) { + case SHA_FLAGS_HMAC_SHA1: + name = "atmel-hmac-sha1"; + break; + + case SHA_FLAGS_HMAC_SHA224: + name = "atmel-hmac-sha224"; + break; + + case SHA_FLAGS_HMAC_SHA256: + name = "atmel-hmac-sha256"; + break; + + case SHA_FLAGS_HMAC_SHA384: + name = "atmel-hmac-sha384"; + break; + + case SHA_FLAGS_HMAC_SHA512: + name = "atmel-hmac-sha512"; + break; + + default: + goto error; + } + + tfm = crypto_alloc_ahash(name, + CRYPTO_ALG_TYPE_AHASH, + CRYPTO_ALG_TYPE_AHASH_MASK); + if (IS_ERR(tfm)) { + err = PTR_ERR(tfm); + goto error; + } + tctx = crypto_ahash_ctx(tfm); + tctx->start = atmel_sha_authenc_start; + tctx->flags = mode; + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) { + err = -ENOMEM; + goto err_free_ahash; + } + auth->tfm = tfm; + + return auth; + +err_free_ahash: + crypto_free_ahash(tfm); +error: + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn); + +void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth) +{ + if (auth) + crypto_free_ahash(auth->tfm); + kfree(auth); +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_free); + +int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth, + const u8 *key, unsigned int keylen, + u32 *flags) +{ + struct crypto_ahash *tfm = auth->tfm; + int err; + + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK); + err = crypto_ahash_setkey(tfm, key, keylen); + *flags = crypto_ahash_get_flags(tfm); + + return err; +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey); + +int atmel_sha_authenc_schedule(struct ahash_request *req, + struct atmel_sha_authenc_ctx *auth, + atmel_aes_authenc_fn_t cb, + struct atmel_aes_dev *aes_dev) +{ + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + struct atmel_sha_reqctx *ctx = &authctx->base; + struct crypto_ahash *tfm = auth->tfm; + struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); + struct atmel_sha_dev *dd; + + /* Reset request context (MUST be done first). */ + memset(authctx, 0, sizeof(*authctx)); + + /* Get SHA device. */ + dd = atmel_sha_find_dev(tctx); + if (!dd) + return cb(aes_dev, -ENODEV, false); + + /* Init request context. 
*/ + ctx->dd = dd; + ctx->buflen = SHA_BUFFER_LEN; + authctx->cb = cb; + authctx->aes_dev = aes_dev; + ahash_request_set_tfm(req, tfm); + ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req); + + return atmel_sha_handle_queue(dd, req); +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule); + +int atmel_sha_authenc_init(struct ahash_request *req, + struct scatterlist *assoc, unsigned int assoclen, + unsigned int textlen, + atmel_aes_authenc_fn_t cb, + struct atmel_aes_dev *aes_dev) +{ + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + struct atmel_sha_reqctx *ctx = &authctx->base; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); + struct atmel_sha_dev *dd = ctx->dd; + + if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32)))) + return atmel_sha_complete(dd, -EINVAL); + + authctx->cb = cb; + authctx->aes_dev = aes_dev; + authctx->assoc = assoc; + authctx->assoclen = assoclen; + authctx->textlen = textlen; + + ctx->flags = hmac->base.flags; + return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2); +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_init); + +static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd) +{ + struct ahash_request *req = dd->req; + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + struct atmel_sha_reqctx *ctx = &authctx->base; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); + size_t hs = ctx->hash_size; + size_t i, num_words = hs / sizeof(u32); + u32 mr, msg_size; + + atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); + for (i = 0; i < num_words; ++i) + atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); + + atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV); + for (i = 0; i < num_words; ++i) + atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); + + mr = (SHA_MR_MODE_IDATAR0 | + SHA_MR_HMAC | + SHA_MR_DUALBUFF); + mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; + atmel_sha_write(dd, SHA_MR, mr); + + msg_size = authctx->assoclen + authctx->textlen; + atmel_sha_write(dd, SHA_MSR, msg_size); + atmel_sha_write(dd, SHA_BCR, msg_size); + + atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); + + /* Process assoc data. 
*/ + return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen, + true, false, + atmel_sha_authenc_init_done); +} + +static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd) +{ + struct ahash_request *req = dd->req; + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + + return authctx->cb(authctx->aes_dev, 0, dd->is_async); +} + +int atmel_sha_authenc_final(struct ahash_request *req, + u32 *digest, unsigned int digestlen, + atmel_aes_authenc_fn_t cb, + struct atmel_aes_dev *aes_dev) +{ + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + struct atmel_sha_reqctx *ctx = &authctx->base; + struct atmel_sha_dev *dd = ctx->dd; + + switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { + case SHA_FLAGS_SHA1: + authctx->digestlen = SHA1_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA224: + authctx->digestlen = SHA224_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA256: + authctx->digestlen = SHA256_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA384: + authctx->digestlen = SHA384_DIGEST_SIZE; + break; + + case SHA_FLAGS_SHA512: + authctx->digestlen = SHA512_DIGEST_SIZE; + break; + + default: + return atmel_sha_complete(dd, -EINVAL); + } + if (authctx->digestlen > digestlen) + authctx->digestlen = digestlen; + + authctx->cb = cb; + authctx->aes_dev = aes_dev; + authctx->digest = digest; + return atmel_sha_wait_for_data_ready(dd, + atmel_sha_authenc_final_done); +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_final); + +static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd) +{ + struct ahash_request *req = dd->req; + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + size_t i, num_words = authctx->digestlen / sizeof(u32); + + for (i = 0; i < num_words; ++i) + authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); + + return atmel_sha_complete(dd, 0); +} + +void atmel_sha_authenc_abort(struct ahash_request *req) +{ + struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); + struct atmel_sha_reqctx *ctx = &authctx->base; + struct atmel_sha_dev *dd = ctx->dd; + + /* Prevent atmel_sha_complete() from calling req->base.complete(). */ + dd->is_async = false; + dd->force_complete = false; + (void)atmel_sha_complete(dd, 0); +} +EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort); + +#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */ + + static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) { int i;
This patch allows combining the AES and SHA hardware accelerators on some Atmel SoCs. This way, AES blocks are only written to/read from the AES hardware. Those blocks are also transferred from the AES to the SHA accelerator internally, without additional accesses to the system buses.

Hence the AES and SHA accelerators work in parallel to process all the data blocks, instead of serializing the process by (de)crypting those blocks first, then authenticating them afterwards, as the generic crypto/authenc.c driver does.

Of course, both the AES and SHA hardware accelerators need to be available before we can start processing the data blocks. Hence we use their crypto request queues to synchronize both drivers.

Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
---
 drivers/crypto/Kconfig          |  12 +
 drivers/crypto/atmel-aes-regs.h |  16 ++
 drivers/crypto/atmel-aes.c      | 471 +++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/atmel-authenc.h  |  64 ++++++
 drivers/crypto/atmel-sha-regs.h |  14 ++
 drivers/crypto/atmel-sha.c      | 344 +++++++++++++++++++++++++++--
 6 files changed, 906 insertions(+), 15 deletions(-)
 create mode 100644 drivers/crypto/atmel-authenc.h
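For readers who want to exercise the new transforms, below is a minimal, illustrative sketch of how a kernel consumer could drive one of the AEAD algorithms registered by this patch through the generic crypto API. Only the algorithm string comes from the aead_alg table above; the demo_authenc_encrypt() helper, its parameters, and the DECLARE_CRYPTO_WAIT()/crypto_wait_req() helpers (merged into the kernel after this patch was posted; on a 4.9-era tree the callback would be open-coded with a struct completion) are assumptions for illustration, not part of the patch. The key blob follows the standard authenc() layout parsed by crypto_authenc_extractkeys(): an rtattr-framed enckeylen followed by the authentication key and the encryption key.

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/sha.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Illustrative sketch (not part of the patch): encrypt in place with
 * "authenc(hmac(sha1),cbc(aes))". The scatterlist must cover
 * assoclen + textlen bytes of input and leave room for the 20-byte tag.
 */
static int demo_authenc_encrypt(const u8 *authkey, unsigned int authkeylen,
				const u8 *enckey, unsigned int enckeylen,
				u8 *iv, struct scatterlist *sg,
				unsigned int assoclen, unsigned int textlen)
{
	DECLARE_CRYPTO_WAIT(wait);	/* helper from kernels newer than this patch */
	struct crypto_authenc_key_param *param;
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct rtattr *rta;
	unsigned int keylen;
	u8 *keybuf;
	int err;

	/* The API picks the highest-priority implementation of this name. */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Pack the two keys in the authenc() key blob format. */
	keylen = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
	keybuf = kmalloc(keylen, GFP_KERNEL);
	if (!keybuf) {
		err = -ENOMEM;
		goto free_tfm;
	}
	rta = (struct rtattr *)keybuf;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	memcpy(keybuf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(keybuf + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);

	err = crypto_aead_setkey(tfm, keybuf, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	if (err)
		goto free_key;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_key;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, textlen, iv);

	/* Wait for the asynchronous hardware operation to complete. */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_key:
	kzfree(keybuf);
free_tfm:
	crypto_free_aead(tfm);
	return err;
}

The other four registered variants differ only in the algorithm string and the digest size passed to crypto_aead_setauthsize(); whether the request actually reaches the combined AES+SHA path depends on this driver's cra_priority outranking the software authenc instance on the target SoC.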