@@ -248,6 +248,64 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
return crypto_transfer_hash_request_to_engine(engine, areq);
}
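+/*
+ * Append the standard MD5/SHA padding after the first padi words of buf:
+ * a 0x80 byte, a zero fill up to the end of the final block, then the
+ * message length in bits (little-endian 64-bit when le is set, for MD5;
+ * otherwise big-endian, 64-bit when bs == 64 and 128-bit when bs == 128).
+ * Return the total number of 32-bit words now used in buf, or 0 if the
+ * pad would overflow bufsize bytes.
+ */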
+static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
+{
+ u64 fill, min_fill, j, k;
+ __be64 *bebits;
+ __le64 *lebits;
+
+ j = padi;
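+ /* pad marker 0x80, followed by three zero bytes of fill */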
+ buf[j++] = cpu_to_le32(0x80);
+
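+ /* fill: bytes to the next block boundary; min_fill: the 0x80 word plus the length field */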
+ if (bs == 64) {
+ fill = 64 - (byte_count % 64);
+ min_fill = 2 * sizeof(u32) + sizeof(u32);
+ } else {
+ fill = 128 - (byte_count % 128);
+ min_fill = 4 * sizeof(u32) + sizeof(u32);
+ }
+
+ if (fill < min_fill)
+ fill += bs;
+
+ k = j;
+ j += (fill - min_fill) / sizeof(u32);
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+ for (; k < j; k++)
+ buf[k] = 0;
+
+ if (le) {
+ /* MD5 */
+ lebits = (__le64 *)&buf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ } else {
+ if (bs == 64) {
+ /* sha1 sha224 sha256 */
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ } else {
+ /* sha384 sha512 */
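+ /* 128-bit length: high word is byte_count >> 61, low word is byte_count << 3 */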
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count >> 61);
+ j += 2;
+ bebits = (__be64 *)&buf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ }
+ }
+ if (j * 4 > bufsize) {
+ pr_err("%s OVERFLOW %llu\n", __func__, j);
+ return 0;
+ }
+
+ return j;
+}
+
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
@@ -266,10 +324,6 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
__le32 *bf;
void *buf = NULL;
int j, i, todo;
- int nbw = 0;
- u64 fill, min_fill;
- __be64 *bebits;
- __le64 *lebits;
void *result = NULL;
u64 bs;
int digestsize;
@@ -348,44 +402,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
byte_count = areq->nbytes;
j = 0;
- bf[j++] = cpu_to_le32(0x80);
-
- if (bs == 64) {
- fill = 64 - (byte_count % 64);
- min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
- } else {
- fill = 128 - (byte_count % 128);
- min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
- }
-
- if (fill < min_fill)
- fill += bs;
-
- j += (fill - min_fill) / sizeof(u32);
switch (algt->ce_algo_id) {
case CE_ID_HASH_MD5:
- lebits = (__le64 *)&bf[j];
- *lebits = cpu_to_le64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
break;
case CE_ID_HASH_SHA1:
case CE_ID_HASH_SHA224:
case CE_ID_HASH_SHA256:
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
break;
case CE_ID_HASH_SHA384:
case CE_ID_HASH_SHA512:
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count >> 61);
- j += 2;
- bebits = (__be64 *)&bf[j];
- *bebits = cpu_to_be64(byte_count << 3);
- j += 2;
+ j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
break;
}
+ if (!j) {
+ err = -EINVAL;
+ goto theend;
+ }
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
cet->t_src[i].addr = cpu_to_le32(addr_pad);
Move all padding work to a dedicated function.

Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
---
 .../crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 95 +++++++++++++------
 1 file changed, 65 insertions(+), 30 deletions(-)
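
For review only, not part of the patch: a minimal userspace sketch of the pad layout hash_pad() produces for the bs == 64, big-endian case (SHA-1/224/256). The helper name model_pad and the little-endian-host / word-aligned byte_count assumptions are mine.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch of the bs == 64, big-endian case. Assumes a little-endian host
 * (so storing 0x80 in a u32 matches cpu_to_le32() in the driver) and a
 * byte_count that is a multiple of 4.
 */
static size_t model_pad(uint32_t *buf, size_t bufsize, uint64_t byte_count)
{
	size_t fill = 64 - (byte_count % 64);
	size_t min_fill = 2 * sizeof(uint32_t) + sizeof(uint32_t);
	uint64_t bits = byte_count << 3;
	size_t j = 0;

	if (fill < min_fill)
		fill += 64;

	buf[j++] = 0x80;				/* pad marker + 3 zero bytes */
	memset(&buf[j], 0, fill - min_fill);		/* zero fill */
	j += (fill - min_fill) / sizeof(uint32_t);
	buf[j++] = __builtin_bswap32(bits >> 32);	/* bit count, big-endian, high */
	buf[j++] = __builtin_bswap32((uint32_t)bits);	/* bit count, big-endian, low */

	return j * 4 > bufsize ? 0 : j;
}

int main(void)
{
	uint32_t pad[32];
	size_t i, n = model_pad(pad, sizeof(pad), 20);	/* 20 data bytes hashed */

	printf("%zu pad words\n", n);
	for (i = 0; i < n; i++)
		printf("%2zu: %08x\n", i, (unsigned int)pad[i]);
	return 0;
}

For byte_count = 20 this prints 11 words: 0x00000080, eight zero words, then the big-endian bit count 0x00000000 0xa0000000, i.e. 44 pad bytes bringing the stream to the 64-byte boundary.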