diff mbox series

[v2,21/29] crypto: s390/aes-gcm - use the new scatterwalk functions

Message ID 20241230001418.74739-22-ebiggers@kernel.org (mailing list archive)
State Superseded
Delegated to: Herbert Xu
Headers show
Series crypto: scatterlist handling improvements | expand

Commit Message

Eric Biggers Dec. 30, 2024, 12:14 a.m. UTC
From: Eric Biggers <ebiggers@google.com>

Use scatterwalk_next() which consolidates scatterwalk_clamp() and
scatterwalk_map().  Use scatterwalk_done_src() and
scatterwalk_done_dst() which consolidate scatterwalk_unmap(),
scatterwalk_advance(), and scatterwalk_done().

Besides the new functions being a bit easier to use, this is necessary
because scatterwalk_done() is planned to be removed.

Cc: Harald Freudenberger <freude@linux.ibm.com>
Cc: Holger Dengler <dengler@linux.ibm.com>
Cc: linux-s390@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
---

This patch is part of a long series touching many files, so I have
limited the Cc list on the full series.  If you want the full series and
did not receive it, please retrieve it from lore.kernel.org.

 arch/s390/crypto/aes_s390.c | 33 +++++++++++++--------------------
 1 file changed, 13 insertions(+), 20 deletions(-)

Comments

Harald Freudenberger Jan. 8, 2025, 3:06 p.m. UTC | #1
On 2024-12-30 01:14, Eric Biggers wrote:
> From: Eric Biggers <ebiggers@google.com>
> 
> Use scatterwalk_next() which consolidates scatterwalk_clamp() and
> scatterwalk_map().  Use scatterwalk_done_src() and
> scatterwalk_done_dst() which consolidate scatterwalk_unmap(),
> scatterwalk_advance(), and scatterwalk_done().
> 
> Besides the new functions being a bit easier to use, this is necessary
> because scatterwalk_done() is planned to be removed.
> 
> Cc: Harald Freudenberger <freude@linux.ibm.com>
> Cc: Holger Dengler <dengler@linux.ibm.com>
> Cc: linux-s390@vger.kernel.org
> Signed-off-by: Eric Biggers <ebiggers@google.com>
> ---
> 
> This patch is part of a long series touching many files, so I have
> limited the Cc list on the full series.  If you want the full series 
> and
> did not receive it, please retrieve it from lore.kernel.org.
> 
>  arch/s390/crypto/aes_s390.c | 33 +++++++++++++--------------------
>  1 file changed, 13 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
> index 9c46b1b630b1..7fd303df05ab 100644
> --- a/arch/s390/crypto/aes_s390.c
> +++ b/arch/s390/crypto/aes_s390.c
> @@ -785,32 +785,25 @@ static void gcm_walk_start(struct gcm_sg_walk
> *gw, struct scatterlist *sg,
>  	scatterwalk_start(&gw->walk, sg);
>  }
> 
>  static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk 
> *gw)
>  {
> -	struct scatterlist *nextsg;
> -
> -	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
> -	while (!gw->walk_bytes) {
> -		nextsg = sg_next(gw->walk.sg);
> -		if (!nextsg)
> -			return 0;
> -		scatterwalk_start(&gw->walk, nextsg);
> -		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
> -						   gw->walk_bytes_remain);
> -	}
> -	gw->walk_ptr = scatterwalk_map(&gw->walk);
> +	if (gw->walk_bytes_remain == 0)
> +		return 0;
> +	gw->walk_ptr = scatterwalk_next(&gw->walk, gw->walk_bytes_remain,
> +					&gw->walk_bytes);
>  	return gw->walk_bytes;
>  }
> 
>  static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
> -					     unsigned int nbytes)
> +					     unsigned int nbytes, bool out)
>  {
>  	gw->walk_bytes_remain -= nbytes;
> -	scatterwalk_unmap(gw->walk_ptr);
> -	scatterwalk_advance(&gw->walk, nbytes);
> -	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
> +	if (out)
> +		scatterwalk_done_dst(&gw->walk, gw->walk_ptr, nbytes);
> +	else
> +		scatterwalk_done_src(&gw->walk, gw->walk_ptr, nbytes);
>  	gw->walk_ptr = NULL;
>  }
> 
>  static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int 
> minbytesneeded)
>  {
> @@ -842,11 +835,11 @@ static int gcm_in_walk_go(struct gcm_sg_walk
> *gw, unsigned int minbytesneeded)
> 
>  	while (1) {
>  		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
>  		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
>  		gw->buf_bytes += n;
> -		_gcm_sg_unmap_and_advance(gw, n);
> +		_gcm_sg_unmap_and_advance(gw, n, false);
>  		if (gw->buf_bytes >= minbytesneeded) {
>  			gw->ptr = gw->buf;
>  			gw->nbytes = gw->buf_bytes;
>  			goto out;
>  		}
> @@ -902,11 +895,11 @@ static int gcm_in_walk_done(struct gcm_sg_walk
> *gw, unsigned int bytesdone)
>  			memmove(gw->buf, gw->buf + bytesdone, n);
>  			gw->buf_bytes = n;
>  		} else
>  			gw->buf_bytes = 0;
>  	} else
> -		_gcm_sg_unmap_and_advance(gw, bytesdone);
> +		_gcm_sg_unmap_and_advance(gw, bytesdone, false);
> 
>  	return bytesdone;
>  }
> 
>  static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int 
> bytesdone)
> @@ -920,14 +913,14 @@ static int gcm_out_walk_done(struct gcm_sg_walk
> *gw, unsigned int bytesdone)
>  		for (i = 0; i < bytesdone; i += n) {
>  			if (!_gcm_sg_clamp_and_map(gw))
>  				return i;
>  			n = min(gw->walk_bytes, bytesdone - i);
>  			memcpy(gw->walk_ptr, gw->buf + i, n);
> -			_gcm_sg_unmap_and_advance(gw, n);
> +			_gcm_sg_unmap_and_advance(gw, n, true);
>  		}
>  	} else
> -		_gcm_sg_unmap_and_advance(gw, bytesdone);
> +		_gcm_sg_unmap_and_advance(gw, bytesdone, true);
> 
>  	return bytesdone;
>  }
> 
>  static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)

Reviewed-by: Harald Freudenberger <freude@linux.ibm.com>
Tested-by: Harald Freudenberger <freude@linux.ibm.com>
diff mbox series

Patch

diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 9c46b1b630b1..7fd303df05ab 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -785,32 +785,25 @@  static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
 	scatterwalk_start(&gw->walk, sg);
 }
 
 static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
 {
-	struct scatterlist *nextsg;
-
-	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
-	while (!gw->walk_bytes) {
-		nextsg = sg_next(gw->walk.sg);
-		if (!nextsg)
-			return 0;
-		scatterwalk_start(&gw->walk, nextsg);
-		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-						   gw->walk_bytes_remain);
-	}
-	gw->walk_ptr = scatterwalk_map(&gw->walk);
+	if (gw->walk_bytes_remain == 0)
+		return 0;
+	gw->walk_ptr = scatterwalk_next(&gw->walk, gw->walk_bytes_remain,
+					&gw->walk_bytes);
 	return gw->walk_bytes;
 }
 
 static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
-					     unsigned int nbytes)
+					     unsigned int nbytes, bool out)
 {
 	gw->walk_bytes_remain -= nbytes;
-	scatterwalk_unmap(gw->walk_ptr);
-	scatterwalk_advance(&gw->walk, nbytes);
-	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+	if (out)
+		scatterwalk_done_dst(&gw->walk, gw->walk_ptr, nbytes);
+	else
+		scatterwalk_done_src(&gw->walk, gw->walk_ptr, nbytes);
 	gw->walk_ptr = NULL;
 }
 
 static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 {
@@ -842,11 +835,11 @@  static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 
 	while (1) {
 		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
 		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
 		gw->buf_bytes += n;
-		_gcm_sg_unmap_and_advance(gw, n);
+		_gcm_sg_unmap_and_advance(gw, n, false);
 		if (gw->buf_bytes >= minbytesneeded) {
 			gw->ptr = gw->buf;
 			gw->nbytes = gw->buf_bytes;
 			goto out;
 		}
@@ -902,11 +895,11 @@  static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 			memmove(gw->buf, gw->buf + bytesdone, n);
 			gw->buf_bytes = n;
 		} else
 			gw->buf_bytes = 0;
 	} else
-		_gcm_sg_unmap_and_advance(gw, bytesdone);
+		_gcm_sg_unmap_and_advance(gw, bytesdone, false);
 
 	return bytesdone;
 }
 
 static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
@@ -920,14 +913,14 @@  static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 		for (i = 0; i < bytesdone; i += n) {
 			if (!_gcm_sg_clamp_and_map(gw))
 				return i;
 			n = min(gw->walk_bytes, bytesdone - i);
 			memcpy(gw->walk_ptr, gw->buf + i, n);
-			_gcm_sg_unmap_and_advance(gw, n);
+			_gcm_sg_unmap_and_advance(gw, n, true);
 		}
 	} else
-		_gcm_sg_unmap_and_advance(gw, bytesdone);
+		_gcm_sg_unmap_and_advance(gw, bytesdone, true);
 
 	return bytesdone;
 }
 
 static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)