[2/5] crypto: arm64/sha2-ce - clean up backwards function names

Message ID: 20231010064127.323261-3-ebiggers@kernel.org
State: Accepted
Delegated to: Herbert Xu
Series: crypto: arm64 - clean up backwards function names

Commit Message

Eric Biggers Oct. 10, 2023, 6:41 a.m. UTC
From: Eric Biggers <ebiggers@google.com>

In the Linux kernel, a function whose name has two leading underscores
is conventionally called by the same-named function without leading
underscores -- not the other way around.  __sha2_ce_transform() and
__sha256_block_data_order() got this backwards.  Fix this, albeit
without changing "sha256_block_data_order" in the perlasm since that is
OpenSSL code.  No change in behavior.

Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/arm64/crypto/sha2-ce-core.S |  8 ++++----
 arch/arm64/crypto/sha2-ce-glue.c | 31 ++++++++++++++++---------------
 2 files changed, 20 insertions(+), 19 deletions(-)
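
For reference, the convention being restored is the standard kernel one: the
double-underscore name is the inner worker, and the same name without the
underscores is the wrapper that calls it. A minimal illustrative sketch with
hypothetical names (struct foo and frob() are not from this patch):

	#include <linux/spinlock.h>

	struct foo {
		spinlock_t lock;
		unsigned long count;
	};

	/* Inner worker: caller must already hold f->lock. */
	static void __frob(struct foo *f)
	{
		f->count++;
	}

	/* Wrapper of the same name, minus the underscores, calls __frob(). */
	static void frob(struct foo *f)
	{
		spin_lock(&f->lock);
		__frob(f);
		spin_unlock(&f->lock);
	}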

Patch

diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 491179922f498..fce84d88ddb2c 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -71,11 +71,11 @@ 
 	.word		0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
 
 	/*
-	 * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
-	 *			  int blocks)
+	 * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+	 *			     int blocks)
 	 */
 	.text
-SYM_FUNC_START(sha2_ce_transform)
+SYM_FUNC_START(__sha256_ce_transform)
 	/* load round constants */
 	adr_l		x8, .Lsha2_rcon
 	ld1		{ v0.4s- v3.4s}, [x8], #64
@@ -154,4 +154,4 @@  CPU_LE(	rev32		v19.16b, v19.16b	)
 3:	st1		{dgav.4s, dgbv.4s}, [x0]
 	mov		w0, w2
 	ret
-SYM_FUNC_END(sha2_ce_transform)
+SYM_FUNC_END(__sha256_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index c57a6119fefc5..cdeefdcbe101b 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -30,18 +30,19 @@  struct sha256_ce_state {
 extern const u32 sha256_ce_offsetof_count;
 extern const u32 sha256_ce_offsetof_finalize;
 
-asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
-				 int blocks);
+asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+				     int blocks);
 
-static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+static void sha256_ce_transform(struct sha256_state *sst, u8 const *src,
 				int blocks)
 {
 	while (blocks) {
 		int rem;
 
 		kernel_neon_begin();
-		rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state,
-						     sst), src, blocks);
+		rem = __sha256_ce_transform(container_of(sst,
+							 struct sha256_ce_state,
+							 sst), src, blocks);
 		kernel_neon_end();
 		src += (blocks - rem) * SHA256_BLOCK_SIZE;
 		blocks = rem;
@@ -55,8 +56,8 @@  const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
 
 asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
 
-static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
-				      int blocks)
+static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
+				   int blocks)
 {
 	sha256_block_data_order(sst->state, src, blocks);
 }
@@ -68,10 +69,10 @@  static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				__sha256_block_data_order);
+					     sha256_arm64_transform);
 
 	sctx->finalize = 0;
-	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, sha256_ce_transform);
 
 	return 0;
 }
@@ -85,8 +86,8 @@  static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				__sha256_block_data_order);
-		sha256_base_do_finalize(desc, __sha256_block_data_order);
+					      sha256_arm64_transform);
+		sha256_base_do_finalize(desc, sha256_arm64_transform);
 		return sha256_base_finish(desc, out);
 	}
 
@@ -96,9 +97,9 @@  static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	 */
 	sctx->finalize = finalize;
 
-	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, sha256_ce_transform);
 	if (!finalize)
-		sha256_base_do_finalize(desc, __sha2_ce_transform);
+		sha256_base_do_finalize(desc, sha256_ce_transform);
 	return sha256_base_finish(desc, out);
 }
 
@@ -107,12 +108,12 @@  static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
 	if (!crypto_simd_usable()) {
-		sha256_base_do_finalize(desc, __sha256_block_data_order);
+		sha256_base_do_finalize(desc, sha256_arm64_transform);
 		return sha256_base_finish(desc, out);
 	}
 
 	sctx->finalize = 0;
-	sha256_base_do_finalize(desc, __sha2_ce_transform);
+	sha256_base_do_finalize(desc, sha256_ce_transform);
 	return sha256_base_finish(desc, out);
 }
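
For context, the glue-code wrapper renamed above follows a common arm64 crypto
pattern: the asmlinkage routine returns the number of blocks it did not
process, and the C wrapper loops, closing and reopening the kernel-mode NEON
section between chunks so each non-preemptible NEON section stays bounded. A
hedged sketch of that pattern with hypothetical names (my_transform and
__my_transform are illustrative, not part of this patch):

	#include <linux/linkage.h>
	#include <linux/types.h>
	#include <asm/neon.h>

	#define MY_BLOCK_SIZE	64	/* SHA-256 block size in bytes */

	/* Asm routine: returns how many of the 'blocks' it left unprocessed. */
	asmlinkage int __my_transform(u32 *state, const u8 *src, int blocks);

	static void my_transform(u32 *state, const u8 *src, int blocks)
	{
		while (blocks) {
			int rem;

			kernel_neon_begin();	/* claim the NEON unit */
			rem = __my_transform(state, src, blocks);
			kernel_neon_end();	/* preemption point between chunks */

			src += (blocks - rem) * MY_BLOCK_SIZE;
			blocks = rem;
		}
	}

Note the naming: my_transform() calls __my_transform(), which is exactly the
wrapper/worker relationship this patch restores for the SHA-256 functions.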