* [PATCH] crypto: arm64/sha: fix function types
@ 2019-11-12 22:30 ` Sami Tolvanen
  0 siblings, 0 replies; 34+ messages in thread
From: Sami Tolvanen @ 2019-11-12 22:30 UTC (permalink / raw)
  To: Herbert Xu, Ard Biesheuvel
  Cc: Eric Biggers, Kees Cook, linux-crypto, linux-arm-kernel,
	linux-kernel, Sami Tolvanen

Declare assembly functions with the expected function type
instead of casting pointers in C to avoid type mismatch failures
with Control-Flow Integrity (CFI) checking.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
---
 arch/arm64/crypto/sha1-ce-glue.c   | 12 +++++-------
 arch/arm64/crypto/sha2-ce-glue.c   | 26 +++++++++++---------------
 arch/arm64/crypto/sha256-glue.c    | 30 ++++++++++++------------------
 arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
 arch/arm64/crypto/sha512-glue.c    | 13 +++++--------
 5 files changed, 43 insertions(+), 61 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..3153a9bbb683 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -25,7 +25,7 @@ struct sha1_ce_state {
 	u32			finalize;
 };
 
-asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
 				  int blocks);
 
 const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
@@ -41,8 +41,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, sha1_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -64,10 +63,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, sha1_ce_transform);
 	if (!finalize)
-		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+		sha1_base_do_finalize(desc, sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
@@ -81,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_finalize(desc, sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..a4dacedfe4d4 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -25,7 +25,7 @@ struct sha256_ce_state {
 	u32			finalize;
 };
 
-asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
 				  int blocks);
 
 const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
@@ -33,7 +33,8 @@ const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
 const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
 						 finalize);
 
-asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
+asmlinkage void sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+					int blocks);
 
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
@@ -42,12 +43,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				sha256_block_data_order);
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, sha2_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -62,9 +62,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				sha256_block_data_order);
+		sha256_base_do_finalize(desc, sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
@@ -75,11 +74,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, sha2_ce_transform);
 	if (!finalize)
-		sha256_base_do_finalize(desc,
-					(sha256_block_fn *)sha2_ce_transform);
+		sha256_base_do_finalize(desc, sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
@@ -89,14 +86,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
 	if (!crypto_simd_usable()) {
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_finalize(desc, sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_finalize(desc, sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..dac3157937ba 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -23,28 +23,25 @@ MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("sha224");
 MODULE_ALIAS_CRYPTO("sha256");
 
-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
-					unsigned int num_blks);
+asmlinkage void sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+					int blocks);
 EXPORT_SYMBOL(sha256_block_data_order);
 
-asmlinkage void sha256_block_neon(u32 *digest, const void *data,
-				  unsigned int num_blks);
+asmlinkage void sha256_block_neon(struct sha256_state *sst, u8 const *src,
+				  int blocks);
 
 static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
 				      unsigned int len)
 {
-	return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+	return sha256_base_do_update(desc, data, len, sha256_block_data_order);
 }
 
 static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
 				     unsigned int len, u8 *out)
 {
 	if (len)
-		sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-	sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_update(desc, data, len, sha256_block_data_order);
+	sha256_base_do_finalize(desc, sha256_block_data_order);
 
 	return sha256_base_finish(desc, out);
 }
@@ -87,7 +84,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				sha256_block_data_order);
 
 	while (len > 0) {
 		unsigned int chunk = len;
@@ -103,8 +100,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 				sctx->count % SHA256_BLOCK_SIZE;
 
 		kernel_neon_begin();
-		sha256_base_do_update(desc, data, chunk,
-				      (sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_update(desc, data, chunk, sha256_block_neon);
 		kernel_neon_end();
 		data += chunk;
 		len -= chunk;
@@ -118,15 +114,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				sha256_block_data_order);
+		sha256_base_do_finalize(desc, sha256_block_data_order);
 	} else {
 		if (len)
 			sha256_update_neon(desc, data, len);
 		kernel_neon_begin();
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_finalize(desc, sha256_block_neon);
 		kernel_neon_end();
 	}
 	return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..0f964235d753 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -27,18 +27,18 @@ MODULE_LICENSE("GPL v2");
 asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
 				    int blocks);
 
-asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
+asmlinkage void sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+					int blocks);
 
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
 	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
+					     sha512_block_data_order);
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -50,16 +50,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+					      sha512_block_data_order);
+		sha512_base_do_finalize(desc, sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
@@ -67,13 +65,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
 	if (!crypto_simd_usable()) {
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+		sha512_base_do_finalize(desc, sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..0f6b610a7954 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,25 +20,22 @@ MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("sha384");
 MODULE_ALIAS_CRYPTO("sha512");
 
-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
-					unsigned int num_blks);
+asmlinkage void sha512_block_data_order(struct sha512_state *sst,
+					u8 const *src, int blocks);
 EXPORT_SYMBOL(sha512_block_data_order);
 
 static int sha512_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
-	return sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
+	return sha512_base_do_update(desc, data, len, sha512_block_data_order);
 }
 
 static int sha512_finup(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out)
 {
 	if (len)
-		sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
-	sha512_base_do_finalize(desc,
-			(sha512_block_fn *)sha512_block_data_order);
+		sha512_base_do_update(desc, data, len, sha512_block_data_order);
+	sha512_base_do_finalize(desc, sha512_block_data_order);
 
 	return sha512_base_finish(desc, out);
 }

base-commit: 100d46bd72ec689a5582c2f5f4deadc5bcb92d60
-- 
2.24.0.rc1.363.gb1bccd3e3d-goog
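
For illustration, a minimal standalone sketch (hypothetical types, not kernel
code) of the indirect-call type mismatch that CFI rejects and that dropping
the casts above avoids:

struct base_state    { unsigned int count; };
struct derived_state { struct base_state base; unsigned int finalize; };

typedef void (block_fn)(struct base_state *st, const unsigned char *src,
			int blocks);

static void derived_transform(struct derived_state *st,
			      const unsigned char *src, int blocks)
{
	/* a real transform would also consult st->finalize */
}

static void do_update(struct base_state *st, const unsigned char *src,
		      int blocks, block_fn *fn)
{
	fn(st, src, blocks);	/* indirect call; CFI checks the target's type */
}

void broken_caller(struct derived_state *st, const unsigned char *src)
{
	/*
	 * The cast silences the incompatible-pointer-type warning, but with
	 * Clang's -fsanitize=cfi-icall the indirect call in do_update()
	 * aborts: derived_transform's type does not match block_fn.
	 */
	do_update(&st->base, src, 1, (block_fn *)derived_transform);
}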


* Re: [PATCH] crypto: arm64/sha: fix function types
  2019-11-12 22:30 ` Sami Tolvanen
@ 2019-11-13 18:27   ` Kees Cook
  -1 siblings, 0 replies; 34+ messages in thread
From: Kees Cook @ 2019-11-13 18:27 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Herbert Xu, Ard Biesheuvel, Eric Biggers, linux-crypto,
	linux-arm-kernel, linux-kernel

On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> Declare assembly functions with the expected function type
> instead of casting pointers in C to avoid type mismatch failures
> with Control-Flow Integrity (CFI) checking.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>

Looks good, yes. This looks very similar to what I needed to do for
x86's SHA routines.

Reviewed-by: Kees Cook <keescook@chromium.org>

> ---
>  arch/arm64/crypto/sha1-ce-glue.c   | 12 +++++-------
>  arch/arm64/crypto/sha2-ce-glue.c   | 26 +++++++++++---------------
>  arch/arm64/crypto/sha256-glue.c    | 30 ++++++++++++------------------
>  arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
>  arch/arm64/crypto/sha512-glue.c    | 13 +++++--------
>  5 files changed, 43 insertions(+), 61 deletions(-)
> 
> diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> index bdc1b6d7aff7..3153a9bbb683 100644
> --- a/arch/arm64/crypto/sha1-ce-glue.c
> +++ b/arch/arm64/crypto/sha1-ce-glue.c
> @@ -25,7 +25,7 @@ struct sha1_ce_state {
>  	u32			finalize;
>  };
>  
> -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
>  				  int blocks);
>  
>  const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
> @@ -41,8 +41,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
>  
>  	sctx->finalize = 0;
>  	kernel_neon_begin();
> -	sha1_base_do_update(desc, data, len,
> -			    (sha1_block_fn *)sha1_ce_transform);
> +	sha1_base_do_update(desc, data, len, sha1_ce_transform);
>  	kernel_neon_end();
>  
>  	return 0;
> @@ -64,10 +63,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
>  	sctx->finalize = finalize;
>  
>  	kernel_neon_begin();
> -	sha1_base_do_update(desc, data, len,
> -			    (sha1_block_fn *)sha1_ce_transform);
> +	sha1_base_do_update(desc, data, len, sha1_ce_transform);
>  	if (!finalize)
> -		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
> +		sha1_base_do_finalize(desc, sha1_ce_transform);
>  	kernel_neon_end();
>  	return sha1_base_finish(desc, out);
>  }
> @@ -81,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
>  
>  	sctx->finalize = 0;
>  	kernel_neon_begin();
> -	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
> +	sha1_base_do_finalize(desc, sha1_ce_transform);
>  	kernel_neon_end();
>  	return sha1_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
> index 604a01a4ede6..a4dacedfe4d4 100644
> --- a/arch/arm64/crypto/sha2-ce-glue.c
> +++ b/arch/arm64/crypto/sha2-ce-glue.c
> @@ -25,7 +25,7 @@ struct sha256_ce_state {
>  	u32			finalize;
>  };
>  
> -asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
> +asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
>  				  int blocks);
>  
>  const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
> @@ -33,7 +33,8 @@ const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
>  const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
>  						 finalize);
>  
> -asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
> +asmlinkage void sha256_block_data_order(struct sha256_state *sst, u8 const *src,
> +					int blocks);
>  
>  static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
>  			    unsigned int len)
> @@ -42,12 +43,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
>  
>  	if (!crypto_simd_usable())
>  		return sha256_base_do_update(desc, data, len,
> -				(sha256_block_fn *)sha256_block_data_order);
> +				sha256_block_data_order);
>  
>  	sctx->finalize = 0;
>  	kernel_neon_begin();
> -	sha256_base_do_update(desc, data, len,
> -			      (sha256_block_fn *)sha2_ce_transform);
> +	sha256_base_do_update(desc, data, len, sha2_ce_transform);
>  	kernel_neon_end();
>  
>  	return 0;
> @@ -62,9 +62,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
>  	if (!crypto_simd_usable()) {
>  		if (len)
>  			sha256_base_do_update(desc, data, len,
> -				(sha256_block_fn *)sha256_block_data_order);
> -		sha256_base_do_finalize(desc,
> -				(sha256_block_fn *)sha256_block_data_order);
> +				sha256_block_data_order);
> +		sha256_base_do_finalize(desc, sha256_block_data_order);
>  		return sha256_base_finish(desc, out);
>  	}
>  
> @@ -75,11 +74,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
>  	sctx->finalize = finalize;
>  
>  	kernel_neon_begin();
> -	sha256_base_do_update(desc, data, len,
> -			      (sha256_block_fn *)sha2_ce_transform);
> +	sha256_base_do_update(desc, data, len, sha2_ce_transform);
>  	if (!finalize)
> -		sha256_base_do_finalize(desc,
> -					(sha256_block_fn *)sha2_ce_transform);
> +		sha256_base_do_finalize(desc, sha2_ce_transform);
>  	kernel_neon_end();
>  	return sha256_base_finish(desc, out);
>  }
> @@ -89,14 +86,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
>  	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
>  
>  	if (!crypto_simd_usable()) {
> -		sha256_base_do_finalize(desc,
> -				(sha256_block_fn *)sha256_block_data_order);
> +		sha256_base_do_finalize(desc, sha256_block_data_order);
>  		return sha256_base_finish(desc, out);
>  	}
>  
>  	sctx->finalize = 0;
>  	kernel_neon_begin();
> -	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
> +	sha256_base_do_finalize(desc, sha2_ce_transform);
>  	kernel_neon_end();
>  	return sha256_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
> index e273faca924f..dac3157937ba 100644
> --- a/arch/arm64/crypto/sha256-glue.c
> +++ b/arch/arm64/crypto/sha256-glue.c
> @@ -23,28 +23,25 @@ MODULE_LICENSE("GPL v2");
>  MODULE_ALIAS_CRYPTO("sha224");
>  MODULE_ALIAS_CRYPTO("sha256");
>  
> -asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
> -					unsigned int num_blks);
> +asmlinkage void sha256_block_data_order(struct sha256_state *sst, u8 const *src,
> +					int blocks);
>  EXPORT_SYMBOL(sha256_block_data_order);
>  
> -asmlinkage void sha256_block_neon(u32 *digest, const void *data,
> -				  unsigned int num_blks);
> +asmlinkage void sha256_block_neon(struct sha256_state *sst, u8 const *src,
> +				  int blocks);
>  
>  static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
>  				      unsigned int len)
>  {
> -	return sha256_base_do_update(desc, data, len,
> -				(sha256_block_fn *)sha256_block_data_order);
> +	return sha256_base_do_update(desc, data, len, sha256_block_data_order);
>  }
>  
>  static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
>  				     unsigned int len, u8 *out)
>  {
>  	if (len)
> -		sha256_base_do_update(desc, data, len,
> -				(sha256_block_fn *)sha256_block_data_order);
> -	sha256_base_do_finalize(desc,
> -				(sha256_block_fn *)sha256_block_data_order);
> +		sha256_base_do_update(desc, data, len, sha256_block_data_order);
> +	sha256_base_do_finalize(desc, sha256_block_data_order);
>  
>  	return sha256_base_finish(desc, out);
>  }
> @@ -87,7 +84,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
>  
>  	if (!crypto_simd_usable())
>  		return sha256_base_do_update(desc, data, len,
> -				(sha256_block_fn *)sha256_block_data_order);
> +				sha256_block_data_order);
>  
>  	while (len > 0) {
>  		unsigned int chunk = len;
> @@ -103,8 +100,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
>  				sctx->count % SHA256_BLOCK_SIZE;
>  
>  		kernel_neon_begin();
> -		sha256_base_do_update(desc, data, chunk,
> -				      (sha256_block_fn *)sha256_block_neon);
> +		sha256_base_do_update(desc, data, chunk, sha256_block_neon);
>  		kernel_neon_end();
>  		data += chunk;
>  		len -= chunk;
> @@ -118,15 +114,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
>  	if (!crypto_simd_usable()) {
>  		if (len)
>  			sha256_base_do_update(desc, data, len,
> -				(sha256_block_fn *)sha256_block_data_order);
> -		sha256_base_do_finalize(desc,
> -				(sha256_block_fn *)sha256_block_data_order);
> +				sha256_block_data_order);
> +		sha256_base_do_finalize(desc, sha256_block_data_order);
>  	} else {
>  		if (len)
>  			sha256_update_neon(desc, data, len);
>  		kernel_neon_begin();
> -		sha256_base_do_finalize(desc,
> -				(sha256_block_fn *)sha256_block_neon);
> +		sha256_base_do_finalize(desc, sha256_block_neon);
>  		kernel_neon_end();
>  	}
>  	return sha256_base_finish(desc, out);
> diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
> index 2369540040aa..0f964235d753 100644
> --- a/arch/arm64/crypto/sha512-ce-glue.c
> +++ b/arch/arm64/crypto/sha512-ce-glue.c
> @@ -27,18 +27,18 @@ MODULE_LICENSE("GPL v2");
>  asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
>  				    int blocks);
>  
> -asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
> +asmlinkage void sha512_block_data_order(struct sha512_state *sst, u8 const *src,
> +					int blocks);
>  
>  static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
>  			    unsigned int len)
>  {
>  	if (!crypto_simd_usable())
>  		return sha512_base_do_update(desc, data, len,
> -				(sha512_block_fn *)sha512_block_data_order);
> +					     sha512_block_data_order);
>  
>  	kernel_neon_begin();
> -	sha512_base_do_update(desc, data, len,
> -			      (sha512_block_fn *)sha512_ce_transform);
> +	sha512_base_do_update(desc, data, len, sha512_ce_transform);
>  	kernel_neon_end();
>  
>  	return 0;
> @@ -50,16 +50,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
>  	if (!crypto_simd_usable()) {
>  		if (len)
>  			sha512_base_do_update(desc, data, len,
> -				(sha512_block_fn *)sha512_block_data_order);
> -		sha512_base_do_finalize(desc,
> -				(sha512_block_fn *)sha512_block_data_order);
> +					      sha512_block_data_order);
> +		sha512_base_do_finalize(desc, sha512_block_data_order);
>  		return sha512_base_finish(desc, out);
>  	}
>  
>  	kernel_neon_begin();
> -	sha512_base_do_update(desc, data, len,
> -			      (sha512_block_fn *)sha512_ce_transform);
> -	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
> +	sha512_base_do_update(desc, data, len, sha512_ce_transform);
> +	sha512_base_do_finalize(desc, sha512_ce_transform);
>  	kernel_neon_end();
>  	return sha512_base_finish(desc, out);
>  }
> @@ -67,13 +65,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
>  static int sha512_ce_final(struct shash_desc *desc, u8 *out)
>  {
>  	if (!crypto_simd_usable()) {
> -		sha512_base_do_finalize(desc,
> -				(sha512_block_fn *)sha512_block_data_order);
> +		sha512_base_do_finalize(desc, sha512_block_data_order);
>  		return sha512_base_finish(desc, out);
>  	}
>  
>  	kernel_neon_begin();
> -	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
> +	sha512_base_do_finalize(desc, sha512_ce_transform);
>  	kernel_neon_end();
>  	return sha512_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
> index d915c656e5fe..0f6b610a7954 100644
> --- a/arch/arm64/crypto/sha512-glue.c
> +++ b/arch/arm64/crypto/sha512-glue.c
> @@ -20,25 +20,22 @@ MODULE_LICENSE("GPL v2");
>  MODULE_ALIAS_CRYPTO("sha384");
>  MODULE_ALIAS_CRYPTO("sha512");
>  
> -asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
> -					unsigned int num_blks);
> +asmlinkage void sha512_block_data_order(struct sha512_state *sst,
> +					u8 const *src, int blocks);
>  EXPORT_SYMBOL(sha512_block_data_order);
>  
>  static int sha512_update(struct shash_desc *desc, const u8 *data,
>  			 unsigned int len)
>  {
> -	return sha512_base_do_update(desc, data, len,
> -			(sha512_block_fn *)sha512_block_data_order);
> +	return sha512_base_do_update(desc, data, len, sha512_block_data_order);
>  }
>  
>  static int sha512_finup(struct shash_desc *desc, const u8 *data,
>  			unsigned int len, u8 *out)
>  {
>  	if (len)
> -		sha512_base_do_update(desc, data, len,
> -			(sha512_block_fn *)sha512_block_data_order);
> -	sha512_base_do_finalize(desc,
> -			(sha512_block_fn *)sha512_block_data_order);
> +		sha512_base_do_update(desc, data, len, sha512_block_data_order);
> +	sha512_base_do_finalize(desc, sha512_block_data_order);
>  
>  	return sha512_base_finish(desc, out);
>  }
> 
> base-commit: 100d46bd72ec689a5582c2f5f4deadc5bcb92d60
> -- 
> 2.24.0.rc1.363.gb1bccd3e3d-goog
> 

-- 
Kees Cook

* Re: [PATCH] crypto: arm64/sha: fix function types
  2019-11-12 22:30 ` Sami Tolvanen
@ 2019-11-13 20:04   ` Eric Biggers
  -1 siblings, 0 replies; 34+ messages in thread
From: Eric Biggers @ 2019-11-13 20:04 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Herbert Xu, Ard Biesheuvel, Kees Cook, linux-crypto,
	linux-arm-kernel, linux-kernel

On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> Declare assembly functions with the expected function type
> instead of casting pointers in C to avoid type mismatch failures
> with Control-Flow Integrity (CFI) checking.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> ---
>  arch/arm64/crypto/sha1-ce-glue.c   | 12 +++++-------
>  arch/arm64/crypto/sha2-ce-glue.c   | 26 +++++++++++---------------
>  arch/arm64/crypto/sha256-glue.c    | 30 ++++++++++++------------------
>  arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
>  arch/arm64/crypto/sha512-glue.c    | 13 +++++--------
>  5 files changed, 43 insertions(+), 61 deletions(-)
> 
> diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> index bdc1b6d7aff7..3153a9bbb683 100644
> --- a/arch/arm64/crypto/sha1-ce-glue.c
> +++ b/arch/arm64/crypto/sha1-ce-glue.c
> @@ -25,7 +25,7 @@ struct sha1_ce_state {
>  	u32			finalize;
>  };
>  
> -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
>  				  int blocks);

Please update the comments in the corresponding assembly files too.

Also, this change doesn't really make sense because the assembly functions still
expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
not present in struct sha1_state.  There should either be wrapper functions that
explicitly do the cast from sha1_state to sha1_ce_state, or there should be
comments in the assembly files that very clearly explain that although the
function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.

Likewise for SHA-256 and SHA-512.

- Eric
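
A minimal sketch of the wrapper-function approach described above, assuming
struct sha1_ce_state embeds its struct sha1_state in a member named sst (as
the offsetof() in the patch suggests); the wrapper name is hypothetical:

/* Sketch for arch/arm64/crypto/sha1-ce-glue.c; names are illustrative. */
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
				  int blocks);

/* Matches sha1_block_fn, so C call sites need no cast. */
static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
				int blocks)
{
	sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
			  src, blocks);
}

Call sites would then pass __sha1_ce_transform with no cast; the indirect
call that CFI checks targets a C function whose type matches sha1_block_fn,
while the direct call into the assembly keeps its original prototype.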


* Re: [PATCH] crypto: arm64/sha: fix function types
  2019-11-13 20:04   ` Eric Biggers
@ 2019-11-13 22:28     ` Sami Tolvanen
  -1 siblings, 0 replies; 34+ messages in thread
From: Sami Tolvanen @ 2019-11-13 22:28 UTC (permalink / raw)
  To: Sami Tolvanen, Herbert Xu, Ard Biesheuvel, Kees Cook,
	linux-crypto, linux-arm-kernel, LKML

On Wed, Nov 13, 2019 at 12:04 PM Eric Biggers <ebiggers@kernel.org> wrote:
>
> On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> > Declare assembly functions with the expected function type
> > instead of casting pointers in C to avoid type mismatch failures
> > with Control-Flow Integrity (CFI) checking.
> >
> > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > ---
> >  arch/arm64/crypto/sha1-ce-glue.c   | 12 +++++-------
> >  arch/arm64/crypto/sha2-ce-glue.c   | 26 +++++++++++---------------
> >  arch/arm64/crypto/sha256-glue.c    | 30 ++++++++++++------------------
> >  arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
> >  arch/arm64/crypto/sha512-glue.c    | 13 +++++--------
> >  5 files changed, 43 insertions(+), 61 deletions(-)
> >
> > diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> > index bdc1b6d7aff7..3153a9bbb683 100644
> > --- a/arch/arm64/crypto/sha1-ce-glue.c
> > +++ b/arch/arm64/crypto/sha1-ce-glue.c
> > @@ -25,7 +25,7 @@ struct sha1_ce_state {
> >       u32                     finalize;
> >  };
> >
> > -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> >                                 int blocks);
>
> Please update the comments in the corresponding assembly files too.
>
> Also, this change doesn't really make sense because the assembly functions still
> expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
> not present in struct sha1_state.  There should either be wrapper functions that
> explicitly do the cast from sha1_state to sha1_ce_state, or there should be
> comments in the assembly files that very clearly explain that although the
> function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.

Agreed, this needs a comment explaining the type mismatch. I'm also
fine with using wrapper functions and explicitly casting the
parameters instead of changing function declarations. Herbert, Ard,
any preferences?

Sami

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH] crypto: arm64/sha: fix function types
  2019-11-13 22:28     ` Sami Tolvanen
@ 2019-11-14  9:45       ` Ard Biesheuvel
  0 siblings, 0 replies; 34+ messages in thread
From: Ard Biesheuvel @ 2019-11-14  9:45 UTC (permalink / raw)
  To: Sami Tolvanen; +Cc: Herbert Xu, Kees Cook, linux-crypto, linux-arm-kernel, LKML

On Wed, 13 Nov 2019 at 22:28, Sami Tolvanen <samitolvanen@google.com> wrote:
>
> On Wed, Nov 13, 2019 at 12:04 PM Eric Biggers <ebiggers@kernel.org> wrote:
> >
> > On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> > > Declare assembly functions with the expected function type
> > > instead of casting pointers in C to avoid type mismatch failures
> > > with Control-Flow Integrity (CFI) checking.
> > >
> > > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > > ---
> > >  arch/arm64/crypto/sha1-ce-glue.c   | 12 +++++-------
> > >  arch/arm64/crypto/sha2-ce-glue.c   | 26 +++++++++++---------------
> > >  arch/arm64/crypto/sha256-glue.c    | 30 ++++++++++++------------------
> > >  arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
> > >  arch/arm64/crypto/sha512-glue.c    | 13 +++++--------
> > >  5 files changed, 43 insertions(+), 61 deletions(-)
> > >
> > > diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> > > index bdc1b6d7aff7..3153a9bbb683 100644
> > > --- a/arch/arm64/crypto/sha1-ce-glue.c
> > > +++ b/arch/arm64/crypto/sha1-ce-glue.c
> > > @@ -25,7 +25,7 @@ struct sha1_ce_state {
> > >       u32                     finalize;
> > >  };
> > >
> > > -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > > +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > >                                 int blocks);
> >
> > Please update the comments in the corresponding assembly files too.
> >
> > Also, this change doesn't really make sense because the assembly functions still
> > expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
> > not present in struct sha1_state.  There should either be wrapper functions that
> > explicitly do the cast from sha1_state to sha1_ce_state, or there should be
> > comments in the assembly files that very clearly explain that although the
> > function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.
>
> Agreed, this needs a comment explaining the type mismatch. I'm also
> fine with using wrapper functions and explicitly casting the
> parameters instead of changing function declarations. Herbert, Ard,
> any preferences?
>

I guess the former would be cleaner, using container_of() rather than
a blind cast to make the code more self-documenting. The extra branch
shouldn't really matter.
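
E.g. something like this (sketch only; the wrapper name is arbitrary, and
'sst' is the sha1_state embedded in struct sha1_ce_state):

static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
				int blocks)
{
	/* recover the enclosing sha1_ce_state from its embedded sst */
	sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
			  src, blocks);
}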

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH] crypto: arm64/sha: fix function types
  2019-11-14  9:45       ` Ard Biesheuvel
@ 2019-11-14 18:21         ` Sami Tolvanen
  0 siblings, 0 replies; 34+ messages in thread
From: Sami Tolvanen @ 2019-11-14 18:21 UTC (permalink / raw)
  To: Ard Biesheuvel
  Cc: Herbert Xu, Kees Cook, linux-crypto, linux-arm-kernel, LKML

On Thu, Nov 14, 2019 at 1:45 AM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
>
> On Wed, 13 Nov 2019 at 22:28, Sami Tolvanen <samitolvanen@google.com> wrote:
> >
> > On Wed, Nov 13, 2019 at 12:04 PM Eric Biggers <ebiggers@kernel.org> wrote:
> > >
> > > On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> > > > Declare assembly functions with the expected function type
> > > > instead of casting pointers in C to avoid type mismatch failures
> > > > with Control-Flow Integrity (CFI) checking.
> > > >
> > > > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > > > ---
> > > >  arch/arm64/crypto/sha1-ce-glue.c   | 12 +++++-------
> > > >  arch/arm64/crypto/sha2-ce-glue.c   | 26 +++++++++++---------------
> > > >  arch/arm64/crypto/sha256-glue.c    | 30 ++++++++++++------------------
> > > >  arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
> > > >  arch/arm64/crypto/sha512-glue.c    | 13 +++++--------
> > > >  5 files changed, 43 insertions(+), 61 deletions(-)
> > > >
> > > > diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> > > > index bdc1b6d7aff7..3153a9bbb683 100644
> > > > --- a/arch/arm64/crypto/sha1-ce-glue.c
> > > > +++ b/arch/arm64/crypto/sha1-ce-glue.c
> > > > @@ -25,7 +25,7 @@ struct sha1_ce_state {
> > > >       u32                     finalize;
> > > >  };
> > > >
> > > > -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > > > +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > > >                                 int blocks);
> > >
> > > Please update the comments in the corresponding assembly files too.
> > >
> > > Also, this change doesn't really make sense because the assembly functions still
> > > expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
> > > not present in struct sha1_state.  There should either be wrapper functions that
> > > explicitly do the cast from sha1_state to sha1_ce_state, or there should be
> > > comments in the assembly files that very clearly explain that although the
> > > function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.
> >
> > Agreed, this needs a comment explaining the type mismatch. I'm also
> > fine with using wrapper functions and explicitly casting the
> > parameters instead of changing function declarations. Herbert, Ard,
> > any preferences?
> >
>
> I guess the former would be cleaner, using container_of() rather than
> a blind cast to make the code more self-documenting. The extra branch
> shouldn't really matter.

Sure, using container_of() sounds like a better option; I'll use that
in v2. Thanks!

Sami

^ permalink raw reply	[flat|nested] 34+ messages in thread

* [PATCH v2] crypto: arm64/sha: fix function types
  2019-11-12 22:30 ` Sami Tolvanen
@ 2019-11-14 22:51   ` Sami Tolvanen
  0 siblings, 0 replies; 34+ messages in thread
From: Sami Tolvanen @ 2019-11-14 22:51 UTC (permalink / raw)
  To: Herbert Xu, Ard Biesheuvel, Eric Biggers
  Cc: Kees Cook, linux-crypto, linux-arm-kernel, linux-kernel, Sami Tolvanen

Instead of casting pointers to callback functions, add C wrappers
to avoid type mismatch failures with Control-Flow Integrity (CFI)
checking.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
---
 arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
 arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
 arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
 arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
 arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
 5 files changed, 76 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..76a951ce2a7b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -28,6 +28,13 @@ struct sha1_ce_state {
 asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
 				  int blocks);
 
+static inline void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+				       int blocks)
+{
+	return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
+				 src, blocks);
+}
+
 const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
 const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
 
@@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, __sha1_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, __sha1_ce_transform);
 	if (!finalize)
-		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+		sha1_base_do_finalize(desc, __sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
@@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_finalize(desc, __sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..6042555517b0 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -28,6 +28,13 @@ struct sha256_ce_state {
 asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
 				  int blocks);
 
+static inline void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+				       int blocks)
+{
+	return sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst),
+				 src, blocks);
+}
+
 const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
 					      sst.count);
 const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
@@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
 
 asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
 
+static inline void __sha256_block_data_order(struct sha256_state *sst,
+					     u8 const *src, int blocks)
+{
+	return sha256_block_data_order(sst->state, src, blocks);
+}
+
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
@@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
@@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
 	if (!finalize)
-		sha256_base_do_finalize(desc,
-					(sha256_block_fn *)sha2_ce_transform);
+		sha256_base_do_finalize(desc, __sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
@@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
 	if (!crypto_simd_usable()) {
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_finalize(desc, __sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..bb0f89382d70 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
 					unsigned int num_blks);
 EXPORT_SYMBOL(sha256_block_data_order);
 
+static inline void __sha256_block_data_order(struct sha256_state *sst,
+					     u8 const *src, int blocks)
+{
+	return sha256_block_data_order(sst->state, src, blocks);
+}
+
 asmlinkage void sha256_block_neon(u32 *digest, const void *data,
 				  unsigned int num_blks);
 
+static inline void __sha256_block_neon(struct sha256_state *sst,
+				       u8 const *src, int blocks)
+{
+	return sha256_block_neon(sst->state, src, blocks);
+}
+
 static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
 				      unsigned int len)
 {
 	return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				     __sha256_block_data_order);
 }
 
 static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
 {
 	if (len)
 		sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-	sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				      __sha256_block_data_order);
+	sha256_base_do_finalize(desc, __sha256_block_data_order);
 
 	return sha256_base_finish(desc, out);
 }
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 
 	while (len > 0) {
 		unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 				sctx->count % SHA256_BLOCK_SIZE;
 
 		kernel_neon_begin();
-		sha256_base_do_update(desc, data, chunk,
-				      (sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
 		kernel_neon_end();
 		data += chunk;
 		len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 	} else {
 		if (len)
 			sha256_update_neon(desc, data, len);
 		kernel_neon_begin();
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_finalize(desc, __sha256_block_neon);
 		kernel_neon_end();
 	}
 	return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..076fcae454e0 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
 
 asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
 
+static inline void __sha512_block_data_order(struct sha512_state *sst,
+					     u8 const *src, int blocks)
+{
+	return sha512_block_data_order(sst->state, src, blocks);
+}
+
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
 	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
+					     __sha512_block_data_order);
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+					      __sha512_block_data_order);
+		sha512_base_do_finalize(desc, __sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
@@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
 	if (!crypto_simd_usable()) {
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+		sha512_base_do_finalize(desc, __sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..125aac89c3c4 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("sha384");
 MODULE_ALIAS_CRYPTO("sha512");
 
-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
 					unsigned int num_blks);
 EXPORT_SYMBOL(sha512_block_data_order);
 
+static inline void __sha512_block_data_order(struct sha512_state *sst,
+					     u8 const *src, int blocks)
+{
+	return sha512_block_data_order(sst->state, src, blocks);
+}
+
 static int sha512_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
 	return sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
+				     __sha512_block_data_order);
 }
 
 static int sha512_finup(struct shash_desc *desc, const u8 *data,
@@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
 {
 	if (len)
 		sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
-	sha512_base_do_finalize(desc,
-			(sha512_block_fn *)sha512_block_data_order);
+				      __sha512_block_data_order);
+	sha512_base_do_finalize(desc, __sha512_block_data_order);
 
 	return sha512_base_finish(desc, out);
 }

base-commit: 96b95eff4a591dbac582c2590d067e356a18aacb
-- 
2.24.0.432.g9d3f5f5b63-goog


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* Re: [PATCH v2] crypto: arm64/sha: fix function types
  2019-11-14 22:51   ` Sami Tolvanen
@ 2019-11-15 11:32     ` Ard Biesheuvel
  0 siblings, 0 replies; 34+ messages in thread
From: Ard Biesheuvel @ 2019-11-15 11:32 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Herbert Xu, Eric Biggers, Kees Cook,
	open list:HARDWARE RANDOM NUMBER GENERATOR CORE,
	linux-arm-kernel, Linux Kernel Mailing List

On Thu, 14 Nov 2019 at 22:51, Sami Tolvanen <samitolvanen@google.com> wrote:
>
> Instead of casting pointers to callback functions, add C wrappers
> to avoid type mismatch failures with Control-Flow Integrity (CFI)
> checking.
>
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> ---
>  arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
>  arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
>  arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
>  arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
>  arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
>  5 files changed, 76 insertions(+), 48 deletions(-)
>
> diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> index bdc1b6d7aff7..76a951ce2a7b 100644
> --- a/arch/arm64/crypto/sha1-ce-glue.c
> +++ b/arch/arm64/crypto/sha1-ce-glue.c
> @@ -28,6 +28,13 @@ struct sha1_ce_state {
>  asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
>                                   int blocks);
>
> +static inline void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> +                                      int blocks)

Nit: making a function inline when all we ever do is take its address
is rather pointless, so please drop that (below as well)

With that fixed (and assuming that the crypto selftests still pass -
please confirm that you've tried that)

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>


> +{
> +       return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
> +                                src, blocks);
> +}
> +
>  const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
>  const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
>
> @@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
>
>         sctx->finalize = 0;
>         kernel_neon_begin();
> -       sha1_base_do_update(desc, data, len,
> -                           (sha1_block_fn *)sha1_ce_transform);
> +       sha1_base_do_update(desc, data, len, __sha1_ce_transform);
>         kernel_neon_end();
>
>         return 0;
> @@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
>         sctx->finalize = finalize;
>
>         kernel_neon_begin();
> -       sha1_base_do_update(desc, data, len,
> -                           (sha1_block_fn *)sha1_ce_transform);
> +       sha1_base_do_update(desc, data, len, __sha1_ce_transform);
>         if (!finalize)
> -               sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
> +               sha1_base_do_finalize(desc, __sha1_ce_transform);
>         kernel_neon_end();
>         return sha1_base_finish(desc, out);
>  }
> @@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
>
>         sctx->finalize = 0;
>         kernel_neon_begin();
> -       sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
> +       sha1_base_do_finalize(desc, __sha1_ce_transform);
>         kernel_neon_end();
>         return sha1_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
> index 604a01a4ede6..6042555517b0 100644
> --- a/arch/arm64/crypto/sha2-ce-glue.c
> +++ b/arch/arm64/crypto/sha2-ce-glue.c
> @@ -28,6 +28,13 @@ struct sha256_ce_state {
>  asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
>                                   int blocks);
>
> +static inline void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
> +                                      int blocks)
> +{
> +       return sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst),
> +                                src, blocks);
> +}
> +
>  const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
>                                               sst.count);
>  const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
> @@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
>
>  asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
>
> +static inline void __sha256_block_data_order(struct sha256_state *sst,
> +                                            u8 const *src, int blocks)
> +{
> +       return sha256_block_data_order(sst->state, src, blocks);
> +}
> +
>  static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
>                             unsigned int len)
>  {
> @@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
>
>         if (!crypto_simd_usable())
>                 return sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                               __sha256_block_data_order);
>
>         sctx->finalize = 0;
>         kernel_neon_begin();
> -       sha256_base_do_update(desc, data, len,
> -                             (sha256_block_fn *)sha2_ce_transform);
> +       sha256_base_do_update(desc, data, len, __sha2_ce_transform);
>         kernel_neon_end();
>
>         return 0;
> @@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
>         if (!crypto_simd_usable()) {
>                 if (len)
>                         sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> -               sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                               __sha256_block_data_order);
> +               sha256_base_do_finalize(desc, __sha256_block_data_order);
>                 return sha256_base_finish(desc, out);
>         }
>
> @@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
>         sctx->finalize = finalize;
>
>         kernel_neon_begin();
> -       sha256_base_do_update(desc, data, len,
> -                             (sha256_block_fn *)sha2_ce_transform);
> +       sha256_base_do_update(desc, data, len, __sha2_ce_transform);
>         if (!finalize)
> -               sha256_base_do_finalize(desc,
> -                                       (sha256_block_fn *)sha2_ce_transform);
> +               sha256_base_do_finalize(desc, __sha2_ce_transform);
>         kernel_neon_end();
>         return sha256_base_finish(desc, out);
>  }
> @@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
>         struct sha256_ce_state *sctx = shash_desc_ctx(desc);
>
>         if (!crypto_simd_usable()) {
> -               sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +               sha256_base_do_finalize(desc, __sha256_block_data_order);
>                 return sha256_base_finish(desc, out);
>         }
>
>         sctx->finalize = 0;
>         kernel_neon_begin();
> -       sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
> +       sha256_base_do_finalize(desc, __sha2_ce_transform);
>         kernel_neon_end();
>         return sha256_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
> index e273faca924f..bb0f89382d70 100644
> --- a/arch/arm64/crypto/sha256-glue.c
> +++ b/arch/arm64/crypto/sha256-glue.c
> @@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
>                                         unsigned int num_blks);
>  EXPORT_SYMBOL(sha256_block_data_order);
>
> +static inline void __sha256_block_data_order(struct sha256_state *sst,
> +                                            u8 const *src, int blocks)
> +{
> +       return sha256_block_data_order(sst->state, src, blocks);
> +}
> +
>  asmlinkage void sha256_block_neon(u32 *digest, const void *data,
>                                   unsigned int num_blks);
>
> +static inline void __sha256_block_neon(struct sha256_state *sst,
> +                                      u8 const *src, int blocks)
> +{
> +       return sha256_block_neon(sst->state, src, blocks);
> +}
> +
>  static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
>                                       unsigned int len)
>  {
>         return sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                                    __sha256_block_data_order);
>  }
>
>  static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
> @@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
>  {
>         if (len)
>                 sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> -       sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                                     __sha256_block_data_order);
> +       sha256_base_do_finalize(desc, __sha256_block_data_order);
>
>         return sha256_base_finish(desc, out);
>  }
> @@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
>
>         if (!crypto_simd_usable())
>                 return sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                               __sha256_block_data_order);
>
>         while (len > 0) {
>                 unsigned int chunk = len;
> @@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
>                                 sctx->count % SHA256_BLOCK_SIZE;
>
>                 kernel_neon_begin();
> -               sha256_base_do_update(desc, data, chunk,
> -                                     (sha256_block_fn *)sha256_block_neon);
> +               sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
>                 kernel_neon_end();
>                 data += chunk;
>                 len -= chunk;
> @@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
>         if (!crypto_simd_usable()) {
>                 if (len)
>                         sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> -               sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                               __sha256_block_data_order);
> +               sha256_base_do_finalize(desc, __sha256_block_data_order);
>         } else {
>                 if (len)
>                         sha256_update_neon(desc, data, len);
>                 kernel_neon_begin();
> -               sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_neon);
> +               sha256_base_do_finalize(desc, __sha256_block_neon);
>                 kernel_neon_end();
>         }
>         return sha256_base_finish(desc, out);
> diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
> index 2369540040aa..076fcae454e0 100644
> --- a/arch/arm64/crypto/sha512-ce-glue.c
> +++ b/arch/arm64/crypto/sha512-ce-glue.c
> @@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
>
>  asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
>
> +static inline void __sha512_block_data_order(struct sha512_state *sst,
> +                                            u8 const *src, int blocks)
> +{
> +       return sha512_block_data_order(sst->state, src, blocks);
> +}
> +
>  static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
>                             unsigned int len)
>  {
>         if (!crypto_simd_usable())
>                 return sha512_base_do_update(desc, data, len,
> -                               (sha512_block_fn *)sha512_block_data_order);
> +                                            __sha512_block_data_order);
>
>         kernel_neon_begin();
> -       sha512_base_do_update(desc, data, len,
> -                             (sha512_block_fn *)sha512_ce_transform);
> +       sha512_base_do_update(desc, data, len, sha512_ce_transform);
>         kernel_neon_end();
>
>         return 0;
> @@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
>         if (!crypto_simd_usable()) {
>                 if (len)
>                         sha512_base_do_update(desc, data, len,
> -                               (sha512_block_fn *)sha512_block_data_order);
> -               sha512_base_do_finalize(desc,
> -                               (sha512_block_fn *)sha512_block_data_order);
> +                                             __sha512_block_data_order);
> +               sha512_base_do_finalize(desc, __sha512_block_data_order);
>                 return sha512_base_finish(desc, out);
>         }
>
>         kernel_neon_begin();
> -       sha512_base_do_update(desc, data, len,
> -                             (sha512_block_fn *)sha512_ce_transform);
> -       sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
> +       sha512_base_do_update(desc, data, len, sha512_ce_transform);
> +       sha512_base_do_finalize(desc, sha512_ce_transform);
>         kernel_neon_end();
>         return sha512_base_finish(desc, out);
>  }
> @@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
>  static int sha512_ce_final(struct shash_desc *desc, u8 *out)
>  {
>         if (!crypto_simd_usable()) {
> -               sha512_base_do_finalize(desc,
> -                               (sha512_block_fn *)sha512_block_data_order);
> +               sha512_base_do_finalize(desc, __sha512_block_data_order);
>                 return sha512_base_finish(desc, out);
>         }
>
>         kernel_neon_begin();
> -       sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
> +       sha512_base_do_finalize(desc, sha512_ce_transform);
>         kernel_neon_end();
>         return sha512_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
> index d915c656e5fe..125aac89c3c4 100644
> --- a/arch/arm64/crypto/sha512-glue.c
> +++ b/arch/arm64/crypto/sha512-glue.c
> @@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
>  MODULE_ALIAS_CRYPTO("sha384");
>  MODULE_ALIAS_CRYPTO("sha512");
>
> -asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
> +asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
>                                         unsigned int num_blks);
>  EXPORT_SYMBOL(sha512_block_data_order);
>
> +static inline void __sha512_block_data_order(struct sha512_state *sst,
> +                                            u8 const *src, int blocks)
> +{
> +       return sha512_block_data_order(sst->state, src, blocks);
> +}
> +
>  static int sha512_update(struct shash_desc *desc, const u8 *data,
>                          unsigned int len)
>  {
>         return sha512_base_do_update(desc, data, len,
> -                       (sha512_block_fn *)sha512_block_data_order);
> +                                    __sha512_block_data_order);
>  }
>
>  static int sha512_finup(struct shash_desc *desc, const u8 *data,
> @@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
>  {
>         if (len)
>                 sha512_base_do_update(desc, data, len,
> -                       (sha512_block_fn *)sha512_block_data_order);
> -       sha512_base_do_finalize(desc,
> -                       (sha512_block_fn *)sha512_block_data_order);
> +                                     __sha512_block_data_order);
> +       sha512_base_do_finalize(desc, __sha512_block_data_order);
>
>         return sha512_base_finish(desc, out);
>  }
>
> base-commit: 96b95eff4a591dbac582c2590d067e356a18aacb
> --
> 2.24.0.432.g9d3f5f5b63-goog
>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH v2] crypto: arm64/sha: fix function types
@ 2019-11-15 11:32     ` Ard Biesheuvel
  0 siblings, 0 replies; 34+ messages in thread
From: Ard Biesheuvel @ 2019-11-15 11:32 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Herbert Xu, Eric Biggers, Linux Kernel Mailing List,
	open list:HARDWARE RANDOM NUMBER GENERATOR CORE,
	linux-arm-kernel, Kees Cook

On Thu, 14 Nov 2019 at 22:51, Sami Tolvanen <samitolvanen@google.com> wrote:
>
> Instead of casting pointers to callback functions, add C wrappers
> to avoid type mismatch failures with Control-Flow Integrity (CFI)
> checking.
>
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> ---
>  arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
>  arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
>  arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
>  arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
>  arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
>  5 files changed, 76 insertions(+), 48 deletions(-)
>
> diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> index bdc1b6d7aff7..76a951ce2a7b 100644
> --- a/arch/arm64/crypto/sha1-ce-glue.c
> +++ b/arch/arm64/crypto/sha1-ce-glue.c
> @@ -28,6 +28,13 @@ struct sha1_ce_state {
>  asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
>                                   int blocks);
>
> +static inline void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> +                                      int blocks)

Nit: making a function inline when all we ever do is take its address
is rather pointless, so please drop that (below as well)

With that fixed (and assuming that the crypto selftests still pass -
please confirm that you've tried that)

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>


> +{
> +       return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
> +                                src, blocks);
> +}
> +
>  const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
>  const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
>
> @@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
>
>         sctx->finalize = 0;
>         kernel_neon_begin();
> -       sha1_base_do_update(desc, data, len,
> -                           (sha1_block_fn *)sha1_ce_transform);
> +       sha1_base_do_update(desc, data, len, __sha1_ce_transform);
>         kernel_neon_end();
>
>         return 0;
> @@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
>         sctx->finalize = finalize;
>
>         kernel_neon_begin();
> -       sha1_base_do_update(desc, data, len,
> -                           (sha1_block_fn *)sha1_ce_transform);
> +       sha1_base_do_update(desc, data, len, __sha1_ce_transform);
>         if (!finalize)
> -               sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
> +               sha1_base_do_finalize(desc, __sha1_ce_transform);
>         kernel_neon_end();
>         return sha1_base_finish(desc, out);
>  }
> @@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
>
>         sctx->finalize = 0;
>         kernel_neon_begin();
> -       sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
> +       sha1_base_do_finalize(desc, __sha1_ce_transform);
>         kernel_neon_end();
>         return sha1_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
> index 604a01a4ede6..6042555517b0 100644
> --- a/arch/arm64/crypto/sha2-ce-glue.c
> +++ b/arch/arm64/crypto/sha2-ce-glue.c
> @@ -28,6 +28,13 @@ struct sha256_ce_state {
>  asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
>                                   int blocks);
>
> +static inline void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
> +                                      int blocks)
> +{
> +       return sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst),
> +                                src, blocks);
> +}
> +
>  const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
>                                               sst.count);
>  const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
> @@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
>
>  asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
>
> +static inline void __sha256_block_data_order(struct sha256_state *sst,
> +                                            u8 const *src, int blocks)
> +{
> +       return sha256_block_data_order(sst->state, src, blocks);
> +}
> +
>  static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
>                             unsigned int len)
>  {
> @@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
>
>         if (!crypto_simd_usable())
>                 return sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                               __sha256_block_data_order);
>
>         sctx->finalize = 0;
>         kernel_neon_begin();
> -       sha256_base_do_update(desc, data, len,
> -                             (sha256_block_fn *)sha2_ce_transform);
> +       sha256_base_do_update(desc, data, len, __sha2_ce_transform);
>         kernel_neon_end();
>
>         return 0;
> @@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
>         if (!crypto_simd_usable()) {
>                 if (len)
>                         sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> -               sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                               __sha256_block_data_order);
> +               sha256_base_do_finalize(desc, __sha256_block_data_order);
>                 return sha256_base_finish(desc, out);
>         }
>
> @@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
>         sctx->finalize = finalize;
>
>         kernel_neon_begin();
> -       sha256_base_do_update(desc, data, len,
> -                             (sha256_block_fn *)sha2_ce_transform);
> +       sha256_base_do_update(desc, data, len, __sha2_ce_transform);
>         if (!finalize)
> -               sha256_base_do_finalize(desc,
> -                                       (sha256_block_fn *)sha2_ce_transform);
> +               sha256_base_do_finalize(desc, __sha2_ce_transform);
>         kernel_neon_end();
>         return sha256_base_finish(desc, out);
>  }
> @@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
>         struct sha256_ce_state *sctx = shash_desc_ctx(desc);
>
>         if (!crypto_simd_usable()) {
> -               sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +               sha256_base_do_finalize(desc, __sha256_block_data_order);
>                 return sha256_base_finish(desc, out);
>         }
>
>         sctx->finalize = 0;
>         kernel_neon_begin();
> -       sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
> +       sha256_base_do_finalize(desc, __sha2_ce_transform);
>         kernel_neon_end();
>         return sha256_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
> index e273faca924f..bb0f89382d70 100644
> --- a/arch/arm64/crypto/sha256-glue.c
> +++ b/arch/arm64/crypto/sha256-glue.c
> @@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
>                                         unsigned int num_blks);
>  EXPORT_SYMBOL(sha256_block_data_order);
>
> +static inline void __sha256_block_data_order(struct sha256_state *sst,
> +                                            u8 const *src, int blocks)
> +{
> +       return sha256_block_data_order(sst->state, src, blocks);
> +}
> +
>  asmlinkage void sha256_block_neon(u32 *digest, const void *data,
>                                   unsigned int num_blks);
>
> +static inline void __sha256_block_neon(struct sha256_state *sst,
> +                                      u8 const *src, int blocks)
> +{
> +       return sha256_block_neon(sst->state, src, blocks);
> +}
> +
>  static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
>                                       unsigned int len)
>  {
>         return sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                                    __sha256_block_data_order);
>  }
>
>  static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
> @@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
>  {
>         if (len)
>                 sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> -       sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                                     __sha256_block_data_order);
> +       sha256_base_do_finalize(desc, __sha256_block_data_order);
>
>         return sha256_base_finish(desc, out);
>  }
> @@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
>
>         if (!crypto_simd_usable())
>                 return sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                               __sha256_block_data_order);
>
>         while (len > 0) {
>                 unsigned int chunk = len;
> @@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
>                                 sctx->count % SHA256_BLOCK_SIZE;
>
>                 kernel_neon_begin();
> -               sha256_base_do_update(desc, data, chunk,
> -                                     (sha256_block_fn *)sha256_block_neon);
> +               sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
>                 kernel_neon_end();
>                 data += chunk;
>                 len -= chunk;
> @@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
>         if (!crypto_simd_usable()) {
>                 if (len)
>                         sha256_base_do_update(desc, data, len,
> -                               (sha256_block_fn *)sha256_block_data_order);
> -               sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_data_order);
> +                               __sha256_block_data_order);
> +               sha256_base_do_finalize(desc, __sha256_block_data_order);
>         } else {
>                 if (len)
>                         sha256_update_neon(desc, data, len);
>                 kernel_neon_begin();
> -               sha256_base_do_finalize(desc,
> -                               (sha256_block_fn *)sha256_block_neon);
> +               sha256_base_do_finalize(desc, __sha256_block_neon);
>                 kernel_neon_end();
>         }
>         return sha256_base_finish(desc, out);
> diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
> index 2369540040aa..076fcae454e0 100644
> --- a/arch/arm64/crypto/sha512-ce-glue.c
> +++ b/arch/arm64/crypto/sha512-ce-glue.c
> @@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
>
>  asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
>
> +static inline void __sha512_block_data_order(struct sha512_state *sst,
> +                                            u8 const *src, int blocks)
> +{
> +       return sha512_block_data_order(sst->state, src, blocks);
> +}
> +
>  static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
>                             unsigned int len)
>  {
>         if (!crypto_simd_usable())
>                 return sha512_base_do_update(desc, data, len,
> -                               (sha512_block_fn *)sha512_block_data_order);
> +                                            __sha512_block_data_order);
>
>         kernel_neon_begin();
> -       sha512_base_do_update(desc, data, len,
> -                             (sha512_block_fn *)sha512_ce_transform);
> +       sha512_base_do_update(desc, data, len, sha512_ce_transform);
>         kernel_neon_end();
>
>         return 0;
> @@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
>         if (!crypto_simd_usable()) {
>                 if (len)
>                         sha512_base_do_update(desc, data, len,
> -                               (sha512_block_fn *)sha512_block_data_order);
> -               sha512_base_do_finalize(desc,
> -                               (sha512_block_fn *)sha512_block_data_order);
> +                                             __sha512_block_data_order);
> +               sha512_base_do_finalize(desc, __sha512_block_data_order);
>                 return sha512_base_finish(desc, out);
>         }
>
>         kernel_neon_begin();
> -       sha512_base_do_update(desc, data, len,
> -                             (sha512_block_fn *)sha512_ce_transform);
> -       sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
> +       sha512_base_do_update(desc, data, len, sha512_ce_transform);
> +       sha512_base_do_finalize(desc, sha512_ce_transform);
>         kernel_neon_end();
>         return sha512_base_finish(desc, out);
>  }
> @@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
>  static int sha512_ce_final(struct shash_desc *desc, u8 *out)
>  {
>         if (!crypto_simd_usable()) {
> -               sha512_base_do_finalize(desc,
> -                               (sha512_block_fn *)sha512_block_data_order);
> +               sha512_base_do_finalize(desc, __sha512_block_data_order);
>                 return sha512_base_finish(desc, out);
>         }
>
>         kernel_neon_begin();
> -       sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
> +       sha512_base_do_finalize(desc, sha512_ce_transform);
>         kernel_neon_end();
>         return sha512_base_finish(desc, out);
>  }
> diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
> index d915c656e5fe..125aac89c3c4 100644
> --- a/arch/arm64/crypto/sha512-glue.c
> +++ b/arch/arm64/crypto/sha512-glue.c
> @@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
>  MODULE_ALIAS_CRYPTO("sha384");
>  MODULE_ALIAS_CRYPTO("sha512");
>
> -asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
> +asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
>                                         unsigned int num_blks);
>  EXPORT_SYMBOL(sha512_block_data_order);
>
> +static inline void __sha512_block_data_order(struct sha512_state *sst,
> +                                            u8 const *src, int blocks)
> +{
> +       return sha512_block_data_order(sst->state, src, blocks);
> +}
> +
>  static int sha512_update(struct shash_desc *desc, const u8 *data,
>                          unsigned int len)
>  {
>         return sha512_base_do_update(desc, data, len,
> -                       (sha512_block_fn *)sha512_block_data_order);
> +                                    __sha512_block_data_order);
>  }
>
>  static int sha512_finup(struct shash_desc *desc, const u8 *data,
> @@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
>  {
>         if (len)
>                 sha512_base_do_update(desc, data, len,
> -                       (sha512_block_fn *)sha512_block_data_order);
> -       sha512_base_do_finalize(desc,
> -                       (sha512_block_fn *)sha512_block_data_order);
> +                                     __sha512_block_data_order);
> +       sha512_base_do_finalize(desc, __sha512_block_data_order);
>
>         return sha512_base_finish(desc, out);
>  }
>
> base-commit: 96b95eff4a591dbac582c2590d067e356a18aacb
> --
> 2.24.0.432.g9d3f5f5b63-goog
>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH v2] crypto: arm64/sha: fix function types
  2019-11-15 11:32     ` Ard Biesheuvel
@ 2019-11-19 20:01       ` Sami Tolvanen
  -1 siblings, 0 replies; 34+ messages in thread
From: Sami Tolvanen @ 2019-11-19 20:01 UTC (permalink / raw)
  To: Ard Biesheuvel
  Cc: Herbert Xu, Eric Biggers, Kees Cook,
	open list:HARDWARE RANDOM NUMBER GENERATOR CORE,
	linux-arm-kernel, Linux Kernel Mailing List

On Fri, Nov 15, 2019 at 3:32 AM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
>
> On Thu, 14 Nov 2019 at 22:51, Sami Tolvanen <samitolvanen@google.com> wrote:
> >
> > Instead of casting pointers to callback functions, add C wrappers
> > to avoid type mismatch failures with Control-Flow Integrity (CFI)
> > checking.
> >
> > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > ---
> >  arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
> >  arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
> >  arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
> >  arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
> >  arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
> >  5 files changed, 76 insertions(+), 48 deletions(-)
> >
> > diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> > index bdc1b6d7aff7..76a951ce2a7b 100644
> > --- a/arch/arm64/crypto/sha1-ce-glue.c
> > +++ b/arch/arm64/crypto/sha1-ce-glue.c
> > @@ -28,6 +28,13 @@ struct sha1_ce_state {
> >  asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> >                                   int blocks);
> >
> > +static inline void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > +                                      int blocks)
>
> Nit: making a function inline when all we ever do is take its address
> is rather pointless, so please drop that (below as well)

Ack, I'll send v3 that removes the extra inlines shortly.

> With that fixed (and assuming that the crypto selftests still pass -
> please confirm that you've tried that)

I don't have a test device that supports sha512-ce, but self-tests for
everything else pass with these changes.

Sami

^ permalink raw reply	[flat|nested] 34+ messages in thread

* [PATCH v3] crypto: arm64/sha: fix function types
  2019-11-12 22:30 ` Sami Tolvanen
@ 2019-11-19 20:13   ` Sami Tolvanen
  -1 siblings, 0 replies; 34+ messages in thread
From: Sami Tolvanen @ 2019-11-19 20:13 UTC (permalink / raw)
  To: Herbert Xu, Ard Biesheuvel, Eric Biggers
  Cc: Kees Cook, linux-crypto, linux-arm-kernel, linux-kernel,
	Sami Tolvanen, Ard Biesheuvel

Instead of casting pointers to callback functions, add C wrappers
to avoid type mismatch failures with Control-Flow Integrity (CFI)
checking.
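
Editor's sketch, not part of the submission, using illustrative stand-in
types: CFI checks an indirect call against the declared type of the
function pointer, so calling an assembly routine through a pointer of a
different prototype is flagged at run time even when the argument layouts
are compatible; a wrapper whose prototype matches the pointer type keeps
the call well typed.

#include <stddef.h>

/* Stand-in for the kernel's container_of(), to keep the sketch standalone. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hash_state    { unsigned long long count; };
struct hash_ce_state { struct hash_state sst; unsigned int finalize; };

typedef void block_fn(struct hash_state *sst, const unsigned char *src,
		      int blocks);

/* Stand-in for the assembly transform, which wants the *_ce_state type. */
static void ce_transform(struct hash_ce_state *sst, const unsigned char *src,
			 int blocks)
{
	/* process 'blocks' input blocks from 'src' into sst->sst */
}

/* Wrapper with the exact block_fn prototype: indirect calls through a
 * block_fn pointer to this function pass the CFI type check. */
static void ce_transform_wrapper(struct hash_state *sst,
				 const unsigned char *src, int blocks)
{
	ce_transform(container_of(sst, struct hash_ce_state, sst),
		     src, blocks);
}

static block_fn *const ok = ce_transform_wrapper;	/* fine under CFI */
/* static block_fn *const bad = (block_fn *)ce_transform;  trips the check */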

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
---
Changes in v3:
  - Removed unnecessary inline attributes.

Changes in v2:
  - Added wrapper functions instead of changing parameter types
    for the assembly functions.

---
 arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
 arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
 arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
 arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
 arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
 5 files changed, 76 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..ee8d9f0edb1a 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -28,6 +28,13 @@ struct sha1_ce_state {
 asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
 				  int blocks);
 
+static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+				int blocks)
+{
+	return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
+				 src, blocks);
+}
+
 const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
 const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
 
@@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, __sha1_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, __sha1_ce_transform);
 	if (!finalize)
-		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+		sha1_base_do_finalize(desc, __sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
@@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_finalize(desc, __sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..2b05e95ab2b5 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -28,6 +28,13 @@ struct sha256_ce_state {
 asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
 				  int blocks);
 
+static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+				int blocks)
+{
+	return sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst),
+				 src, blocks);
+}
+
 const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
 					      sst.count);
 const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
@@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
 
 asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
 
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+				      int blocks)
+{
+	return sha256_block_data_order(sst->state, src, blocks);
+}
+
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
@@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
@@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
 	if (!finalize)
-		sha256_base_do_finalize(desc,
-					(sha256_block_fn *)sha2_ce_transform);
+		sha256_base_do_finalize(desc, __sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
@@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
 	if (!crypto_simd_usable()) {
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_finalize(desc, __sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..03ed0358c6eb 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
 					unsigned int num_blks);
 EXPORT_SYMBOL(sha256_block_data_order);
 
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+				      int blocks)
+{
+	return sha256_block_data_order(sst->state, src, blocks);
+}
+
 asmlinkage void sha256_block_neon(u32 *digest, const void *data,
 				  unsigned int num_blks);
 
+static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
+				int blocks)
+{
+	return sha256_block_neon(sst->state, src, blocks);
+}
+
 static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
 				      unsigned int len)
 {
 	return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				     __sha256_block_data_order);
 }
 
 static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
 {
 	if (len)
 		sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-	sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				      __sha256_block_data_order);
+	sha256_base_do_finalize(desc, __sha256_block_data_order);
 
 	return sha256_base_finish(desc, out);
 }
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 
 	while (len > 0) {
 		unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 				sctx->count % SHA256_BLOCK_SIZE;
 
 		kernel_neon_begin();
-		sha256_base_do_update(desc, data, chunk,
-				      (sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
 		kernel_neon_end();
 		data += chunk;
 		len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 	} else {
 		if (len)
 			sha256_update_neon(desc, data, len);
 		kernel_neon_begin();
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_finalize(desc, __sha256_block_neon);
 		kernel_neon_end();
 	}
 	return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..659c913cd056 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
 
 asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
 
+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+				      int blocks)
+{
+	return sha512_block_data_order(sst->state, src, blocks);
+}
+
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
 	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
+					     __sha512_block_data_order);
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+					      __sha512_block_data_order);
+		sha512_base_do_finalize(desc, __sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
@@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
 	if (!crypto_simd_usable()) {
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+		sha512_base_do_finalize(desc, __sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..86045ae596b1 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("sha384");
 MODULE_ALIAS_CRYPTO("sha512");
 
-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
 					unsigned int num_blks);
 EXPORT_SYMBOL(sha512_block_data_order);
 
+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+				      int blocks)
+{
+	return sha512_block_data_order(sst->state, src, blocks);
+}
+
 static int sha512_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
 	return sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
+				     __sha512_block_data_order);
 }
 
 static int sha512_finup(struct shash_desc *desc, const u8 *data,
@@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
 {
 	if (len)
 		sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
-	sha512_base_do_finalize(desc,
-			(sha512_block_fn *)sha512_block_data_order);
+				      __sha512_block_data_order);
+	sha512_base_do_finalize(desc, __sha512_block_data_order);
 
 	return sha512_base_finish(desc, out);
 }

base-commit: fd8f64df95204951c3edd4c4a7817c909d55a100
-- 
2.24.0.432.g9d3f5f5b63-goog


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* Re: [PATCH v3] crypto: arm64/sha: fix function types
  2019-11-19 20:13   ` Sami Tolvanen
@ 2019-11-22 11:04     ` Herbert Xu
  -1 siblings, 0 replies; 34+ messages in thread
From: Herbert Xu @ 2019-11-22 11:04 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Ard Biesheuvel, Eric Biggers, Kees Cook, linux-crypto,
	linux-arm-kernel, linux-kernel, Ard Biesheuvel

On Tue, Nov 19, 2019 at 12:13:53PM -0800, Sami Tolvanen wrote:
> Instead of casting pointers to callback functions, add C wrappers
> to avoid type mismatch failures with Control-Flow Integrity (CFI)
> checking.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> Changes in v3:
>   - Removed unnecessary inline attributes.
> 
> Changes in v2:
>   - Added wrapper functions instead of changing parameter types
>     for the assembly functions.
> 
> ---
>  arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
>  arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
>  arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
>  arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
>  arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
>  5 files changed, 76 insertions(+), 48 deletions(-)

Patch applied.  Thanks.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH v3] crypto: arm64/sha: fix function types
  2019-11-22 11:04     ` Herbert Xu
@ 2019-11-22 11:05       ` Herbert Xu
  -1 siblings, 0 replies; 34+ messages in thread
From: Herbert Xu @ 2019-11-22 11:05 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Ard Biesheuvel, Eric Biggers, Kees Cook, linux-crypto,
	linux-arm-kernel, linux-kernel, Ard Biesheuvel

On Fri, Nov 22, 2019 at 07:04:46PM +0800, Herbert Xu wrote:
> On Tue, Nov 19, 2019 at 12:13:53PM -0800, Sami Tolvanen wrote:
> > Instead of casting pointers to callback functions, add C wrappers
> > to avoid type mismatch failures with Control-Flow Integrity (CFI)
> > checking.
> > 
> > Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> > Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
> > ---
> > Changes in v3:
> >   - Removed unnecessary inline attributes.
> > 
> > Changes in v2:
> >   - Added wrapper functions instead of changing parameter types
> >     for the assembly functions.
> > 
> > ---
> >  arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
> >  arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
> >  arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
> >  arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
> >  arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
> >  5 files changed, 76 insertions(+), 48 deletions(-)
> 
> Patch applied.  Thanks.

Scratch that.  This patch is still in the queue.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH v3] crypto: arm64/sha: fix function types
  2019-11-19 20:13   ` Sami Tolvanen
@ 2019-11-27 18:19     ` Eric Biggers
  -1 siblings, 0 replies; 34+ messages in thread
From: Eric Biggers @ 2019-11-27 18:19 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Herbert Xu, Ard Biesheuvel, Kees Cook, linux-crypto,
	linux-arm-kernel, linux-kernel, Ard Biesheuvel

On Tue, Nov 19, 2019 at 12:13:53PM -0800, Sami Tolvanen wrote:
> +static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> +				int blocks)
> +{
> +	return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
> +				 src, blocks);
> +}
> +

'return' isn't needed in functions that return void.

Likewise everywhere else in this patch.
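
For illustration only (editor's note), the sha1 wrapper from the patch
would then read, with the surrounding declarations unchanged:

static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
				int blocks)
{
	sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
			  src, blocks);
}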

Otherwise this patch looks fine.

- Eric

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH v3] crypto: arm64/sha: fix function types
  2019-11-27 18:19     ` Eric Biggers
@ 2019-11-27 23:42       ` Sami Tolvanen
  -1 siblings, 0 replies; 34+ messages in thread
From: Sami Tolvanen @ 2019-11-27 23:42 UTC (permalink / raw)
  To: Eric Biggers
  Cc: Herbert Xu, Ard Biesheuvel, Kees Cook, linux-crypto,
	linux-arm-kernel, LKML, Ard Biesheuvel

On Wed, Nov 27, 2019 at 10:19 AM Eric Biggers <ebiggers@kernel.org> wrote:
>
> On Tue, Nov 19, 2019 at 12:13:53PM -0800, Sami Tolvanen wrote:
> > +static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > +                             int blocks)
> > +{
> > +     return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
> > +                              src, blocks);
> > +}
> > +
>
> 'return' isn't needed in functions that return void.
>
> Likewise everywhere else in this patch.
>
> Otherwise this patch looks fine.

Thanks for taking a look, Eric. I'll send out v4 with this fixed.

Sami

^ permalink raw reply	[flat|nested] 34+ messages in thread

* [PATCH v4] crypto: arm64/sha: fix function types
  2019-11-12 22:30 ` Sami Tolvanen
@ 2019-11-27 23:55   ` Sami Tolvanen
  -1 siblings, 0 replies; 34+ messages in thread
From: Sami Tolvanen @ 2019-11-27 23:55 UTC (permalink / raw)
  To: Herbert Xu, Ard Biesheuvel, Eric Biggers
  Cc: Kees Cook, linux-crypto, linux-arm-kernel, linux-kernel,
	Sami Tolvanen, Ard Biesheuvel

Instead of casting pointers to callback functions, add C wrappers
to avoid type mismatch failures with Control-Flow Integrity (CFI)
checking.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
---
Changes in v4:
  - Removed unnecessary returns.

Changes in v3:
  - Removed unnecessary inline attributes.

Changes in v2:
  - Added wrapper functions instead of changing parameter types
    for the assembly functions.

---
 arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
 arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
 arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
 arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
 arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
 5 files changed, 76 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..63c875d3314b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -28,6 +28,13 @@ struct sha1_ce_state {
 asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
 				  int blocks);
 
+static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+				int blocks)
+{
+	sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst), src,
+			  blocks);
+}
+
 const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
 const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
 
@@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, __sha1_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha1_base_do_update(desc, data, len,
-			    (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_update(desc, data, len, __sha1_ce_transform);
 	if (!finalize)
-		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+		sha1_base_do_finalize(desc, __sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
@@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+	sha1_base_do_finalize(desc, __sha1_ce_transform);
 	kernel_neon_end();
 	return sha1_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..a8e67bafba3d 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -28,6 +28,13 @@ struct sha256_ce_state {
 asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
 				  int blocks);
 
+static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+				int blocks)
+{
+	sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst), src,
+			  blocks);
+}
+
 const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
 					      sst.count);
 const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
@@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
 
 asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
 
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+				      int blocks)
+{
+	sha256_block_data_order(sst->state, src, blocks);
+}
+
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
@@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
@@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	sctx->finalize = finalize;
 
 	kernel_neon_begin();
-	sha256_base_do_update(desc, data, len,
-			      (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_update(desc, data, len, __sha2_ce_transform);
 	if (!finalize)
-		sha256_base_do_finalize(desc,
-					(sha256_block_fn *)sha2_ce_transform);
+		sha256_base_do_finalize(desc, __sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
@@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
 	if (!crypto_simd_usable()) {
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 		return sha256_base_finish(desc, out);
 	}
 
 	sctx->finalize = 0;
 	kernel_neon_begin();
-	sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+	sha256_base_do_finalize(desc, __sha2_ce_transform);
 	kernel_neon_end();
 	return sha256_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..01e0ab36d135 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
 					unsigned int num_blks);
 EXPORT_SYMBOL(sha256_block_data_order);
 
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+				      int blocks)
+{
+	sha256_block_data_order(sst->state, src, blocks);
+}
+
 asmlinkage void sha256_block_neon(u32 *digest, const void *data,
 				  unsigned int num_blks);
 
+static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
+				int blocks)
+{
+	sha256_block_neon(sst->state, src, blocks);
+}
+
 static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
 				      unsigned int len)
 {
 	return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				     __sha256_block_data_order);
 }
 
 static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
 {
 	if (len)
 		sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-	sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				      __sha256_block_data_order);
+	sha256_base_do_finalize(desc, __sha256_block_data_order);
 
 	return sha256_base_finish(desc, out);
 }
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 
 	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
 
 	while (len > 0) {
 		unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 				sctx->count % SHA256_BLOCK_SIZE;
 
 		kernel_neon_begin();
-		sha256_base_do_update(desc, data, chunk,
-				      (sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
 		kernel_neon_end();
 		data += chunk;
 		len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
-				(sha256_block_fn *)sha256_block_data_order);
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_data_order);
+				__sha256_block_data_order);
+		sha256_base_do_finalize(desc, __sha256_block_data_order);
 	} else {
 		if (len)
 			sha256_update_neon(desc, data, len);
 		kernel_neon_begin();
-		sha256_base_do_finalize(desc,
-				(sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_finalize(desc, __sha256_block_neon);
 		kernel_neon_end();
 	}
 	return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..dc890a719f54 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
 
 asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
 
+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+				      int blocks)
+{
+	sha512_block_data_order(sst->state, src, blocks);
+}
+
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
 	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
+					     __sha512_block_data_order);
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
 	kernel_neon_end();
 
 	return 0;
@@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
-				(sha512_block_fn *)sha512_block_data_order);
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+					      __sha512_block_data_order);
+		sha512_base_do_finalize(desc, __sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_update(desc, data, len,
-			      (sha512_block_fn *)sha512_ce_transform);
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_update(desc, data, len, sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
@@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
 	if (!crypto_simd_usable()) {
-		sha512_base_do_finalize(desc,
-				(sha512_block_fn *)sha512_block_data_order);
+		sha512_base_do_finalize(desc, __sha512_block_data_order);
 		return sha512_base_finish(desc, out);
 	}
 
 	kernel_neon_begin();
-	sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+	sha512_base_do_finalize(desc, sha512_ce_transform);
 	kernel_neon_end();
 	return sha512_base_finish(desc, out);
 }
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..78d3083de6b7 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("sha384");
 MODULE_ALIAS_CRYPTO("sha512");
 
-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
 					unsigned int num_blks);
 EXPORT_SYMBOL(sha512_block_data_order);
 
+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+				      int blocks)
+{
+	sha512_block_data_order(sst->state, src, blocks);
+}
+
 static int sha512_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
 	return sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
+				     __sha512_block_data_order);
 }
 
 static int sha512_finup(struct shash_desc *desc, const u8 *data,
@@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
 {
 	if (len)
 		sha512_base_do_update(desc, data, len,
-			(sha512_block_fn *)sha512_block_data_order);
-	sha512_base_do_finalize(desc,
-			(sha512_block_fn *)sha512_block_data_order);
+				      __sha512_block_data_order);
+	sha512_base_do_finalize(desc, __sha512_block_data_order);
 
 	return sha512_base_finish(desc, out);
 }

base-commit: 95f1fa9e3418d50ce099e67280b5497b9c93843b
-- 
2.24.0.432.g9d3f5f5b63-goog


^ permalink raw reply related	[flat|nested] 34+ messages in thread
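
Why the wrappers fix the CFI failure, in rough terms: Clang's CFI only lets an indirect call through a function pointer reach a function whose prototype matches the pointer's type. Casting the assembly routine to sha1_block_fn (and friends) hides the fact that its first argument is the more specific *_ce_state type, so the indirect call made by the sha*_base_do_update()/do_finalize() helpers trips the check at run time. A wrapper with exactly the expected prototype restores the match, and the call into the assembly routine becomes a direct, type-checked call. A self-contained sketch of the pattern follows; the struct and function names are hypothetical stand-ins, not taken from the kernel:

#include <stddef.h>

typedef unsigned char u8;

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct state {
	unsigned int count;
};

struct ce_state {
	struct state st;	/* embedded generic state, like sha1_ce_state */
	unsigned int finalize;
};

/* The callback type the generic helpers expect (cf. sha1_block_fn). */
typedef void block_fn(struct state *sst, const u8 *src, int blocks);

/* Stand-in for the assembly routine: note the more specific first argument. */
static void ce_transform(struct ce_state *cst, const u8 *src, int blocks)
{
	(void)cst; (void)src; (void)blocks;
}

/*
 * (block_fn *)ce_transform would compile, but the indirect call below would
 * then target a function whose type differs from block_fn, which is what
 * CFI rejects.  This wrapper has the expected type, so the indirect call
 * matches, and the call into ce_transform() is direct and type-checked.
 */
static void __ce_transform(struct state *sst, const u8 *src, int blocks)
{
	ce_transform(container_of(sst, struct ce_state, st), src, blocks);
}

/* Stand-in for sha*_base_do_update(): makes the indirect call. */
static void do_update(struct state *sst, const u8 *src, int blocks,
		      block_fn *fn)
{
	fn(sst, src, blocks);	/* indirect call, checked under CFI */
}

int main(void)
{
	struct ce_state cst = { { 0 }, 0 };
	u8 buf[64] = { 0 };

	do_update(&cst.st, buf, 1, __ce_transform);
	return 0;
}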

* Re: [PATCH v4] crypto: arm64/sha: fix function types
  2019-11-27 23:55   ` Sami Tolvanen
@ 2019-11-28  0:25     ` Eric Biggers
  -1 siblings, 0 replies; 34+ messages in thread
From: Eric Biggers @ 2019-11-28  0:25 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Herbert Xu, Ard Biesheuvel, Kees Cook, linux-crypto,
	linux-arm-kernel, linux-kernel, Ard Biesheuvel

On Wed, Nov 27, 2019 at 03:55:03PM -0800, Sami Tolvanen wrote:
> Instead of casting pointers to callback functions, add C wrappers
> to avoid type mismatch failures with Control-Flow Integrity (CFI)
> checking.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> Changes in v4:
>   - Removed unnecessary returns.
> 
> Changes in v3:
>   - Removed unnecessary inline attributes.
> 
> Changes in v2:
>   - Added wrapper functions instead of changing parameter types
>     for the assembly functions.
> 

Reviewed-by: Eric Biggers <ebiggers@kernel.org>

- Eric

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH v4] crypto: arm64/sha: fix function types
  2019-11-27 23:55   ` Sami Tolvanen
@ 2019-12-11  9:40     ` Herbert Xu
  -1 siblings, 0 replies; 34+ messages in thread
From: Herbert Xu @ 2019-12-11  9:40 UTC (permalink / raw)
  To: Sami Tolvanen
  Cc: Ard Biesheuvel, Eric Biggers, Kees Cook, linux-crypto,
	linux-arm-kernel, linux-kernel, Ard Biesheuvel

On Wed, Nov 27, 2019 at 03:55:03PM -0800, Sami Tolvanen wrote:
> Instead of casting pointers to callback functions, add C wrappers
> to avoid type mismatch failures with Control-Flow Integrity (CFI)
> checking.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> Changes in v4:
>   - Removed unnecessary returns.
> 
> Changes in v3:
>   - Removed unnecessary inline attributes.
> 
> Changes in v2:
>   - Added wrapper functions instead of changing parameter types
>     for the assembly functions.
> 
> ---
>  arch/arm64/crypto/sha1-ce-glue.c   | 17 +++++++++------
>  arch/arm64/crypto/sha2-ce-glue.c   | 34 ++++++++++++++++++------------
>  arch/arm64/crypto/sha256-glue.c    | 32 +++++++++++++++++-----------
>  arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
>  arch/arm64/crypto/sha512-glue.c    | 15 ++++++++-----
>  5 files changed, 76 insertions(+), 48 deletions(-)

Patch applied.  Thanks.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 34+ messages in thread

end of thread, other threads:[~2019-12-11  9:40 UTC | newest]

Thread overview: 34+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-11-12 22:30 [PATCH] crypto: arm64/sha: fix function types Sami Tolvanen
2019-11-12 22:30 ` Sami Tolvanen
2019-11-13 18:27 ` Kees Cook
2019-11-13 18:27   ` Kees Cook
2019-11-13 20:04 ` Eric Biggers
2019-11-13 20:04   ` Eric Biggers
2019-11-13 22:28   ` Sami Tolvanen
2019-11-13 22:28     ` Sami Tolvanen
2019-11-14  9:45     ` Ard Biesheuvel
2019-11-14  9:45       ` Ard Biesheuvel
2019-11-14 18:21       ` Sami Tolvanen
2019-11-14 18:21         ` Sami Tolvanen
2019-11-14 22:51 ` [PATCH v2] " Sami Tolvanen
2019-11-14 22:51   ` Sami Tolvanen
2019-11-15 11:32   ` Ard Biesheuvel
2019-11-15 11:32     ` Ard Biesheuvel
2019-11-19 20:01     ` Sami Tolvanen
2019-11-19 20:01       ` Sami Tolvanen
2019-11-19 20:13 ` [PATCH v3] " Sami Tolvanen
2019-11-19 20:13   ` Sami Tolvanen
2019-11-22 11:04   ` Herbert Xu
2019-11-22 11:04     ` Herbert Xu
2019-11-22 11:05     ` Herbert Xu
2019-11-22 11:05       ` Herbert Xu
2019-11-27 18:19   ` Eric Biggers
2019-11-27 18:19     ` Eric Biggers
2019-11-27 23:42     ` Sami Tolvanen
2019-11-27 23:42       ` Sami Tolvanen
2019-11-27 23:55 ` [PATCH v4] " Sami Tolvanen
2019-11-27 23:55   ` Sami Tolvanen
2019-11-28  0:25   ` Eric Biggers
2019-11-28  0:25     ` Eric Biggers
2019-12-11  9:40   ` Herbert Xu
2019-12-11  9:40     ` Herbert Xu
