From: Robert Elliott <elliott@hpe.com>
To: herbert@gondor.apana.org.au, davem@davemloft.net,
	tim.c.chen@linux.intel.com, ap420073@gmail.com, ardb@kernel.org,
	Jason@zx2c4.com, David.Laight@ACULAB.COM, ebiggers@kernel.org,
	linux-crypto@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: Robert Elliott <elliott@hpe.com>
Subject: [PATCH v4 22/24] crypto: x86 - report missing CPU features via module parameters
Date: Tue, 15 Nov 2022 22:13:40 -0600
Message-ID: <20221116041342.3841-23-elliott@hpe.com>
In-Reply-To: <20221116041342.3841-1-elliott@hpe.com>

Don't refuse to load a module because the CPU lacks additional x86
features (e.g., OSXSAVE) or x86 XSAVE features (e.g., YMM) beyond those
in its CPU ID match table. Instead, load the module but don't register
any crypto drivers, and report that one or more features are missing
via a new missing_x86_features module parameter (0 = all required
features present, 1 = one or more missing; each parameter's description
lists the features the module requires).
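
The pattern applied to each module is sketched below (condensed from
the changes that follow; the example_* names are illustrative, not
taken from any one file):

	static int missing_x86_features;
	module_param(missing_x86_features, int, 0444);
	MODULE_PARM_DESC(missing_x86_features,
			 "Missing x86 instruction set extensions (AVX)");

	static int __init example_mod_init(void)
	{
		if (!x86_match_cpu(module_cpu_ids))
			return -ENODEV;	/* wrong CPU: still refuse to load */

		if (!boot_cpu_has(X86_FEATURE_AVX)) {
			/* load, but register no drivers */
			missing_x86_features = 1;
			return 0;
		}
		return crypto_register_shash(&example_alg);
	}

	static void __exit example_mod_exit(void)
	{
		if (!missing_x86_features)
			crypto_unregister_shash(&example_alg);
		missing_x86_features = 0;
	}

With 0444 permissions, the value is readable from sysfs at
/sys/module/<module>/parameters/missing_x86_features.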

For the SHA modules, which register up to four drivers based on CPU
features, report a separate module parameter for each feature set (a
condensed sha1 example follows the list):
	missing_x86_features_avx2
	missing_x86_features_avx
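
For example, the sha1 init function reports the AVX2 set like this
(condensed from the changes below):

	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		if (boot_cpu_has(X86_FEATURE_BMI1) &&
		    boot_cpu_has(X86_FEATURE_BMI2)) {
			ret = crypto_register_shash(&sha1_avx2_alg);
			if (!ret)
				using_x86_avx2 = 1;
		} else {
			missing_x86_features_avx2 = 1;
		}
	}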

Signed-off-by: Robert Elliott <elliott@hpe.com>
---
 arch/x86/crypto/aegis128-aesni-glue.c      | 15 ++++++++++---
 arch/x86/crypto/aria_aesni_avx_glue.c      | 24 +++++++++++---------
 arch/x86/crypto/camellia_aesni_avx2_glue.c | 25 ++++++++++++---------
 arch/x86/crypto/camellia_aesni_avx_glue.c  | 25 ++++++++++++---------
 arch/x86/crypto/cast5_avx_glue.c           | 20 ++++++++++-------
 arch/x86/crypto/cast6_avx_glue.c           | 20 ++++++++++-------
 arch/x86/crypto/curve25519-x86_64.c        | 12 ++++++++--
 arch/x86/crypto/nhpoly1305-avx2-glue.c     | 15 ++++++++++---
 arch/x86/crypto/polyval-clmulni_glue.c     | 15 ++++++++++---
 arch/x86/crypto/serpent_avx2_glue.c        | 24 +++++++++++---------
 arch/x86/crypto/serpent_avx_glue.c         | 21 ++++++++++-------
 arch/x86/crypto/sha1_ssse3_glue.c          | 20 +++++++++++++----
 arch/x86/crypto/sha256_ssse3_glue.c        | 18 +++++++++++++--
 arch/x86/crypto/sha512_ssse3_glue.c        | 18 +++++++++++++--
 arch/x86/crypto/sm3_avx_glue.c             | 22 ++++++++++--------
 arch/x86/crypto/sm4_aesni_avx2_glue.c      | 26 +++++++++++++---------
 arch/x86/crypto/sm4_aesni_avx_glue.c       | 26 +++++++++++++---------
 arch/x86/crypto/twofish_avx_glue.c         | 19 ++++++++++------
 18 files changed, 244 insertions(+), 121 deletions(-)

diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index a3ebd018953c..e0312ecf34a8 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -288,6 +288,11 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (SSE2) and/or XSAVE features (SSE)");
+
 static struct simd_aead_alg *simd_alg;
 
 static int __init crypto_aegis128_aesni_module_init(void)
@@ -296,8 +301,10 @@ static int __init crypto_aegis128_aesni_module_init(void)
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
-	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
-		return -ENODEV;
+	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) {
+		missing_x86_features = 1;
+		return 0;
+	}
 
 	return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
 					  &simd_alg);
@@ -305,7 +312,9 @@ static int __init crypto_aegis128_aesni_module_init(void)
 
 static void __exit crypto_aegis128_aesni_module_exit(void)
 {
-	simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
+	if (!missing_x86_features)
+		simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
+	missing_x86_features = 0;
 }
 
 module_init(crypto_aegis128_aesni_module_init);
diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c
index 9fd3d1fe1105..ebb9760967b5 100644
--- a/arch/x86/crypto/aria_aesni_avx_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx_glue.c
@@ -176,23 +176,25 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (AES-NI, OSXSAVE) and/or XSAVE features (SSE, YMM)");
+
 static int __init aria_avx_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_AES) ||
 	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
-		pr_info("AES or OSXSAVE instructions are not detected.\n");
-		return -ENODEV;
+		missing_x86_features = 1;
+		return 0;
 	}
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	if (boot_cpu_has(X86_FEATURE_GFNI)) {
@@ -213,8 +215,10 @@ static int __init aria_avx_init(void)
 
 static void __exit aria_avx_exit(void)
 {
-	simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
-				  aria_simd_algs);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
+					  aria_simd_algs);
+	missing_x86_features = 0;
 	using_x86_gfni = 0;
 }
 
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 6c48fc9f3fde..e8ae1e1a801d 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -105,26 +105,28 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (AES-NI, AVX, OSXSAVE) and/or XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
 
 static int __init camellia_aesni_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_AES) ||
 	    !boot_cpu_has(X86_FEATURE_AVX) ||
 	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
-		pr_info("AES-NI, AVX, or OSXSAVE instructions are not detected.\n");
-		return -ENODEV;
+		missing_x86_features = 1;
+		return 0;
 	}
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(camellia_algs,
@@ -134,8 +136,11 @@ static int __init camellia_aesni_init(void)
 
 static void __exit camellia_aesni_fini(void)
 {
-	simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
-				  camellia_simd_algs);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(camellia_algs,
+					  ARRAY_SIZE(camellia_algs),
+					  camellia_simd_algs);
+	missing_x86_features = 0;
 }
 
 module_init(camellia_aesni_init);
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 6d7fc96d242e..6784d631575c 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -105,25 +105,27 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (AES-NI, OSXSAVE) and/or XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
 
 static int __init camellia_aesni_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_AES) ||
 	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
-		pr_info("AES-NI or OSXSAVE instructions are not detected.\n");
-		return -ENODEV;
+		missing_x86_features = 1;
+		return 0;
 	}
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(camellia_algs,
@@ -133,8 +135,11 @@ static int __init camellia_aesni_init(void)
 
 static void __exit camellia_aesni_fini(void)
 {
-	simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
-				  camellia_simd_algs);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(camellia_algs,
+					  ARRAY_SIZE(camellia_algs),
+					  camellia_simd_algs);
+	missing_x86_features = 0;
 }
 
 module_init(camellia_aesni_init);
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index bdc3c763334c..34ef032bb8d0 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -100,19 +100,21 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)];
 
 static int __init cast5_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(cast5_algs,
@@ -122,8 +124,10 @@ static int __init cast5_init(void)
 
 static void __exit cast5_exit(void)
 {
-	simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
-				  cast5_simd_algs);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
+					  cast5_simd_algs);
+	missing_x86_features = 0;
 }
 
 module_init(cast5_init);
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index addca34b3511..71559fd3ea87 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -100,19 +100,21 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)];
 
 static int __init cast6_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(cast6_algs,
@@ -122,8 +124,10 @@ static int __init cast6_init(void)
 
 static void __exit cast6_exit(void)
 {
-	simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
-				  cast6_simd_algs);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
+					  cast6_simd_algs);
+	missing_x86_features = 0;
 }
 
 module_init(cast6_init);
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index 6d222849e409..74672351e534 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -1706,13 +1706,20 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (BMI2)");
+
 static int __init curve25519_mod_init(void)
 {
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
-	if (!boot_cpu_has(X86_FEATURE_BMI2))
-		return -ENODEV;
+	if (!boot_cpu_has(X86_FEATURE_BMI2)) {
+		missing_x86_features = 1;
+		return 0;
+	}
 
 	static_branch_enable(&curve25519_use_bmi2_adx);
 
@@ -1725,6 +1732,7 @@ static void __exit curve25519_mod_exit(void)
 	if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
 	    static_branch_likely(&curve25519_use_bmi2_adx))
 		crypto_unregister_kpp(&curve25519_alg);
+	missing_x86_features = 0;
 }
 
 module_init(curve25519_mod_init);
diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
index fa415fec5793..2e63947bc9fa 100644
--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
@@ -67,20 +67,29 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (OSXSAVE)");
+
 static int __init nhpoly1305_mod_init(void)
 {
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
-	if (!boot_cpu_has(X86_FEATURE_OSXSAVE))
-		return -ENODEV;
+	if (!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+		missing_x86_features = 1;
+		return 0;
+	}
 
 	return crypto_register_shash(&nhpoly1305_alg);
 }
 
 static void __exit nhpoly1305_mod_exit(void)
 {
-	crypto_unregister_shash(&nhpoly1305_alg);
+	if (!missing_x86_features)
+		crypto_unregister_shash(&nhpoly1305_alg);
+	missing_x86_features = 0;
 }
 
 module_init(nhpoly1305_mod_init);
diff --git a/arch/x86/crypto/polyval-clmulni_glue.c b/arch/x86/crypto/polyval-clmulni_glue.c
index b98e32f8e2a4..20d4a68ec1d7 100644
--- a/arch/x86/crypto/polyval-clmulni_glue.c
+++ b/arch/x86/crypto/polyval-clmulni_glue.c
@@ -182,20 +182,29 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (AVX)");
+
 static int __init polyval_clmulni_mod_init(void)
 {
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
-	if (!boot_cpu_has(X86_FEATURE_AVX))
-		return -ENODEV;
+	if (!boot_cpu_has(X86_FEATURE_AVX)) {
+		missing_x86_features = 1;
+		return 0;
+	}
 
 	return crypto_register_shash(&polyval_alg);
 }
 
 static void __exit polyval_clmulni_mod_exit(void)
 {
-	crypto_unregister_shash(&polyval_alg);
+	if (!missing_x86_features)
+		crypto_unregister_shash(&polyval_alg);
+	missing_x86_features = 0;
 }
 
 module_init(polyval_clmulni_mod_init);
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index bc18149fb928..2aa62c93a16f 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -101,23 +101,25 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (OSXSAVE) and/or XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
 
 static int __init serpent_avx2_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
-		pr_info("OSXSAVE instructions are not detected.\n");
-		return -ENODEV;
+		missing_x86_features = 1;
+		return 0;
 	}
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(serpent_algs,
@@ -127,8 +129,10 @@ static int __init serpent_avx2_init(void)
 
 static void __exit serpent_avx2_fini(void)
 {
-	simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
-				  serpent_simd_algs);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
+					  serpent_simd_algs);
+	missing_x86_features = 0;
 }
 
 module_init(serpent_avx2_init);
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 0db18d99da50..28ee9717df49 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -107,19 +107,21 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
 
 static int __init serpent_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(serpent_algs,
@@ -129,8 +131,11 @@ static int __init serpent_init(void)
 
 static void __exit serpent_exit(void)
 {
-	simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
-				  serpent_simd_algs);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(serpent_algs,
+					  ARRAY_SIZE(serpent_algs),
+					  serpent_simd_algs);
+	missing_x86_features = 0;
 }
 
 module_init(serpent_init);
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 2445648cf234..405af5e14b67 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -351,9 +351,17 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features_avx2;
+static int missing_x86_features_avx;
+module_param(missing_x86_features_avx2, int, 0444);
+module_param(missing_x86_features_avx, int, 0444);
+MODULE_PARM_DESC(missing_x86_features_avx2,
+		 "Missing x86 instruction set extensions (BMI1, BMI2) to support AVX2");
+MODULE_PARM_DESC(missing_x86_features_avx,
+		 "Missing x86 XSAVE features (SSE, YMM) to support AVX");
+
 static int __init sha1_ssse3_mod_init(void)
 {
-	const char *feature_name;
 	int ret;
 
 	if (!x86_match_cpu(module_cpu_ids))
@@ -374,10 +382,11 @@ static int __init sha1_ssse3_mod_init(void)
 
 		if (boot_cpu_has(X86_FEATURE_BMI1) &&
 		    boot_cpu_has(X86_FEATURE_BMI2)) {
-
 			ret = crypto_register_shash(&sha1_avx2_alg);
 			if (!ret)
 				using_x86_avx2 = 1;
+		} else {
+			missing_x86_features_avx2 = 1;
 		}
 	}
 
@@ -385,11 +394,12 @@ static int __init sha1_ssse3_mod_init(void)
 	if (boot_cpu_has(X86_FEATURE_AVX)) {
 
 		if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-			       &feature_name)) {
-
+				      NULL)) {
 			ret = crypto_register_shash(&sha1_avx_alg);
 			if (!ret)
 				using_x86_avx = 1;
+		} else {
+			missing_x86_features_avx = 1;
 		}
 	}
 
@@ -415,6 +425,8 @@ static void __exit sha1_ssse3_mod_fini(void)
 	unregister_sha1_avx2();
 	unregister_sha1_avx();
 	unregister_sha1_ssse3();
+	missing_x86_features_avx2 = 0;
+	missing_x86_features_avx = 0;
 }
 
 module_init(sha1_ssse3_mod_init);
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 1464e6ccf912..293cf7085dd3 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -413,9 +413,17 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features_avx2;
+static int missing_x86_features_avx;
+module_param(missing_x86_features_avx2, int, 0444);
+module_param(missing_x86_features_avx, int, 0444);
+MODULE_PARM_DESC(missing_x86_features_avx2,
+		 "Missing x86 instruction set extensions (BMI2) to support AVX2");
+MODULE_PARM_DESC(missing_x86_features_avx,
+		 "Missing x86 XSAVE features (SSE, YMM) to support AVX");
+
 static int __init sha256_ssse3_mod_init(void)
 {
-	const char *feature_name;
 	int ret;
 
 	if (!x86_match_cpu(module_cpu_ids))
@@ -440,6 +448,8 @@ static int __init sha256_ssse3_mod_init(void)
 						ARRAY_SIZE(sha256_avx2_algs));
 			if (!ret)
 				using_x86_avx2 = 1;
+		} else {
+			missing_x86_features_avx2 = 1;
 		}
 	}
 
@@ -447,11 +457,13 @@ static int __init sha256_ssse3_mod_init(void)
 	if (boot_cpu_has(X86_FEATURE_AVX)) {
 
 		if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-			       &feature_name)) {
+				      NULL)) {
 			ret = crypto_register_shashes(sha256_avx_algs,
 						ARRAY_SIZE(sha256_avx_algs));
 			if (!ret)
 				using_x86_avx = 1;
+		} else {
+			missing_x86_features_avx = 1;
 		}
 	}
 
@@ -478,6 +490,8 @@ static void __exit sha256_ssse3_mod_fini(void)
 	unregister_sha256_avx2();
 	unregister_sha256_avx();
 	unregister_sha256_ssse3();
+	missing_x86_features_avx2 = 0;
+	missing_x86_features_avx = 0;
 }
 
 module_init(sha256_ssse3_mod_init);
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 04e2af951a3e..9f13baf7dda9 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -319,6 +319,15 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features_avx2;
+static int missing_x86_features_avx;
+module_param(missing_x86_features_avx2, int, 0444);
+module_param(missing_x86_features_avx, int, 0444);
+MODULE_PARM_DESC(missing_x86_features_avx2,
+		 "Missing x86 instruction set extensions (BMI2) to support AVX2");
+MODULE_PARM_DESC(missing_x86_features_avx,
+		 "Missing x86 XSAVE features (SSE, YMM) to support AVX");
+
 static void unregister_sha512_avx2(void)
 {
 	if (using_x86_avx2) {
@@ -330,7 +339,6 @@ static void unregister_sha512_avx2(void)
 
 static int __init sha512_ssse3_mod_init(void)
 {
-	const char *feature_name;
 	int ret;
 
 	if (!x86_match_cpu(module_cpu_ids))
@@ -343,6 +351,8 @@ static int __init sha512_ssse3_mod_init(void)
 					ARRAY_SIZE(sha512_avx2_algs));
 			if (!ret)
 				using_x86_avx2 = 1;
+		} else {
+			missing_x86_features_avx2 = 1;
 		}
 	}
 
@@ -350,11 +360,13 @@ static int __init sha512_ssse3_mod_init(void)
 	if (boot_cpu_has(X86_FEATURE_AVX)) {
 
 		if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				       &feature_name)) {
+				      NULL)) {
 			ret = crypto_register_shashes(sha512_avx_algs,
 					ARRAY_SIZE(sha512_avx_algs));
 			if (!ret)
 				using_x86_avx = 1;
+		} else {
+			missing_x86_features_avx = 1;
 		}
 	}
 
@@ -376,6 +388,8 @@ static void __exit sha512_ssse3_mod_fini(void)
 	unregister_sha512_avx2();
 	unregister_sha512_avx();
 	unregister_sha512_ssse3();
+	missing_x86_features_avx2 = 0;
+	missing_x86_features_avx = 0;
 }
 
 module_init(sha512_ssse3_mod_init);
diff --git a/arch/x86/crypto/sm3_avx_glue.c b/arch/x86/crypto/sm3_avx_glue.c
index c7786874319c..169ba6a2c806 100644
--- a/arch/x86/crypto/sm3_avx_glue.c
+++ b/arch/x86/crypto/sm3_avx_glue.c
@@ -126,22 +126,24 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (BMI2) and/or XSAVE features (SSE, YMM)");
+
 static int __init sm3_avx_mod_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_BMI2)) {
-		pr_info("BMI2 instruction are not detected.\n");
-		return -ENODEV;
+		missing_x86_features = 1;
+		return 0;
 	}
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return crypto_register_shash(&sm3_avx_alg);
@@ -149,7 +151,9 @@ static int __init sm3_avx_mod_init(void)
 
 static void __exit sm3_avx_mod_exit(void)
 {
-	crypto_unregister_shash(&sm3_avx_alg);
+	if (!missing_x86_features)
+		crypto_unregister_shash(&sm3_avx_alg);
+	missing_x86_features = 0;
 }
 
 module_init(sm3_avx_mod_init);
diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c
index 125b00db89b1..6bcf78231888 100644
--- a/arch/x86/crypto/sm4_aesni_avx2_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx2_glue.c
@@ -133,27 +133,29 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (AES-NI, AVX, OSXSAVE) and/or XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *
 simd_sm4_aesni_avx2_skciphers[ARRAY_SIZE(sm4_aesni_avx2_skciphers)];
 
 static int __init sm4_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_AVX) ||
 	    !boot_cpu_has(X86_FEATURE_AES) ||
 	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
-		pr_info("AVX, AES-NI, and/or OSXSAVE instructions are not detected.\n");
-		return -ENODEV;
+		missing_x86_features = 1;
+		return 0;
 	}
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(sm4_aesni_avx2_skciphers,
@@ -163,9 +165,11 @@ static int __init sm4_init(void)
 
 static void __exit sm4_exit(void)
 {
-	simd_unregister_skciphers(sm4_aesni_avx2_skciphers,
-				ARRAY_SIZE(sm4_aesni_avx2_skciphers),
-				simd_sm4_aesni_avx2_skciphers);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(sm4_aesni_avx2_skciphers,
+					  ARRAY_SIZE(sm4_aesni_avx2_skciphers),
+					  simd_sm4_aesni_avx2_skciphers);
+	missing_x86_features = 0;
 }
 
 module_init(sm4_init);
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
index ac8182b197cf..03775b1079dc 100644
--- a/arch/x86/crypto/sm4_aesni_avx_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx_glue.c
@@ -452,26 +452,28 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 instruction set extensions (AES-NI, OSXSAVE) and/or XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *
 simd_sm4_aesni_avx_skciphers[ARRAY_SIZE(sm4_aesni_avx_skciphers)];
 
 static int __init sm4_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_AES) ||
 	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
-		pr_info("AES-NI or OSXSAVE instructions are not detected.\n");
-		return -ENODEV;
+		missing_x86_features = 1;
+		return 0;
 	}
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
-				&feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(sm4_aesni_avx_skciphers,
@@ -481,9 +483,11 @@ static int __init sm4_init(void)
 
 static void __exit sm4_exit(void)
 {
-	simd_unregister_skciphers(sm4_aesni_avx_skciphers,
-					ARRAY_SIZE(sm4_aesni_avx_skciphers),
-					simd_sm4_aesni_avx_skciphers);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(sm4_aesni_avx_skciphers,
+					  ARRAY_SIZE(sm4_aesni_avx_skciphers),
+					  simd_sm4_aesni_avx_skciphers);
+	missing_x86_features = 0;
 }
 
 module_init(sm4_init);
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 4657e6efc35d..ae3cc4ad6f4f 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -110,18 +110,21 @@ static const struct x86_cpu_id module_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
 
+static int missing_x86_features;
+module_param(missing_x86_features, int, 0444);
+MODULE_PARM_DESC(missing_x86_features,
+		 "Missing x86 XSAVE features (SSE, YMM)");
+
 static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)];
 
 static int __init twofish_init(void)
 {
-	const char *feature_name;
-
 	if (!x86_match_cpu(module_cpu_ids))
 		return -ENODEV;
 
-	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &feature_name)) {
-		pr_info("CPU feature '%s' is not supported.\n", feature_name);
-		return -ENODEV;
+	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
+		missing_x86_features = 1;
+		return 0;
 	}
 
 	return simd_register_skciphers_compat(twofish_algs,
@@ -131,8 +134,10 @@ static int __init twofish_init(void)
 
 static void __exit twofish_exit(void)
 {
-	simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
-				  twofish_simd_algs);
+	if (!missing_x86_features)
+		simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
+					  twofish_simd_algs);
+	missing_x86_features = 0;
 }
 
 module_init(twofish_init);
-- 
2.38.1

