* [patch 0/3] crypto: padlock-aes: enable on VIA Nano
@ 2009-06-09 14:35 Chuck Ebbert
  2009-06-09 14:36 ` [patch 1/3] crypto: padlock-aes: work around Nano CPU errata in ECB mode Chuck Ebbert
                   ` (5 more replies)
  0 siblings, 6 replies; 9+ messages in thread
From: Chuck Ebbert @ 2009-06-09 14:35 UTC (permalink / raw)
  To: Harald Welte
  Cc: Herbert Xu, linux-kernel, linux-crypto, Sebastian Andrzej Siewior

The VIA Nano has a bug that makes the padlock unit fetch extra data
during encryption operations. Add workarounds for that, and enable
the driver on x86_64.
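
For illustration, the condition the workarounds guard against comes down to
simple offset arithmetic: a block that sits close enough to the end of a page
that the hardware's over-fetch would cross into the (possibly unmapped) next
page. Below is a minimal userspace sketch of that condition, assuming a 4 KiB
page and the 8-block fetch width seen on the affected stepping; it is only an
illustration, not part of the patches themselves:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define AES_BLOCK_SIZE	16UL

/* Blocks the hardware may fetch per ECB xcrypt on the affected stepping. */
#define ECB_FETCH_BLOCKS	8UL
#define ECB_FETCH_BYTES		(ECB_FETCH_BLOCKS * AES_BLOCK_SIZE)

/* Nonzero if an xcrypt starting at 'addr' could read past the end of the
 * page, i.e. touch a possibly unmapped neighbouring page. */
static int needs_bounce_buffer(unsigned long addr)
{
	unsigned long offset = addr & (PAGE_SIZE - 1);

	return offset + ECB_FETCH_BYTES > PAGE_SIZE;
}

int main(void)
{
	/* Last block of a page: the fetch would run 112 bytes into the next page. */
	printf("%d\n", needs_bounce_buffer(0x1000 - AES_BLOCK_SIZE));	/* 1 */
	/* Block well inside a page: no overrun possible. */
	printf("%d\n", needs_bounce_buffer(0x1000 + 256));		/* 0 */
	return 0;
}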

1/3 Fix ECB encryption mode
2/3 Fix CBC mode, clean up code
3/3 Enable building for 64-bit kernels


* [patch 1/3] crypto: padlock-aes: work around Nano CPU errata in ECB mode
  2009-06-09 14:35 [patch 0/3] crypto: padlock-aes: enable on VIA Nano Chuck Ebbert
@ 2009-06-09 14:36 ` Chuck Ebbert
  2009-06-09 14:37 ` [patch 2/3] crypto: padlock-aes: work around Nano CPU errata in CBC mode Chuck Ebbert
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 9+ messages in thread
From: Chuck Ebbert @ 2009-06-09 14:36 UTC (permalink / raw)
  To: Harald Welte
  Cc: Herbert Xu, linux-kernel, linux-crypto, Sebastian Andrzej Siewior

From: Chuck Ebbert <cebbert@redhat.com>
crypto: padlock-aes: work around Nano CPU errata in ECB mode

The VIA Nano processor has a bug that makes it prefetch extra data
during encryption operations, causing spurious page faults. Extend
the existing workaround for ECB mode to copy the data to a temporary
buffer to avoid the problem.

Signed-off-by: Chuck Ebbert <cebbert@redhat.com>

--- work-2.6.29.4.orig/drivers/crypto/padlock-aes.c
+++ work-2.6.29.4/drivers/crypto/padlock-aes.c
@@ -18,9 +18,17 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <asm/byteorder.h>
+#include <asm/processor.h>
 #include <asm/i387.h>
 #include "padlock.h"
 
+/* number of data blocks actually fetched for each xcrypt insn */
+static unsigned int ecb_fetch_blocks = 2;
+static unsigned int cbc_fetch_blocks = 1;
+
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
 /* Control word. */
 struct cword {
 	unsigned int __attribute__ ((__packed__))
@@ -169,54 +177,59 @@ static inline void padlock_store_cword(s
  */
 
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-				  struct cword *control_word)
+				  struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(1));
+		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			   struct cword *cword, int count)
 {
-	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * 7 + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
-	memcpy(tmp, in, AES_BLOCK_SIZE);
-	padlock_xcrypt(tmp, out, key, cword);
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	padlock_xcrypt(tmp, out, key, cword, count);
 }
 
 static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
-			     struct cword *cword)
+			     struct cword *cword, int count)
 {
-	/* padlock_xcrypt requires at least two blocks of data. */
-	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
-		       (PAGE_SIZE - 1)))) {
-		aes_crypt_copy(in, out, key, cword);
+	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+	 * We could avoid some copying here but it's probably not worth it.
+	 */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
+		aes_crypt_copy(in, out, key, cword, count);
 		return;
 	}
 
-	padlock_xcrypt(in, out, key, cword);
+	padlock_xcrypt(in, out, key, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 				      void *control_word, u32 count)
 {
-	if (count == 1) {
-		aes_crypt(input, output, key, control_word);
+	u32 initial = count & (ecb_fetch_blocks - 1);
+
+	if (count < ecb_fetch_blocks) {
+		aes_crypt(input, output, key, control_word, count);
 		return;
 	}
 
-	asm volatile ("test $1, %%cl;"
-		      "je 1f;"
-		      "lea -1(%%ecx), %%eax;"
-		      "mov $1, %%ecx;"
-		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
-		      "mov %%eax, %%ecx;"
-		      "1:"
-		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+			      : "+S"(input), "+D"(output)
+			      : "d"(control_word), "b"(key), "c"(initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count)
-		      : "ax");
+		      : "d"(control_word), "b"(key), "c"(count - initial));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -236,7 +249,7 @@ static void aes_encrypt(struct crypto_tf
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -248,7 +261,7 @@ static void aes_decrypt(struct crypto_tf
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -441,6 +454,7 @@ static struct crypto_alg cbc_aes_alg = {
 static int __init padlock_init(void)
 {
 	int ret;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (!cpu_has_xcrypt) {
 		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -463,6 +477,12 @@ static int __init padlock_init(void)
 
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
+	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+		ecb_fetch_blocks = 8;
+		cbc_fetch_blocks = 4; /* NOTE: notused */
+		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+	}
+
 out:
 	return ret;
 

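As a worked example of the splitting done in padlock_xcrypt_ecb() above, here
is the same arithmetic outside the kernel, with a hypothetical request of 21
blocks on a stepping-2 Nano (ecb_fetch_blocks == 8); the block counts are made
up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int ecb_fetch_blocks = 8;	/* power of two, as in the driver */
	unsigned int count = 21;		/* hypothetical request size */
	unsigned int initial = count & (ecb_fetch_blocks - 1);	/* 21 % 8 = 5 */

	/*
	 * As I read the patch: the odd 5 blocks go first, while at least
	 * ecb_fetch_blocks more blocks still follow them in the caller's
	 * buffer, so the over-fetch stays inside valid data; the second
	 * pass then runs an exact multiple of the fetch width and ends
	 * right at the end of the input.
	 */
	printf("first pass: %u blocks, second pass: %u blocks\n",
	       initial, count - initial);
	return 0;
}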

* [patch 2/3] crypto: padlock-aes: work around Nano CPU errata in CBC mode
  2009-06-09 14:35 [patch 0/3] crypto: padlock-aes: enable on VIA Nano Chuck Ebbert
  2009-06-09 14:36 ` [patch 1/3] crypto: padlock-aes: work around Nano CPU errata in ECB mode Chuck Ebbert
@ 2009-06-09 14:37 ` Chuck Ebbert
  2009-06-09 14:39 ` [patch 3/3] crypto: padlock-aes: enable on 64-bit kernels Chuck Ebbert
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 9+ messages in thread
From: Chuck Ebbert @ 2009-06-09 14:37 UTC (permalink / raw)
  To: Harald Welte
  Cc: Herbert Xu, linux-kernel, linux-crypto, Sebastian Andrzej Siewior

From: Chuck Ebbert <cebbert@redhat.com>
crypto: padlock-aes: work around Nano CPU errata in CBC mode

Extend previous workarounds for the prefetch bug to cover CBC mode,
clean up the code a bit.

Signed-off-by: Chuck Ebbert <cebbert@redhat.com>

--- work-2.6.29.4.orig/drivers/crypto/padlock-aes.c
+++ work-2.6.29.4/drivers/crypto/padlock-aes.c
@@ -22,11 +22,16 @@
 #include <asm/i387.h>
 #include "padlock.h"
 
-/* number of data blocks actually fetched for each xcrypt insn */
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
 static unsigned int ecb_fetch_blocks = 2;
-static unsigned int cbc_fetch_blocks = 1;
-
+#define MAX_ECB_FETCH_BLOCKS (8)
 #define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
 #define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
 
 /* Control word. */
@@ -176,7 +181,7 @@ static inline void padlock_store_cword(s
  * should be used only inside the irq_ts_save/restore() context
  */
 
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 				  struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
@@ -184,32 +189,65 @@ static inline void padlock_xcrypt(const 
 		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key,
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+				 u8 *iv, struct cword *control_word, int count)
+{
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+		      : "+S" (input), "+D" (output), "+a" (iv)
+		      : "d" (control_word), "b" (key), "c" (count));
+	return iv;
+}
+
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
 			   struct cword *cword, int count)
 {
 	/*
 	 * Padlock prefetches extra data so we must provide mapped input buffers.
 	 * Assume there are at least 16 bytes of stack already in use.
 	 */
-	u8 buf[AES_BLOCK_SIZE * 7 + PADLOCK_ALIGNMENT - 1];
+	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
 	memcpy(tmp, in, count * AES_BLOCK_SIZE);
-	padlock_xcrypt(tmp, out, key, cword, count);
+	rep_xcrypt_ecb(tmp, out, key, cword, count);
 }
 
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			   u8 *iv, struct cword *cword, int count)
+{
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
+}
+
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
 			     struct cword *cword, int count)
 {
 	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
 	 * We could avoid some copying here but it's probably not worth it.
 	 */
 	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
-		aes_crypt_copy(in, out, key, cword, count);
+		ecb_crypt_copy(in, out, key, cword, count);
 		return;
 	}
 
-	padlock_xcrypt(in, out, key, cword, count);
+	rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+			    u8 *iv, struct cword *cword, int count)
+{
+	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
+		return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -218,7 +256,7 @@ static inline void padlock_xcrypt_ecb(co
 	u32 initial = count & (ecb_fetch_blocks - 1);
 
 	if (count < ecb_fetch_blocks) {
-		aes_crypt(input, output, key, control_word, count);
+		ecb_crypt(input, output, key, control_word, count);
 		return;
 	}
 
@@ -235,10 +273,19 @@ static inline void padlock_xcrypt_ecb(co
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 				     u8 *iv, void *control_word, u32 count)
 {
-	/* rep xcryptcbc */
-	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+	u32 initial = count & (cbc_fetch_blocks - 1);
+
+	if (count < cbc_fetch_blocks)
+		return cbc_crypt(input, output, key, iv, control_word, count);
+
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+			      : "+S" (input), "+D" (output), "+a" (iv)
+			      : "d" (control_word), "b" (key), "c" (count));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 		      : "+S" (input), "+D" (output), "+a" (iv)
-		      : "d" (control_word), "b" (key), "c" (count));
+		      : "d" (control_word), "b" (key), "c" (count-initial));
 	return iv;
 }
 
@@ -249,7 +296,7 @@ static void aes_encrypt(struct crypto_tf
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
+	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -261,7 +308,7 @@ static void aes_decrypt(struct crypto_tf
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
+	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -478,8 +525,8 @@ static int __init padlock_init(void)
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
 	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
-		ecb_fetch_blocks = 8;
-		cbc_fetch_blocks = 4; /* NOTE: notused */
+		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
 		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
 	}
 

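One detail that is easy to miss in the two *_crypt_copy() helpers: the bounce
buffers only need room for fetch_blocks - 1 data blocks, because the copy path
is taken only when count < fetch_blocks, and the "at least 16 bytes of stack
already in use" comment covers the worst-case read past the end of the buffer.
A back-of-the-envelope check (illustrative only, assuming PADLOCK_ALIGNMENT is
16 as in padlock.h):

#include <stdio.h>

#define AES_BLOCK_SIZE		16
#define PADLOCK_ALIGNMENT	16

static void worst_case(const char *mode, int max_fetch_blocks)
{
	/* Room for at most (max_fetch_blocks - 1) data blocks plus alignment slack. */
	int buf_bytes   = AES_BLOCK_SIZE * (max_fetch_blocks - 1) + PADLOCK_ALIGNMENT - 1;
	/* Bytes guaranteed to remain in the buffer after aligning tmp. */
	int after_align = buf_bytes - (PADLOCK_ALIGNMENT - 1);
	/* The hardware may read a full fetch width starting at tmp. */
	int fetched     = AES_BLOCK_SIZE * max_fetch_blocks;

	printf("%s: buffer %d bytes, hardware may read %d, over-read <= %d bytes\n",
	       mode, buf_bytes, fetched, fetched - after_align);
}

int main(void)
{
	worst_case("ECB", 8);	/* over-read <= 16 bytes */
	worst_case("CBC", 4);	/* over-read <= 16 bytes */
	return 0;
}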

* [patch 3/3] crypto: padlock-aes: enable on 64-bit kernels
  2009-06-09 14:35 [patch 0/3] crypto: padlock-aes: enable on VIA Nano Chuck Ebbert
  2009-06-09 14:36 ` [patch 1/3] crypto: padlock-aes: work around Nano CPU errata in ECB mode Chuck Ebbert
  2009-06-09 14:37 ` [patch 2/3] crypto: padlock-aes: work around Nano CPU errata in CBC mode Chuck Ebbert
@ 2009-06-09 14:39 ` Chuck Ebbert
  2009-06-09 18:47   ` Sebastian Andrzej Siewior
  2009-06-09 16:29 ` [patch 0/3] crypto: padlock-aes: enable on VIA Nano Chuck Ebbert
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 9+ messages in thread
From: Chuck Ebbert @ 2009-06-09 14:39 UTC (permalink / raw)
  To: Harald Welte
  Cc: Herbert Xu, linux-kernel, linux-crypto, Sebastian Andrzej Siewior

From: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
crypto: padlock-aes: enable on 64-bit kernels

The only required change now is using the right push/pop instruction
on x86-64. Taken from the original patch by Sebastian Andrzej Siewior.
(Added a dependency on X86.)

Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Signed-off-by: Chuck Ebbert <cebbert@redhat.com>

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -12,7 +12,7 @@ if CRYPTO_HW
 
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
-	depends on X86_32 && !UML
+	depends on X86 && !UML 
 	select CRYPTO_ALGAPI
 	help
 	  Some VIA processors come with an integrated crypto engine
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 3f0fdd1..ddd27c7 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
 	int cpu = raw_smp_processor_id();
 
 	if (cword != per_cpu(last_cword, cpu))
+#ifndef CONFIG_X86_64
 		asm volatile ("pushfl; popfl");
+#else
+		asm volatile ("pushfq; popfq");
+#endif
 }
 
 static inline void padlock_store_cword(struct cword *cword)

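As an aside, and not part of the patch as posted: gas picks the operand size
from the compilation mode when no suffix is given (native_save_fl() relies on
the same behaviour, if I recall correctly), so the #ifdef could in principle
be replaced by a single suffix-free pushf/popf sequence. A small userspace
check, which builds unchanged with gcc -m32 and -m64:

#include <stdio.h>

int main(void)
{
	/* Assembles to pushfl/popfl under -m32 and pushfq/popfq under -m64;
	 * touching the flags register is what padlock_reset_key() is after. */
	asm volatile ("pushf; popf");
	printf("flags push/pop executed at the native word size\n");
	return 0;
}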

* Re: [patch 0/3] crypto: padlock-aes: enable on VIA Nano
  2009-06-09 14:35 [patch 0/3] crypto: padlock-aes: enable on VIA Nano Chuck Ebbert
                   ` (2 preceding siblings ...)
  2009-06-09 14:39 ` [patch 3/3] crypto: padlock-aes: enable on 64-bit kernels Chuck Ebbert
@ 2009-06-09 16:29 ` Chuck Ebbert
  2009-06-09 18:42 ` Sebastian Andrzej Siewior
  2009-06-09 18:45 ` Harald Welte
  5 siblings, 0 replies; 9+ messages in thread
From: Chuck Ebbert @ 2009-06-09 16:29 UTC (permalink / raw)
  To: Harald Welte
  Cc: Herbert Xu, linux-kernel, linux-crypto, Sebastian Andrzej Siewior

On Tue, 9 Jun 2009 10:35:33 -0400
Chuck Ebbert <cebbert@redhat.com> wrote:

> The VIA Nano has a bug that makes the padlock unit fetch extra data
> during encryption operations. Add workarounds for that, and enable
> the driver on x86_64.
> 
> 1/3 Fix ECB encryption mode
> 2/3 Fix CBC mode, clean up code
> 3/3 Enable building for 64-bit kernels

Forgot to mention: I have 64-bit working on a Samsung NC20 with
LVM-encrypted disks using AES. The cryptomgr self-tests pass too. Before
the prefetch fixes it was oopsing near page boundaries when trying to
decrypt disk blocks.



* Re: [patch 0/3] crypto: padlock-aes: enable on VIA Nano
  2009-06-09 14:35 [patch 0/3] crypto: padlock-aes: enable on VIA Nano Chuck Ebbert
                   ` (3 preceding siblings ...)
  2009-06-09 16:29 ` [patch 0/3] crypto: padlock-aes: enable on VIA Nano Chuck Ebbert
@ 2009-06-09 18:42 ` Sebastian Andrzej Siewior
  2009-06-10 16:48   ` Harald Welte
  2009-06-09 18:45 ` Harald Welte
  5 siblings, 1 reply; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2009-06-09 18:42 UTC (permalink / raw)
  To: Chuck Ebbert; +Cc: Harald Welte, Herbert Xu, linux-kernel, linux-crypto

* Chuck Ebbert | 2009-06-09 10:35:33 [-0400]:

>The VIA Nano has a bug that makes the padlock unit fetch extra data
>during encryption operations. Add workarounds for that, and enable
>the driver on x86_64.
Nice. The X86_64 padlock support will make it into mainline in the next merge
window, so I'm asking you kindly to rebase it against Herbert's cryptodev
tree [0].

I guess the bug can also trigger on 32-bit if you boot the affected 64-bit CPU
in 32-bit mode? I'm not sure whether it is better to send these patches via
the stable tree _or_ deactivate the padlock on the affected CPUs.

[0]
git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git

Sebastian


* Re: [patch 0/3] crypto: padlock-aes: enable on VIA Nano
  2009-06-09 14:35 [patch 0/3] crypto: padlock-aes: enable on VIA Nano Chuck Ebbert
                   ` (4 preceding siblings ...)
  2009-06-09 18:42 ` Sebastian Andrzej Siewior
@ 2009-06-09 18:45 ` Harald Welte
  5 siblings, 0 replies; 9+ messages in thread
From: Harald Welte @ 2009-06-09 18:45 UTC (permalink / raw)
  To: Chuck Ebbert
  Cc: Herbert Xu, linux-kernel, linux-crypto, Sebastian Andrzej Siewior

On Tue, Jun 09, 2009 at 10:35:33AM -0400, Chuck Ebbert wrote:
> The VIA Nano has a bug that makes the padlock unit fetch extra data
> during encryption operations. Add workarounds for that, and enable
> the driver on x86_64.

Thanks!

Where did you get the errata from, and what kind of document / revision is it?
I have not seen that document so far.

I've asked Centaur to confirm the errata and also inquired whether there will
be a microcode update or a new stepping to fix it.  If so, we should add
proper checks to the workaround to make sure it's only enabled when we need it.

Out of curiosity: Only the ACE (crypto) is affected, not PHE (hashing)?

I'll also put it on my todo list to do some actual benchmarking to determine
how much impact this bug has.  I expect that, especially for storage
encryption, many crypto operations will be page-aligned and thus need the
extra memcpy :(

P.S.: the patch to enable padlock on x86_64 is already in the cryptodev tree,
as far as I know.  Interestingly, I have not observed this problem so far,
despite running dm-crypt on a Nano for quite some time.

Cheers,
-- 
- Harald Welte <HaraldWelte@viatech.com>	    http://linux.via.com.tw/
============================================================================
VIA Free and Open Source Software Liaison


* Re: [patch 3/3] crypto: padlock-aes: enable on 64-bit kernels
  2009-06-09 14:39 ` [patch 3/3] crypto: padlock-aes: enable on 64-bit kernels Chuck Ebbert
@ 2009-06-09 18:47   ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2009-06-09 18:47 UTC (permalink / raw)
  To: Chuck Ebbert; +Cc: Harald Welte, Herbert Xu, linux-kernel, linux-crypto

* Chuck Ebbert | 2009-06-09 10:39:07 [-0400]:

>From: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
>crypto: padlock-aes: enable on 64-bit kernels
>
>The only required change now is using the right push/pop instruction
>on x86-64. Taken from the original patch by Sebastian Andrzej Siewior.
>(Added a dependency on X86.)
My original patch is already in cryptodev, followed by another patch for
the correct dependency.

Sebastian


* Re: [patch 0/3] crypto: padlock-aes: enable on VIA Nano
  2009-06-09 18:42 ` Sebastian Andrzej Siewior
@ 2009-06-10 16:48   ` Harald Welte
  0 siblings, 0 replies; 9+ messages in thread
From: Harald Welte @ 2009-06-10 16:48 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: Chuck Ebbert, Herbert Xu, linux-kernel, linux-crypto

Hi all,

On Tue, Jun 09, 2009 at 08:42:03PM +0200, Sebastian Andrzej Siewior wrote:
> * Chuck Ebbert | 2009-06-09 10:35:33 [-0400]:
> 
> >The VIA Nano has a bug that makes the padlock unit fetch extra data
> >during encryption operations. Add workarounds for that, and enable
> >the driver on x86_64.
> Nice. The X86_64 padlock will make it mainline in next merge window so
> I'm asking you kindly to rebase it against Herbert's cryptodev tree [0].
> 
> I guess the bug can trigger on 32bit if you boot the affected 64bit CPU
> in 32bit mode? I'm not sure if it is better to send this patches via
> stable tree _or_ deactivate the padlock on affected CPUs.

I have now re-confirmed the errata with the VIA/Centaur guys, and it seems correct.
Also, reviewing [but not testing] the patch, it looks correct to me.

The check for stepping '2' is also correct, as stepping 3 no longer has this
issue.

Chuck, I'd be more than thankful if you could rebase and resubmit as requested.
If not, please drop me a note so I can put it on my todo list.

Acked-by: Harald Welte <HaraldWelte@viatech.com>
-- 
- Harald Welte <HaraldWelte@viatech.com>	    http://linux.via.com.tw/
============================================================================
VIA Free and Open Source Software Liaison

