historical-speck.lore.kernel.org archive mirror
 help / color / mirror / Atom feed
* [MODERATED] LVI
@ 2019-11-19 17:40 Josh Poimboeuf
  2019-11-19 17:51 ` [MODERATED] LVI Andrew Cooper
                   ` (5 more replies)
  0 siblings, 6 replies; 34+ messages in thread
From: Josh Poimboeuf @ 2019-11-19 17:40 UTC (permalink / raw)
  To: speck

Hi,

What kernel changes (if any) are needed for LVI?  I haven't seen any
discussion here.

The last I heard, the official CRD was Dec 10, but was likely to move to
March.

For the uninitiated, LVI (Load Value Injection) is a reverse MDS/L1TF:

1) Victim puts secret data in CPU buffer or L1.  Alternatively,
   attacker puts address of secret data in CPU buffer or L1.

2) Attacker gets victim to fault or assist on a load.  (Note that an
   assist gives a much bigger speculation window -- for example, it can
   be triggered when a page's Accessed bit needs updating.)

3) While waiting for the fault/assist to complete, the victim speculatively
   reads the CPU buffer or L1 to get the data (or address) from step 1.

4) Victim gadgets expose the data via the usual L1 side channel.


To protect the kernel, we'd presumably need to look for places where
users can trigger a faulting/assisting load.  For example,
copy_from_user().

copy_from_user() has an LFENCE between the access_ok() check and the
actual copy to protect against Spectre v1.  What if we move that LFENCE
to *after* the copy?  I think that would protect against both Spectre v1
and LVI.

Thoughts?

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 61d93f062a36..457207aece71 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -119,9 +119,9 @@ extern int __get_user_bad(void);
 
 #define __uaccess_begin() stac()
 #define __uaccess_end()   clac()
-#define __uaccess_begin_nospec()	\
+#define __uaccess_end_nospec()	\
 ({					\
-	stac();				\
+	clac();				\
 	barrier_nospec();		\
 })
 
@@ -446,9 +446,9 @@ __pu_label:							\
 	__inttype(*(ptr)) __gu_val;					\
 	__typeof__(ptr) __gu_ptr = (ptr);				\
 	__typeof__(size) __gu_size = (size);				\
-	__uaccess_begin_nospec();					\
+	__uaccess_begin();					\
 	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT);	\
-	__uaccess_end();						\
+	__uaccess_end_nospec();						\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 	__builtin_expect(__gu_err, 0);					\
 })
@@ -496,10 +496,10 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define uaccess_try_nospec do {						\
 	current->thread.uaccess_err = 0;				\
-	__uaccess_begin_nospec();					\
+	__uaccess_begin();						\
 
 #define uaccess_catch(err)						\
-	__uaccess_end();						\
+	__uaccess_end_nospec();						\
 	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
 } while (0)
 
@@ -592,7 +592,7 @@ extern void __cmpxchg_wrong_size(void)
 	int __ret = 0;							\
 	__typeof__(*(ptr)) __old = (old);				\
 	__typeof__(*(ptr)) __new = (new);				\
-	__uaccess_begin_nospec();					\
+	__uaccess_begin();						\
 	switch (size) {							\
 	case 1:								\
 	{								\
@@ -664,7 +664,7 @@ extern void __cmpxchg_wrong_size(void)
 	default:							\
 		__cmpxchg_wrong_size();					\
 	}								\
-	__uaccess_end();						\
+	__uaccess_end_nospec();						\
 	*(uval) = __old;						\
 	__ret;								\
 })
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index ba2dc1930630..c23fdec72b26 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -29,24 +29,24 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 		switch (n) {
 		case 1:
 			ret = 0;
-			__uaccess_begin_nospec();
+			__uaccess_begin();
 			__get_user_asm_nozero(*(u8 *)to, from, ret,
 					      "b", "b", "=q", 1);
-			__uaccess_end();
+			__uaccess_end_nospec();
 			return ret;
 		case 2:
 			ret = 0;
-			__uaccess_begin_nospec();
+			__uaccess_begin();
 			__get_user_asm_nozero(*(u16 *)to, from, ret,
 					      "w", "w", "=r", 2);
-			__uaccess_end();
+			__uaccess_end_nospec();
 			return ret;
 		case 4:
 			ret = 0;
-			__uaccess_begin_nospec();
+			__uaccess_begin();
 			__get_user_asm_nozero(*(u32 *)to, from, ret,
 					      "l", "k", "=r", 4);
-			__uaccess_end();
+			__uaccess_end_nospec();
 			return ret;
 		}
 	}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 5cd1caa8bc65..7013f9ffded7 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -71,48 +71,48 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
 	case 1:
-		__uaccess_begin_nospec();
+		__uaccess_begin();
 		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
 			      ret, "b", "b", "=q", 1);
-		__uaccess_end();
+		__uaccess_end_nospec();
 		return ret;
 	case 2:
-		__uaccess_begin_nospec();
+		__uaccess_begin();
 		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
 			      ret, "w", "w", "=r", 2);
-		__uaccess_end();
+		__uaccess_end_nospec();
 		return ret;
 	case 4:
-		__uaccess_begin_nospec();
+		__uaccess_begin();
 		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
 			      ret, "l", "k", "=r", 4);
-		__uaccess_end();
+		__uaccess_end_nospec();
 		return ret;
 	case 8:
-		__uaccess_begin_nospec();
+		__uaccess_begin();
 		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
 			      ret, "q", "", "=r", 8);
-		__uaccess_end();
+		__uaccess_end_nospec();
 		return ret;
 	case 10:
-		__uaccess_begin_nospec();
+		__uaccess_begin();
 		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
 			       ret, "q", "", "=r", 10);
 		if (likely(!ret))
 			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
 				       (u16 __user *)(8 + (char __user *)src),
 				       ret, "w", "w", "=r", 2);
-		__uaccess_end();
+		__uaccess_end_nospec();
 		return ret;
 	case 16:
-		__uaccess_begin_nospec();
+		__uaccess_begin();
 		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
 			       ret, "q", "", "=r", 16);
 		if (likely(!ret))
 			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
 				       (u64 __user *)(8 + (char __user *)src),
 				       ret, "q", "", "=r", 8);
-		__uaccess_end();
+		__uaccess_end_nospec();
 		return ret;
 	default:
 		return copy_user_generic(dst, (__force void *)src, size);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 7d290777246d..b3136b75d550 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -331,12 +331,12 @@ do {									\
 
 unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
 {
-	__uaccess_begin_nospec();
+	__uaccess_begin();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
 		n = __copy_user_intel(to, from, n);
-	__uaccess_end();
+	__uaccess_end_nospec();
 	return n;
 }
 EXPORT_SYMBOL(__copy_user_ll);
@@ -344,7 +344,7 @@ EXPORT_SYMBOL(__copy_user_ll);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)
 {
-	__uaccess_begin_nospec();
+	__uaccess_begin();
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_intel_nocache(to, from, n);
@@ -353,7 +353,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 #else
 	__copy_user(to, from, n);
 #endif
-	__uaccess_end();
+	__uaccess_end_nospec();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);

^ permalink raw reply related	[flat|nested] 34+ messages in thread

end of thread, other threads:[~2019-11-27  7:38 UTC | newest]

Thread overview: 34+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-11-19 17:40 [MODERATED] LVI Josh Poimboeuf
2019-11-19 17:51 ` [MODERATED] LVI Andrew Cooper
2019-11-19 18:27   ` Josh Poimboeuf
2019-11-19 19:26     ` Andrew Cooper
2019-11-20  9:52     ` Paolo Bonzini
2019-11-19 18:12 ` Greg KH
2019-11-19 18:21   ` Josh Poimboeuf
2019-11-19 18:46     ` Greg KH
2019-11-19 18:21   ` Paolo Bonzini
2019-11-19 18:22 ` Andrew Cooper
2019-11-19 18:27   ` Josh Poimboeuf
2019-11-19 18:36     ` Luck, Tony
2019-11-20 17:02       ` Greg KH
2019-11-19 18:39     ` Andrew Cooper
2019-11-19 21:00       ` Josh Poimboeuf
2019-11-19 21:03         ` Josh Poimboeuf
2019-11-20 14:11           ` Andrew Cooper
2019-11-20  8:04 ` Peter Zijlstra
2019-11-20  9:49   ` Andrew Cooper
2019-11-20 17:13 ` Josh Poimboeuf
2019-11-20 17:25   ` Greg KH
2019-11-20 17:29     ` Tyler Hicks
2019-11-20 17:30     ` Andrew Cooper
2019-11-20 17:46       ` Greg KH
2019-11-20 19:09     ` Peter Zijlstra
2019-11-20 19:19       ` Greg KH
2019-11-21  0:50         ` LVI Thomas Gleixner
2019-11-21 13:45           ` [MODERATED] LVI Greg KH
2019-11-26  0:54 ` Andi Kleen
2019-11-26 10:37   ` Greg KH
2019-11-26 18:23     ` Andi Kleen
2019-11-27  7:38       ` Greg KH
2019-11-26 10:55   ` Paolo Bonzini
2019-11-26 18:28     ` Andi Kleen

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).