* [RFC PATCH] Drop some asm from copy_user_64.S
@ 2015-05-12 20:57 Borislav Petkov
  2015-05-12 21:13 ` Linus Torvalds
  2015-05-13  6:19 ` Ingo Molnar
From: Borislav Petkov @ 2015-05-12 20:57 UTC
  To: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, Linus Torvalds,
	Andy Lutomirski, Denys Vlasenko
  Cc: lkml

Hi guys,

this is just an RFC for now, to sanity-check what I'm trying to do:

I want to get rid of the asm glue in arch/x86/lib/copy_user_64.S which
prepares the copy_user* alternatives calls, and replace it with nice,
clean C.

The other intention is to switch to using copy_user_generic(), which
does CALL <copy_user_function> directly, instead of the current scheme
where we CALL _copy_*_user and it then JMPs to the proper
<copy_user_function>, i.e., to save us that JMP.
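
For context, copy_user_generic() already selects the lowlevel routine
with a patched, direct CALL, roughly like this (a paraphrased sketch
of the uaccess_64.h code, modulo the exact register constraints, so
double-check the tree before quoting me on it):

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * alternative_call_2() emits a CALL whose target gets patched
	 * at boot: copy_user_generic_unrolled by default,
	 * copy_user_generic_string with X86_FEATURE_REP_GOOD, and
	 * copy_user_enhanced_fast_string with X86_FEATURE_ERMS.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			   copy_user_generic_string, X86_FEATURE_REP_GOOD,
			   copy_user_enhanced_fast_string, X86_FEATURE_ERMS,
			   ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				       "=d" (len)),
			   "1" (to), "2" (from), "3" (len)
			   : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

So the only thing the callers still need is the range check, which is
what the patch below moves into C.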

I'm not 100% sure about the equivalence between the addition-carry and
segment-limit check we're doing in asm in arch/x86/lib/copy_user_64.S
now, and the access_ok() I've replaced it with.

I mean, it *looks* like access_ok(), and __chk_range_not_ok() in
particular, does the proper checks (addition carry and segment limit
against user_addr_max()), but I'd like someone much more experienced
than me to double-check that.
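
To spell out what the asm stubs do today, here is the check in C (an
illustrative sketch only; range_not_ok() is a made-up name, and
'limit' stands for user_addr_max(), i.e. the addr_limit the asm reads
via TI_addr_limit):

static inline bool range_not_ok(unsigned long addr, unsigned long size,
				unsigned long limit)
{
	unsigned long end = addr + size;

	/* addq %rdx,%rcx; jc bad_*_user: the addition wrapped around */
	if (end < addr)
		return true;

	/* cmpq TI_addr_limit(%rax),%rcx; ja bad_*_user */
	return end > limit;
}

If __chk_range_not_ok() boils down to the same two conditions, the
replacement should be equivalent.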

So, without much further ado, here is the diff. It looks simple enough...

Thanks!

---
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index ace9dec050b1..098f3fd5cc75 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -642,10 +642,11 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif
 
-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
-					   unsigned n);
-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
-					 unsigned n);
+extern __always_inline __must_check
+int _copy_from_user(void *dst, const void __user *src, unsigned size);
+
+extern __always_inline __must_check
+int _copy_to_user(void __user *dst, const void *src, unsigned size);
 
 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 # define copy_user_diag __compiletime_error
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index f2f9b39b274a..1aebc658acf9 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -99,6 +99,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
+int _copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	if (!access_ok(VERIFY_READ, src, size)) {
+		memset(dst, 0, size);
+		return 0;
+	}
+
+	return copy_user_generic(dst, src, size);
+}
+
+static __always_inline __must_check
 int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
@@ -149,6 +160,15 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
+int _copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	if (!access_ok(VERIFY_WRITE, dst, size))
+		return size;
+
+	return copy_user_generic(dst, src, size);
+}
+
+static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 06ce685c3a5d..a577bdc0f5bf 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -12,60 +12,9 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/cpufeature.h>
-#include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
 
-/* Standard copy_to_user with segment limit checking */
-ENTRY(_copy_to_user)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%rax)
-	movq %rdi,%rcx
-	addq %rdx,%rcx
-	jc bad_to_user
-	cmpq TI_addr_limit(%rax),%rcx
-	ja bad_to_user
-	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
-		      "jmp copy_user_generic_string",		\
-		      X86_FEATURE_REP_GOOD,			\
-		      "jmp copy_user_enhanced_fast_string",	\
-		      X86_FEATURE_ERMS
-	CFI_ENDPROC
-ENDPROC(_copy_to_user)
-
-/* Standard copy_from_user with segment limit checking */
-ENTRY(_copy_from_user)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%rax)
-	movq %rsi,%rcx
-	addq %rdx,%rcx
-	jc bad_from_user
-	cmpq TI_addr_limit(%rax),%rcx
-	ja bad_from_user
-	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
-		      "jmp copy_user_generic_string",		\
-		      X86_FEATURE_REP_GOOD,			\
-		      "jmp copy_user_enhanced_fast_string",	\
-		      X86_FEATURE_ERMS
-	CFI_ENDPROC
-ENDPROC(_copy_from_user)
-
-	.section .fixup,"ax"
-	/* must zero dest */
-ENTRY(bad_from_user)
-bad_from_user:
-	CFI_STARTPROC
-	movl %edx,%ecx
-	xorl %eax,%eax
-	rep
-	stosb
-bad_to_user:
-	movl %edx,%eax
-	ret
-	CFI_ENDPROC
-ENDPROC(bad_from_user)
-	.previous
-
 /*
  * copy_user_generic_unrolled - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro

-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
