* [PATCH v2 0/4] xtensa: fix {get,put}_user() for 64bit values
@ 2019-10-12  0:37 Max Filippov
  2019-10-12  0:37 ` [PATCH v2 1/4] " Max Filippov
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Max Filippov @ 2019-10-12  0:37 UTC (permalink / raw)
  To: linux-xtensa, Al Viro; +Cc: Chris Zankel, linux-kernel, Max Filippov

Hello,

this series fixes the return value, an out-of-bounds stack access and value
truncation in the xtensa implementation of {get,put}_user() for 64-bit
values. It also cleans up the naming of assembly arguments in
__{get,put}_user_asm and __check_align_{1,2,4}.

Changes v1->v2:
- initialize result when access_ok check fails in __get_user_check
- initialize result in __get_user_asm for unaligned access

Al Viro (1):
  xtensa: fix {get,put}_user() for 64bit values

Max Filippov (3):
  xtensa: clean up assembly arguments in uaccess macros
  xtensa: fix type conversion in __get_user_[no]check
  xtensa: initialize result in __get_user_asm for unaligned access

 arch/xtensa/include/asm/uaccess.h | 105 +++++++++++++++++-------------
 1 file changed, 60 insertions(+), 45 deletions(-)

-- 
2.20.1



* [PATCH v2 1/4] xtensa: fix {get,put}_user() for 64bit values
  2019-10-12  0:37 [PATCH v2 0/4] xtensa: fix {get,put}_user() for 64bit values Max Filippov
@ 2019-10-12  0:37 ` Max Filippov
  2019-10-12  0:37 ` [PATCH v2 2/4] xtensa: clean up assembly arguments in uaccess macros Max Filippov
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Max Filippov @ 2019-10-12  0:37 UTC (permalink / raw)
  To: linux-xtensa, Al Viro; +Cc: Chris Zankel, linux-kernel, Max Filippov

From: Al Viro <viro@zeniv.linux.org.uk>

First of all, on short copies __copy_{to,from}_user() return the number
of bytes left uncopied, *not* -EFAULT.  get_user() and put_user() are
expected to return -EFAULT on failure.
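
For illustration, a minimal sketch of the two conventions (uptr, buf and
val are hypothetical, not taken from the patch):

    if (__copy_from_user(buf, uptr, sizeof(buf)))  /* bytes left uncopied */
        return -EFAULT;                            /* caller picks the error */

    if (get_user(val, uptr))                       /* 0 or -EFAULT directly */
        return -EFAULT;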

Another problem is get_user(v32, (__u64 __user *)p); that should fetch
the 64-bit value and then assign it to v32, truncating it in the process.
The current code, OTOH, reads 8 bytes of data and stores them at the
address of v32, stomping on the 4 bytes that follow v32 itself.
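
Roughly, as a sketch (p is a hypothetical __u64 __user pointer, return
values ignored for brevity):

    u32 v32;

    get_user(v32, (__u64 __user *)p);   /* wanted: v32 = (u32)<64-bit value> */
    __copy_from_user(&v32, p, 8);       /* old code: writes 8 bytes at &v32,
                                           overwriting the 4 bytes after it */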

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
 arch/xtensa/include/asm/uaccess.h | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6792928ba84a..f568c00392ec 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -100,7 +100,7 @@ do {									\
 	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break;	\
 	case 8: {							\
 		     __typeof__(*ptr) __v64 = x;			\
-		     retval = __copy_to_user(ptr, &__v64, 8);		\
+		     retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0;	\
 		     break;						\
 	        }							\
 	default: __put_user_bad();					\
@@ -198,7 +198,16 @@ do {									\
 	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
 	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
 	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
-	case 8: retval = __copy_from_user(&x, ptr, 8);    break;	\
+	case 8: {							\
+		u64 __x;						\
+		if (unlikely(__copy_from_user(&__x, ptr, 8))) {		\
+			retval = -EFAULT;				\
+			(x) = 0;					\
+		} else {						\
+			(x) = *(__force __typeof__((ptr)))&__x;		\
+		}							\
+		break;							\
+	}								\
 	default: (x) = __get_user_bad();				\
 	}								\
 } while (0)
-- 
2.20.1



* [PATCH v2 2/4] xtensa: clean up assembly arguments in uaccess macros
  2019-10-12  0:37 [PATCH v2 0/4] xtensa: fix {get,put}_user() for 64bit values Max Filippov
  2019-10-12  0:37 ` [PATCH v2 1/4] " Max Filippov
@ 2019-10-12  0:37 ` Max Filippov
  2019-10-12  0:37 ` [PATCH v2 3/4] xtensa: fix type conversion in __get_user_[no]check Max Filippov
  2019-10-12  0:37 ` [PATCH v2 4/4] xtensa: initialize result in __get_user_asm for unaligned access Max Filippov
  3 siblings, 0 replies; 5+ messages in thread
From: Max Filippov @ 2019-10-12  0:37 UTC (permalink / raw)
  To: linux-xtensa, Al Viro; +Cc: Chris Zankel, linux-kernel, Max Filippov

Numeric assembly arguments are hard to understand, and assembly code that
uses them is hard to modify. Use named arguments in __check_align_*,
__get_user_asm and __put_user_asm. Rename the macro parameters so that
they don't clash with the named argument labels.
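
As a generic illustration (pseudo-asm, not the xtensa macros themselves,
with dst and src as stand-in C variables):

    /* positional: operands are counted, so they must be renumbered
       whenever the operand list changes */
    asm volatile("mov %0, %1" : "=r"(dst) : "r"(src));

    /* named: operands are referenced by label, independent of position */
    asm volatile("mov %[out], %[in]" : [out] "=r"(dst) : [in] "r"(src));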

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
 arch/xtensa/include/asm/uaccess.h | 42 +++++++++++++++----------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index f568c00392ec..aca510707189 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -132,14 +132,14 @@ do {									\
 #define __check_align_1  ""
 
 #define __check_align_2				\
-	"   _bbci.l %3,  0, 1f		\n"	\
-	"   movi    %0, %4		\n"	\
+	"   _bbci.l %[addr], 0, 1f	\n"	\
+	"   movi    %[err], %[efault]	\n"	\
 	"   _j      2f			\n"
 
 #define __check_align_4				\
-	"   _bbsi.l %3,  0, 0f		\n"	\
-	"   _bbci.l %3,  1, 1f		\n"	\
-	"0: movi    %0, %4		\n"	\
+	"   _bbsi.l %[addr], 0, 0f	\n"	\
+	"   _bbci.l %[addr], 1, 1f	\n"	\
+	"0: movi    %[err], %[efault]	\n"	\
 	"   _j      2f			\n"
 
 
@@ -151,24 +151,24 @@ do {									\
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __put_user_asm(x, addr, err, align, insn, cb)	\
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
 __asm__ __volatile__(					\
 	__check_align_##align				\
-	"1: "insn"  %2, %3, 0		\n"		\
+	"1: "insn"  %[x], %[addr], 0	\n"		\
 	"2:				\n"		\
 	"   .section  .fixup,\"ax\"	\n"		\
 	"   .align 4			\n"		\
 	"   .literal_position		\n"		\
 	"5:				\n"		\
-	"   movi   %1, 2b		\n"		\
-	"   movi   %0, %4		\n"		\
-	"   jx     %1			\n"		\
+	"   movi   %[tmp], 2b		\n"		\
+	"   movi   %[err], %[efault]	\n"		\
+	"   jx     %[tmp]		\n"		\
 	"   .previous			\n"		\
 	"   .section  __ex_table,\"a\"	\n"		\
 	"   .long	1b, 5b		\n"		\
 	"   .previous"					\
-	:"=r" (err), "=r" (cb)				\
-	:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+	:[err] "=r"(err_), [tmp] "=r"(cb)		\
+	:[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
 
 #define __get_user_nocheck(x, ptr, size)			\
 ({								\
@@ -217,25 +217,25 @@ do {									\
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __get_user_asm(x, addr, err, align, insn, cb) \
-__asm__ __volatile__(			\
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+__asm__ __volatile__(				\
 	__check_align_##align			\
-	"1: "insn"  %2, %3, 0		\n"	\
+	"1: "insn"  %[x], %[addr], 0	\n"	\
 	"2:				\n"	\
 	"   .section  .fixup,\"ax\"	\n"	\
 	"   .align 4			\n"	\
 	"   .literal_position		\n"	\
 	"5:				\n"	\
-	"   movi   %1, 2b		\n"	\
-	"   movi   %2, 0		\n"	\
-	"   movi   %0, %4		\n"	\
-	"   jx     %1			\n"	\
+	"   movi   %[tmp], 2b		\n"	\
+	"   movi   %[x], 0		\n"	\
+	"   movi   %[err], %[efault]	\n"	\
+	"   jx     %[tmp]		\n"	\
 	"   .previous			\n"	\
 	"   .section  __ex_table,\"a\"	\n"	\
 	"   .long	1b, 5b		\n"	\
 	"   .previous"				\
-	:"=r" (err), "=r" (cb), "=r" (x)	\
-	:"r" (addr), "i" (-EFAULT), "0" (err))
+	:[err] "=r"(err_), [tmp] "=r"(cb), [x] "=r"(x_)\
+	:[addr] "r"(addr_), [efault] "i"(-EFAULT))
 
 
 /*
-- 
2.20.1



* [PATCH v2 3/4] xtensa: fix type conversion in __get_user_[no]check
  2019-10-12  0:37 [PATCH v2 0/4] xtensa: fix {get,put}_user() for 64bit values Max Filippov
  2019-10-12  0:37 ` [PATCH v2 1/4] " Max Filippov
  2019-10-12  0:37 ` [PATCH v2 2/4] xtensa: clean up assembly arguments in uaccess macros Max Filippov
@ 2019-10-12  0:37 ` Max Filippov
  2019-10-12  0:37 ` [PATCH v2 4/4] xtensa: initialize result in __get_user_asm for unaligned access Max Filippov
  3 siblings, 0 replies; 5+ messages in thread
From: Max Filippov @ 2019-10-12  0:37 UTC (permalink / raw)
  To: linux-xtensa, Al Viro; +Cc: Chris Zankel, linux-kernel, Max Filippov

__get_user_[no]check uses a temporary buffer of type long to store the
result of __get_user_size and does sign extension on it when necessary.
This doesn't work correctly for 64-bit data. Fix it by moving the
temporary buffer/sign extension logic into __get_user_asm.

Don't assign the result of __get_user_bad to (x), as (x) may no longer be
integer-compatible and the assignment would trigger a warning even when
it is going to be optimized away. Instead do (x) = 0; and call
__get_user_bad separately.
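
The 64-bit failure mode, in plain C (a sketch with made-up values, long
being 32 bits wide on xtensa):

    long tmp;
    u64 dst, src = 0x1122334455667788ULL;

    tmp = src;      /* truncated to 0x55667788 */
    dst = tmp;      /* upper 32 bits of the original value are lost */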

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
Changes v1->v2:
- initialize result when access_ok check fails in __get_user_check

 arch/xtensa/include/asm/uaccess.h | 56 +++++++++++++++++--------------
 1 file changed, 30 insertions(+), 26 deletions(-)

diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index aca510707189..43e923678dfb 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -172,19 +172,19 @@ __asm__ __volatile__(					\
 
 #define __get_user_nocheck(x, ptr, size)			\
 ({								\
-	long __gu_err, __gu_val;				\
-	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
-	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+	long __gu_err;						\
+	__get_user_size((x), (ptr), (size), __gu_err);		\
 	__gu_err;						\
 })
 
 #define __get_user_check(x, ptr, size)					\
 ({									\
-	long __gu_err = -EFAULT, __gu_val = 0;				\
+	long __gu_err = -EFAULT;					\
 	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
-	if (access_ok(__gu_addr, size))			\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
-	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	if (access_ok(__gu_addr, size))					\
+		__get_user_size((x), __gu_addr, (size), __gu_err);	\
+	else								\
+		(x) = 0;						\
 	__gu_err;							\
 })
 
@@ -208,7 +208,7 @@ do {									\
 		}							\
 		break;							\
 	}								\
-	default: (x) = __get_user_bad();				\
+	default: (x) = 0; __get_user_bad();				\
 	}								\
 } while (0)
 
@@ -218,24 +218,28 @@ do {									\
  * __check_align_* macros still work.
  */
 #define __get_user_asm(x_, addr_, err_, align, insn, cb) \
-__asm__ __volatile__(				\
-	__check_align_##align			\
-	"1: "insn"  %[x], %[addr], 0	\n"	\
-	"2:				\n"	\
-	"   .section  .fixup,\"ax\"	\n"	\
-	"   .align 4			\n"	\
-	"   .literal_position		\n"	\
-	"5:				\n"	\
-	"   movi   %[tmp], 2b		\n"	\
-	"   movi   %[x], 0		\n"	\
-	"   movi   %[err], %[efault]	\n"	\
-	"   jx     %[tmp]		\n"	\
-	"   .previous			\n"	\
-	"   .section  __ex_table,\"a\"	\n"	\
-	"   .long	1b, 5b		\n"	\
-	"   .previous"				\
-	:[err] "=r"(err_), [tmp] "=r"(cb), [x] "=r"(x_)\
-	:[addr] "r"(addr_), [efault] "i"(-EFAULT))
+do {							\
+	u32 __x;					\
+	__asm__ __volatile__(				\
+		__check_align_##align			\
+		"1: "insn"  %[x], %[addr], 0	\n"	\
+		"2:				\n"	\
+		"   .section  .fixup,\"ax\"	\n"	\
+		"   .align 4			\n"	\
+		"   .literal_position		\n"	\
+		"5:				\n"	\
+		"   movi   %[tmp], 2b		\n"	\
+		"   movi   %[x], 0		\n"	\
+		"   movi   %[err], %[efault]	\n"	\
+		"   jx     %[tmp]		\n"	\
+		"   .previous			\n"	\
+		"   .section  __ex_table,\"a\"	\n"	\
+		"   .long	1b, 5b		\n"	\
+		"   .previous"				\
+		:[err] "=r"(err_), [tmp] "=r"(cb), [x] "=r"(__x) \
+		:[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+	(x_) = (__force __typeof__(*(addr_)))__x;	\
+} while (0)
 
 
 /*
-- 
2.20.1



* [PATCH v2 4/4] xtensa: initialize result in __get_user_asm for unaligned access
  2019-10-12  0:37 [PATCH v2 0/4] xtensa: fix {get,put}_user() for 64bit values Max Filippov
                   ` (2 preceding siblings ...)
  2019-10-12  0:37 ` [PATCH v2 3/4] xtensa: fix type conversion in __get_user_[no]check Max Filippov
@ 2019-10-12  0:37 ` Max Filippov
  3 siblings, 0 replies; 5+ messages in thread
From: Max Filippov @ 2019-10-12  0:37 UTC (permalink / raw)
  To: linux-xtensa, Al Viro; +Cc: Chris Zankel, linux-kernel, Max Filippov

The __get_user_asm macro leaves the result register uninitialized when the
alignment check fails. Add an 'insn' parameter to __check_align_{1,2,4}
and pass an instruction that initializes the result register from
__get_user_asm.
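
The user-visible effect, roughly (unaligned_uptr is a hypothetical
misaligned user pointer):

    u32 val;
    int ret;

    ret = __get_user(val, (u32 __user *)unaligned_uptr);
    /* ret == -EFAULT; without this patch val keeps whatever the output
       register happened to contain, with it val is 0 */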

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
 arch/xtensa/include/asm/uaccess.h | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 43e923678dfb..d8cff972f3cf 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -129,17 +129,19 @@ do {									\
  * sync.
  */
 
-#define __check_align_1  ""
+#define __check_align_1(insn)  ""
 
-#define __check_align_2				\
+#define __check_align_2(insn)			\
 	"   _bbci.l %[addr], 0, 1f	\n"	\
 	"   movi    %[err], %[efault]	\n"	\
+	"   "insn"			\n"	\
 	"   _j      2f			\n"
 
-#define __check_align_4				\
+#define __check_align_4(insn)			\
 	"   _bbsi.l %[addr], 0, 0f	\n"	\
 	"   _bbci.l %[addr], 1, 1f	\n"	\
 	"0: movi    %[err], %[efault]	\n"	\
+	"   "insn"			\n"	\
 	"   _j      2f			\n"
 
 
@@ -153,7 +155,7 @@ do {									\
  */
 #define __put_user_asm(x_, addr_, err_, align, insn, cb)\
 __asm__ __volatile__(					\
-	__check_align_##align				\
+	__check_align_##align("")			\
 	"1: "insn"  %[x], %[addr], 0	\n"		\
 	"2:				\n"		\
 	"   .section  .fixup,\"ax\"	\n"		\
@@ -221,7 +223,7 @@ do {									\
 do {							\
 	u32 __x;					\
 	__asm__ __volatile__(				\
-		__check_align_##align			\
+		__check_align_##align("movi %[x], 0")	\
 		"1: "insn"  %[x], %[addr], 0	\n"	\
 		"2:				\n"	\
 		"   .section  .fixup,\"ax\"	\n"	\
-- 
2.20.1


