linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/2] sparc: fix incorrect value returned by copy_from_user_fixup
@ 2016-07-31 23:50 Mikulas Patocka
  2016-08-02  4:33 ` David Miller
  0 siblings, 1 reply; 5+ messages in thread
From: Mikulas Patocka @ 2016-07-31 23:50 UTC (permalink / raw)
  To: David Miller; +Cc: sparclinux, linux-kernel

When a fault in ___copy_from_user happens, the function
copy_from_user_fixup is called. It calls the function compute_size that
reads the faulting address from current_thread_info()->fault_address and
determines how many bytes were copied.

There are multiple ___copy_from_user implementations for various
processors. Some of these implementations read multiple values ahead, for
example this piece of code exists in U1copy_from_user.o:
     124:       c1 9a 4e 20     ldda  [ %o1 ] #ASI_BLK_AIUS, %f0
     128:       92 02 60 40     add  %o1, 0x40, %o1
     12c:       82 00 40 03     add  %g1, %g3, %g1
     130:       e1 9a 4e 20     ldda  [ %o1 ] #ASI_BLK_AIUS, %f16
     134:       92 02 60 40     add  %o1, 0x40, %o1
     138:       8e 21 e0 80     sub  %g7, 0x80, %g7
     13c:       c3 9a 4e 20     ldda  [ %o1 ] #ASI_BLK_AIUS, %f32
     140:       92 02 60 40     add  %o1, 0x40, %o1
     144:       97 28 a0 03     sll  %g2, 3, %o3
     148:       96 22 c0 02     sub  %o3, %g2, %o3
     14c:       97 2a f0 04     sllx  %o3, 4, %o3
     150:       96 02 c0 02     add  %o3, %g2, %o3
     154:       85 2a f0 02     sllx  %o3, 2, %g2
     158:       97 41 40 00     rd  %pc, %o3
     15c:       96 02 e0 28     add  %o3, 0x28, %o3
     160:       81 c2 c0 02     jmp  %o3 + %g2
     164:       01 00 00 00     nop

It prefetches 192 bytes into the floating point register file and
additional 64 bytes are fetched at the target of the jump.

If a page fault happens at some of these ldda instructions, the address of
the faulting page will be saved in current_thread_info()->fault_address.
The routine compute_size assumes that bytes up to the faulting address
were already copied. However this assumption may be wrong if the faulting
instruction is not the first ldda instruction.

This patch fixes the bug by subtracting 0x100 from the faulting address
when handling a fault in copy_from_user_fixup, so that when a fault
happens, it is assumed that bytes up to the faulting address minus 0x100
were copied.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
 arch/sparc/lib/user_fixup.c |   15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

Index: linux-4.4.16/arch/sparc/lib/user_fixup.c
===================================================================
--- linux-4.4.16.orig/arch/sparc/lib/user_fixup.c	2016-07-29 22:16:26.000000000 +0200
+++ linux-4.4.16/arch/sparc/lib/user_fixup.c	2016-07-31 01:37:14.000000000 +0200
@@ -11,6 +11,13 @@
 
 #include <asm/uaccess.h>
 
+/* The copy_from_user routine can read up to 0x100 bytes in advance before
+ * writing them to kernelspace.
+ * So, we must subtract this value from the fault address when copying from
+ * userspace.
+ */
+#define COPY_FROM_USER_PREFETCH		0x100
+
 /* Calculating the exact fault address when using
  * block loads and stores can be very complicated.
  *
@@ -18,9 +25,9 @@
  * of the cases, just fix things up simply here.
  */
 
-static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
+static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset, unsigned long prefetch)
 {
-	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long fault_addr = current_thread_info()->fault_address - prefetch;
 	unsigned long end = start + size;
 
 	if (fault_addr < start || fault_addr >= end) {
@@ -36,7 +43,7 @@ unsigned long copy_from_user_fixup(void
 {
 	unsigned long offset;
 
-	size = compute_size((unsigned long) from, size, &offset);
+	size = compute_size((unsigned long) from, size, &offset, COPY_FROM_USER_PREFETCH);
 	if (likely(size))
 		memset(to + offset, 0, size);
 
@@ -48,7 +55,7 @@ unsigned long copy_to_user_fixup(void __
 {
 	unsigned long offset;
 
-	return compute_size((unsigned long) to, size, &offset);
+	return compute_size((unsigned long) to, size, &offset, 0);
 }
 EXPORT_SYMBOL(copy_to_user_fixup);
 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH 1/2] sparc: fix incorrect value returned by copy_from_user_fixup
  2016-07-31 23:50 [PATCH 1/2] sparc: fix incorrect value returned by copy_from_user_fixup Mikulas Patocka
@ 2016-08-02  4:33 ` David Miller
  2016-08-02 12:20   ` Mikulas Patocka
  0 siblings, 1 reply; 5+ messages in thread
From: David Miller @ 2016-08-02  4:33 UTC (permalink / raw)
  To: mpatocka; +Cc: sparclinux, linux-kernel

From: Mikulas Patocka <mpatocka@redhat.com>
Date: Sun, 31 Jul 2016 19:50:57 -0400 (EDT)

> @@ -18,9 +25,9 @@
>   * of the cases, just fix things up simply here.
>   */
>  
> -static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
> +static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset, unsigned long prefetch)
>  {
> -	unsigned long fault_addr = current_thread_info()->fault_address;
> +	unsigned long fault_addr = current_thread_info()->fault_address - prefetch;
>  	unsigned long end = start + size;
>  
>  	if (fault_addr < start || fault_addr >= end) {
> @@ -36,7 +43,7 @@ unsigned long copy_from_user_fixup(void
>  {
>  	unsigned long offset;
>  
> -	size = compute_size((unsigned long) from, size, &offset);
> +	size = compute_size((unsigned long) from, size, &offset, COPY_FROM_USER_PREFETCH);
>  	if (likely(size))
>  		memset(to + offset, 0, size);
>  

I think this might cause a problem.  Assume we are not in one of those
prefetching loops and are just doing a byte at a time, and therefore
hit the fault exactly at the beginning of the missing page.

You will rewind 0x100 bytes and the caller will restart the copy at
"faulting address  - 0x100".

If someone is using atomic user copies, and using the returned length
to determine which page in userspace needs to be faulted in, and
then restart the copy, then we will loop forever.

We must, therefore, find some way to calculate this length _precisely_.
It must be exactly at the furthest byte successfully copied to the
destination.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH 1/2] sparc: fix incorrect value returned by copy_from_user_fixup
  2016-08-02  4:33 ` David Miller
@ 2016-08-02 12:20   ` Mikulas Patocka
  2016-08-02 17:47     ` David Miller
  0 siblings, 1 reply; 5+ messages in thread
From: Mikulas Patocka @ 2016-08-02 12:20 UTC (permalink / raw)
  To: David Miller; +Cc: sparclinux, linux-kernel



On Mon, 1 Aug 2016, David Miller wrote:

> From: Mikulas Patocka <mpatocka@redhat.com>
> Date: Sun, 31 Jul 2016 19:50:57 -0400 (EDT)
> 
> > @@ -18,9 +25,9 @@
> >   * of the cases, just fix things up simply here.
> >   */
> >  
> > -static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
> > +static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset, unsigned long prefetch)
> >  {
> > -	unsigned long fault_addr = current_thread_info()->fault_address;
> > +	unsigned long fault_addr = current_thread_info()->fault_address - prefetch;
> >  	unsigned long end = start + size;
> >  
> >  	if (fault_addr < start || fault_addr >= end) {
> > @@ -36,7 +43,7 @@ unsigned long copy_from_user_fixup(void
> >  {
> >  	unsigned long offset;
> >  
> > -	size = compute_size((unsigned long) from, size, &offset);
> > +	size = compute_size((unsigned long) from, size, &offset, COPY_FROM_USER_PREFETCH);
> >  	if (likely(size))
> >  		memset(to + offset, 0, size);
> >  
> 
> I think this might cause a problem.  Assume we are not in one of those
> prefetching loops and are just doing a byte at a time, and therefore
> hit the fault exactly at the beginning of the missing page.
> 
> You will rewind 0x100 bytes and the caller will restart the copy at
> "faulting address  - 0x100".
> 
> If someone is using atomic user copies, and using the returned length
> to determine which page in userspace needs to be faulted in, and
> then restart the copy, then we will loop forever.

This isn't guaranteed on x86 either.

__copy_user_intel reads and writes 64 bytes in one loop iteration (and it 
prefetches the data for the next iteration with "movl 64(%4), %%eax". If 
it fails, it reports the amount of remaining data at the start of the loop 
iteration. The reported value may be 67 bytes lower than the fault 
location.

Mikulas

> We must, therefore, find some way to calculate this length _precisely_.
> It must be exactly at the furthest byte successfully copied to the
> destination.
> 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH 1/2] sparc: fix incorrect value returned by copy_from_user_fixup
  2016-08-02 12:20   ` Mikulas Patocka
@ 2016-08-02 17:47     ` David Miller
  2016-08-04  0:21       ` David Miller
  0 siblings, 1 reply; 5+ messages in thread
From: David Miller @ 2016-08-02 17:47 UTC (permalink / raw)
  To: mpatocka; +Cc: sparclinux, linux-kernel

From: Mikulas Patocka <mpatocka@redhat.com>
Date: Tue, 2 Aug 2016 08:20:15 -0400 (EDT)

> On Mon, 1 Aug 2016, David Miller wrote:
> 
>> From: Mikulas Patocka <mpatocka@redhat.com>
>> Date: Sun, 31 Jul 2016 19:50:57 -0400 (EDT)
>> 
>> > @@ -18,9 +25,9 @@
>> >   * of the cases, just fix things up simply here.
>> >   */
>> >  
>> > -static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
>> > +static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset, unsigned long prefetch)
>> >  {
>> > -	unsigned long fault_addr = current_thread_info()->fault_address;
>> > +	unsigned long fault_addr = current_thread_info()->fault_address - prefetch;
>> >  	unsigned long end = start + size;
>> >  
>> >  	if (fault_addr < start || fault_addr >= end) {
>> > @@ -36,7 +43,7 @@ unsigned long copy_from_user_fixup(void
>> >  {
>> >  	unsigned long offset;
>> >  
>> > -	size = compute_size((unsigned long) from, size, &offset);
>> > +	size = compute_size((unsigned long) from, size, &offset, COPY_FROM_USER_PREFETCH);
>> >  	if (likely(size))
>> >  		memset(to + offset, 0, size);
>> >  
>> 
>> I think this might cause a problem.  Assume we are not in one of those
>> prefetching loops and are just doing a byte at a time, and therefore
>> hit the fault exactly at the beginning of the missing page.
>> 
>> You will rewind 0x100 bytes and the caller will restart the copy at
>> "faulting address  - 0x100".
>> 
>> If someone is using atomic user copies, and using the returned length
>> to determine which page in userspace needs to be faulted in, and
>> then restart the copy, then we will loop forever.
> 
> This isn't guaranteed on x86 neither.
> 
> __copy_user_intel reads and writes 64 bytes in one loop iteration (and it 
> prefetches the data for the next iteration with "movl 64(%4), %%eax". If 
> it fails, it reports the amount of remaining data at the start of the loop 
> iteration. The reported value may be 67 bytes lower than the fault 
> location.

That's very interesting, let me do some research into this, as I was
pretty sure something like futexes or similar had some requirement in
this area.

Thanks.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH 1/2] sparc: fix incorrect value returned by copy_from_user_fixup
  2016-08-02 17:47     ` David Miller
@ 2016-08-04  0:21       ` David Miller
  0 siblings, 0 replies; 5+ messages in thread
From: David Miller @ 2016-08-04  0:21 UTC (permalink / raw)
  To: mpatocka; +Cc: sparclinux, linux-kernel

From: David Miller <davem@davemloft.net>
Date: Tue, 02 Aug 2016 10:47:52 -0700 (PDT)

> That's very interesting, let me do some research into this, as I was
> pretty sure something like futexes or similar had some requirement in
> this area.

Mikulas, just wanted to let you know where I am with this.

I looked over how this stuff works and what other architectures do
and determined that I was very wrong long ago to implement this
fixup mechanism as-coded.

The whole copy_in_user() ambiguity is a symptom of this poor design.

The best the fixup routine can do is guess and make a safe estimate,
and even with the prefetch adjustment, a future mempcy implementation
with a different max prefetch would require a change to this code all
over again.

The fact of the matter is that the code itself knows always how much
was successfully copied, in the loop iterator.

So what I'm trying to do is convert the various copy routines back to
something that uses to loop counter to (precisely) compute the return
value.

I've converted copy_in_user() and the GENmemcpy family, see below.
I'll try to do the rest over the next few days.

diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e9a51d6..1833236 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -235,20 +235,8 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 }
 #define __copy_to_user copy_to_user
 
-unsigned long __must_check ___copy_in_user(void __user *to,
-					   const void __user *from,
-					   unsigned long size);
-unsigned long copy_in_user_fixup(void __user *to, void __user *from,
-				 unsigned long size);
-static inline unsigned long __must_check
-copy_in_user(void __user *to, void __user *from, unsigned long size)
-{
-	unsigned long ret = ___copy_in_user(to, from, size);
-
-	if (unlikely(ret))
-		ret = copy_in_user_fixup(to, from, size);
-	return ret;
-}
+unsigned long __must_check copy_in_user(void __user *to, void __user *from,
+					unsigned long size);
 #define __copy_in_user copy_in_user
 
 unsigned long __must_check __clear_user(void __user *, unsigned long);
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index b7d0bd6..2556828 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -3,11 +3,11 @@
  * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
  */
 
-#define EX_LD(x)		\
+#define EX_LD(x,y)		\
 98:	x;			\
 	.section __ex_table,"a";\
 	.align 4;		\
-	.word 98b, __retl_one;	\
+	.word 98b, y;		\
 	.text;			\
 	.align 4;
 
@@ -23,7 +23,7 @@
 #define PREAMBLE					\
 	rd		%asi, %g1;			\
 	cmp		%g1, ASI_AIUS;			\
-	bne,pn		%icc, ___copy_in_user;		\
+	bne,pn		%icc, copy_in_user;		\
 	 nop
 #endif
 
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index 780550e..1416917 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -3,11 +3,11 @@
  * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
  */
 
-#define EX_ST(x)		\
+#define EX_ST(x,y)		\
 98:	x;			\
 	.section __ex_table,"a";\
 	.align 4;		\
-	.word 98b, __retl_one;	\
+	.word 98b, y;		\
 	.text;			\
 	.align 4;
 
@@ -27,7 +27,7 @@
 #define PREAMBLE					\
 	rd		%asi, %g1;			\
 	cmp		%g1, ASI_AIUS;			\
-	bne,pn		%icc, ___copy_in_user;		\
+	bne,pn		%icc, copy_in_user;		\
 	 nop
 #endif
 
diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S
index 89358ee..c7ac25b 100644
--- a/arch/sparc/lib/GENmemcpy.S
+++ b/arch/sparc/lib/GENmemcpy.S
@@ -10,11 +10,11 @@
 #endif
 
 #ifndef EX_LD
-#define EX_LD(x)	x
+#define EX_LD(x,y)	x
 #endif
 
 #ifndef EX_ST
-#define EX_ST(x)	x
+#define EX_ST(x,y)	x
 #endif
 
 #ifndef EX_RETVAL
@@ -45,6 +45,21 @@
 	.register	%g3,#scratch
 
 	.text
+__retl_o4_1:
+	add	%o4, %o2, %o4
+	retl
+	 add	%o4, 1, %o0
+__retl_g1_8:
+	add	%g1, %o2, %g1
+	retl
+	 add	%g1, 8, %o0
+__retl_o2_4:
+	retl
+	 add	%o2, 4, %o0
+__retl_o2_1:
+	retl
+	 add	%o2, 1, %o0
+
 	.align		64
 
 	.globl	FUNC_NAME
@@ -73,8 +88,8 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	sub		%g0, %o4, %o4
 	sub		%o2, %o4, %o2
 1:	subcc		%o4, 1, %o4
-	EX_LD(LOAD(ldub, %o1, %g1))
-	EX_ST(STORE(stb, %g1, %o0))
+	EX_LD(LOAD(ldub, %o1, %g1),__retl_o4_1)
+	EX_ST(STORE(stb, %g1, %o0),__retl_o4_1)
 	add		%o1, 1, %o1
 	bne,pt		%XCC, 1b
 	add		%o0, 1, %o0
@@ -82,8 +97,8 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	andn		%o2, 0x7, %g1
 	sub		%o2, %g1, %o2
 1:	subcc		%g1, 0x8, %g1
-	EX_LD(LOAD(ldx, %o1, %g2))
-	EX_ST(STORE(stx, %g2, %o0))
+	EX_LD(LOAD(ldx, %o1, %g2),__retl_g1_8)
+	EX_ST(STORE(stx, %g2, %o0),__retl_g1_8)
 	add		%o1, 0x8, %o1
 	bne,pt		%XCC, 1b
 	 add		%o0, 0x8, %o0
@@ -100,8 +115,8 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 
 1:
 	subcc		%o2, 4, %o2
-	EX_LD(LOAD(lduw, %o1, %g1))
-	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	EX_LD(LOAD(lduw, %o1, %g1),__retl_o2_4)
+	EX_ST(STORE(stw, %g1, %o1 + %o3),__retl_o2_4)
 	bgu,pt		%XCC, 1b
 	 add		%o1, 4, %o1
 
@@ -111,8 +126,8 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	.align		32
 90:
 	subcc		%o2, 1, %o2
-	EX_LD(LOAD(ldub, %o1, %g1))
-	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	EX_LD(LOAD(ldub, %o1, %g1),__retl_o2_1)
+	EX_ST(STORE(stb, %g1, %o1 + %o3),__retl_o2_1)
 	bgu,pt		%XCC, 90b
 	 add		%o1, 1, %o1
 	retl
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 302c0e6..6c39812 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -8,18 +8,33 @@
 
 #define XCC xcc
 
-#define EX(x,y)			\
+#define EX(x,y,z)		\
 98:	x,y;			\
 	.section __ex_table,"a";\
 	.align 4;		\
-	.word 98b, __retl_one;	\
+	.word 98b, z;		\
 	.text;			\
 	.align 4;
 
+#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8)
+#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4)
+#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1)
+
 	.register	%g2,#scratch
 	.register	%g3,#scratch
 
 	.text
+__retl_o4_plus_8:
+	add	%o4, %o2, %o4
+	retl
+	 add	%o4, 8, %o0
+__retl_o2_plus_4:
+	retl
+	 add	%o2, 4, %o0
+__retl_o2_plus_1:
+	retl
+	 add	%o2, 1, %o0
+
 	.align	32
 
 	/* Don't try to get too fancy here, just nice and
@@ -28,7 +43,7 @@
 	 * to copy register windows around during thread cloning.
 	 */
 
-ENTRY(___copy_in_user)	/* %o0=dst, %o1=src, %o2=len */
+ENTRY(copy_in_user)	/* %o0=dst, %o1=src, %o2=len */
 	cmp		%o2, 0
 	be,pn		%XCC, 85f
 	 or		%o0, %o1, %o3
@@ -44,8 +59,8 @@ ENTRY(___copy_in_user)	/* %o0=dst, %o1=src, %o2=len */
 	andn		%o2, 0x7, %o4
 	and		%o2, 0x7, %o2
 1:	subcc		%o4, 0x8, %o4
-	EX(ldxa [%o1] %asi, %o5)
-	EX(stxa %o5, [%o0] %asi)
+	EX_O4(ldxa [%o1] %asi, %o5)
+	EX_O4(stxa %o5, [%o0] %asi)
 	add		%o1, 0x8, %o1
 	bgu,pt		%XCC, 1b
 	 add		%o0, 0x8, %o0
@@ -53,8 +68,8 @@ ENTRY(___copy_in_user)	/* %o0=dst, %o1=src, %o2=len */
 	be,pt		%XCC, 1f
 	 nop
 	sub		%o2, 0x4, %o2
-	EX(lduwa [%o1] %asi, %o5)
-	EX(stwa %o5, [%o0] %asi)
+	EX_O2_4(lduwa [%o1] %asi, %o5)
+	EX_O2_4(stwa %o5, [%o0] %asi)
 	add		%o1, 0x4, %o1
 	add		%o0, 0x4, %o0
 1:	cmp		%o2, 0
@@ -70,8 +85,8 @@ ENTRY(___copy_in_user)	/* %o0=dst, %o1=src, %o2=len */
 
 82:
 	subcc		%o2, 4, %o2
-	EX(lduwa [%o1] %asi, %g1)
-	EX(stwa %g1, [%o0] %asi)
+	EX_O2_4(lduwa [%o1] %asi, %g1)
+	EX_O2_4(stwa %g1, [%o0] %asi)
 	add		%o1, 4, %o1
 	bgu,pt		%XCC, 82b
 	 add		%o0, 4, %o0
@@ -82,11 +97,11 @@ ENTRY(___copy_in_user)	/* %o0=dst, %o1=src, %o2=len */
 	.align	32
 90:
 	subcc		%o2, 1, %o2
-	EX(lduba [%o1] %asi, %g1)
-	EX(stba %g1, [%o0] %asi)
+	EX_O2_1(lduba [%o1] %asi, %g1)
+	EX_O2_1(stba %g1, [%o0] %asi)
 	add		%o1, 1, %o1
 	bgu,pt		%XCC, 90b
 	 add		%o0, 1, %o0
 	retl
 	 clr		%o0
-ENDPROC(___copy_in_user)
+ENDPROC(copy_in_user)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index de5e978..a6674f4 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -95,7 +95,6 @@ EXPORT_SYMBOL(ip_fast_csum);
 /* Moving data to/from/in userspace. */
 EXPORT_SYMBOL(___copy_to_user);
 EXPORT_SYMBOL(___copy_from_user);
-EXPORT_SYMBOL(___copy_in_user);
 EXPORT_SYMBOL(__clear_user);
 
 /* Atomic counter implementation. */
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
index ac96ae2..49737d9 100644
--- a/arch/sparc/lib/user_fixup.c
+++ b/arch/sparc/lib/user_fixup.c
@@ -51,21 +51,3 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned lon
 	return compute_size((unsigned long) to, size, &offset);
 }
 EXPORT_SYMBOL(copy_to_user_fixup);
-
-unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
-{
-	unsigned long fault_addr = current_thread_info()->fault_address;
-	unsigned long start = (unsigned long) to;
-	unsigned long end = start + size;
-
-	if (fault_addr >= start && fault_addr < end)
-		return end - fault_addr;
-
-	start = (unsigned long) from;
-	end = start + size;
-	if (fault_addr >= start && fault_addr < end)
-		return end - fault_addr;
-
-	return size;
-}
-EXPORT_SYMBOL(copy_in_user_fixup);

^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2016-08-04  0:21 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-07-31 23:50 [PATCH 1/2] sparc: fix incorrect value returned by copy_from_user_fixup Mikulas Patocka
2016-08-02  4:33 ` David Miller
2016-08-02 12:20   ` Mikulas Patocka
2016-08-02 17:47     ` David Miller
2016-08-04  0:21       ` David Miller

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).