* [PATCH] arm64: Avoid premature usercopy failure
From: Robin Murphy @ 2021-07-12 14:27 UTC (permalink / raw)
  To: will, catalin.marinas
  Cc: linux-arm-kernel, linux-mm, linux-kernel, stable, Chen Huang, Al Viro

Al reminds us that the usercopy API must only return complete failure
if absolutely nothing could be copied. Currently, if userspace does
something silly like giving us an unaligned pointer to Device memory,
or a size which overruns MTE tag bounds, we may fail to honour that
requirement when faulting on a multi-byte access even though a smaller
access could have succeeded.
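
For illustration, the sort of caller this matters to looks roughly like
the following (a simplified sketch of the fault-in-and-retry pattern
used by generic_perform_write(), not the actual mm/filemap.c code;
copy_all_from_user() is a made-up name):

	/* Sketch only -- assumes the 2021-era fault_in_pages_readable() */
	#include <linux/pagemap.h>
	#include <linux/uaccess.h>

	static int copy_all_from_user(void *dst, const char __user *src,
				      size_t bytes)
	{
		while (bytes) {
			size_t copied;

			if (fault_in_pages_readable(src, bytes))
				return -EFAULT;	/* genuinely inaccessible */
			copied = bytes - copy_from_user(dst, src, bytes);
			/*
			 * If copy_from_user() reports zero progress here even
			 * though the fault-in load succeeded, we spin forever.
			 */
			dst += copied;
			src += copied;
			bytes -= copied;
		}
		return 0;
	}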

Add a mitigation to the fixup routines to fall back to a single-byte
copy if we faulted on a larger access before anything has been written
to the destination, to guarantee making *some* forward progress. We
needn't be too concerned about the overall performance since this should
only occur when callers are doing something a bit dodgy in the first
place. Particularly broken userspace might still be able to trick
generic_perform_write() into an infinite loop by targeting write() at
an mmap() of some read-only device register where the fault-in load
succeeds but any store synchronously aborts such that copy_to_user() is
genuinely unable to make progress, but, well, don't do that...
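
Roughly, in C-flavoured pseudocode, the fixup amounts to the following
(an illustrative sketch of the assembly below; load_byte()/store_byte()
are hypothetical stand-ins for the ldtrb/strb/sttrb accessors):

	/*
	 * Faulted before storing anything? Retry a single byte so we can
	 * never spuriously report complete failure.
	 */
	if (dst == dstin) {
		if (!load_byte(&tmp, srcin) && !store_byte(tmp, dstin))
			dst++;
	}
	return end - dst;	/* bytes not copied */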

Cc: stable@vger.kernel.org
Reported-by: Chen Huang <chenhuang5@huawei.com>
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---

I've started on the "replay" approach for computing more precise
remainders in general, but that quickly got more complicated once
rebased onto the fault-address-passing changes, so I'm resending this
now as a point fix and will continue to explore the replay idea as an
improvement on top.

Robin.

 arch/arm64/lib/copy_from_user.S | 13 ++++++++++---
 arch/arm64/lib/copy_in_user.S   | 21 ++++++++++++++-------
 arch/arm64/lib/copy_to_user.S   | 14 +++++++++++---
 3 files changed, 35 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 95cd62d67371..2cf999e41d30 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -29,7 +29,7 @@
 	.endm
 
 	.macro ldrh1 reg, ptr, val
-	user_ldst 9998f, ldtrh, \reg, \ptr, \val
+	user_ldst 9997f, ldtrh, \reg, \ptr, \val
 	.endm
 
 	.macro strh1 reg, ptr, val
@@ -37,7 +37,7 @@
 	.endm
 
 	.macro ldr1 reg, ptr, val
-	user_ldst 9998f, ldtr, \reg, \ptr, \val
+	user_ldst 9997f, ldtr, \reg, \ptr, \val
 	.endm
 
 	.macro str1 reg, ptr, val
@@ -45,7 +45,7 @@
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
-	user_ldp 9998f, \reg1, \reg2, \ptr, \val
+	user_ldp 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
@@ -53,8 +53,10 @@
 	.endm
 
 end	.req	x5
+srcin	.req	x15
 SYM_FUNC_START(__arch_copy_from_user)
 	add	end, x0, x2
+	mov	srcin, x1
 #include "copy_template.S"
 	mov	x0, #0				// Nothing to copy
 	ret
@@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
+9997:	cmp	dst, dstin
+	b.ne	9998f
+	// Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+	strb	tmp1w, [dst], #1
 9998:	sub	x0, end, dst			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 1f61cd0df062..dbea3799c3ef 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -30,33 +30,34 @@
 	.endm
 
 	.macro ldrh1 reg, ptr, val
-	user_ldst 9998f, ldtrh, \reg, \ptr, \val
+	user_ldst 9997f, ldtrh, \reg, \ptr, \val
 	.endm
 
 	.macro strh1 reg, ptr, val
-	user_ldst 9998f, sttrh, \reg, \ptr, \val
+	user_ldst 9997f, sttrh, \reg, \ptr, \val
 	.endm
 
 	.macro ldr1 reg, ptr, val
-	user_ldst 9998f, ldtr, \reg, \ptr, \val
+	user_ldst 9997f, ldtr, \reg, \ptr, \val
 	.endm
 
 	.macro str1 reg, ptr, val
-	user_ldst 9998f, sttr, \reg, \ptr, \val
+	user_ldst 9997f, sttr, \reg, \ptr, \val
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
-	user_ldp 9998f, \reg1, \reg2, \ptr, \val
+	user_ldp 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
-	user_stp 9998f, \reg1, \reg2, \ptr, \val
+	user_stp 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 end	.req	x5
-
+srcin	.req	x15
 SYM_FUNC_START(__arch_copy_in_user)
 	add	end, x0, x2
+	mov	srcin, x1
 #include "copy_template.S"
 	mov	x0, #0
 	ret
@@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
+9997:	cmp	dst, dstin
+	b.ne	9998f
+	// Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+	add	dst, dst, #1
 9998:	sub	x0, end, dst			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 043da90f5dd7..9f380eecf653 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -32,7 +32,7 @@
 	.endm
 
 	.macro strh1 reg, ptr, val
-	user_ldst 9998f, sttrh, \reg, \ptr, \val
+	user_ldst 9997f, sttrh, \reg, \ptr, \val
 	.endm
 
 	.macro ldr1 reg, ptr, val
@@ -40,7 +40,7 @@
 	.endm
 
 	.macro str1 reg, ptr, val
-	user_ldst 9998f, sttr, \reg, \ptr, \val
+	user_ldst 9997f, sttr, \reg, \ptr, \val
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
@@ -48,12 +48,14 @@
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
-	user_stp 9998f, \reg1, \reg2, \ptr, \val
+	user_stp 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 end	.req	x5
+srcin	.req	x15
 SYM_FUNC_START(__arch_copy_to_user)
 	add	end, x0, x2
+	mov	srcin, x1
 #include "copy_template.S"
 	mov	x0, #0
 	ret
@@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2
+9997:	cmp	dst, dstin
+	b.ne	9998f
+	// Before being absolutely sure we couldn't copy anything, try harder
+	ldrb	tmp1w, [srcin]
+USER(9998f, sttrb tmp1w, [dst])
+	add	dst, dst, #1
 9998:	sub	x0, end, dst			// bytes not copied
 	ret
 	.previous
-- 
2.21.0.dirty



* Re: [PATCH] arm64: Avoid premature usercopy failure
From: Will Deacon @ 2021-07-13 16:59 UTC (permalink / raw)
  To: Robin Murphy
  Cc: catalin.marinas, linux-arm-kernel, linux-mm, linux-kernel,
	stable, Chen Huang, Al Viro

On Mon, Jul 12, 2021 at 03:27:46PM +0100, Robin Murphy wrote:
> Al reminds us that the usercopy API must only return complete failure
> if absolutely nothing could be copied. Currently, if userspace does
> something silly like giving us an unaligned pointer to Device memory,
> or a size which overruns MTE tag bounds, we may fail to honour that
> requirement when faulting on a multi-byte access even though a smaller
> access could have succeeded.
> 
> [...]

Is it possible to add/extend a selftest for this, please? I think Catalin
mentioned that before, but not sure if he got anywhere with it.
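
Something like the below ought to show it (rough, untested sketch,
userspace side only): a source buffer whose tail page is PROT_NONE
should yield a short write of the accessible bytes rather than an
outright -EFAULT:

	#include <assert.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		int pfd[2];
		char *buf;
		ssize_t n;

		/* Two anonymous pages, then make the second inaccessible */
		buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		assert(buf != MAP_FAILED);
		memset(buf, 'x', page);
		assert(!mprotect(buf + page, page, PROT_NONE));

		assert(!pipe(pfd));

		/*
		 * Copy straddling the boundary: only 3 source bytes are
		 * accessible, so a multi-byte load spanning the boundary
		 * faults, but copy_from_user() must still make *some*
		 * progress rather than failing outright.
		 */
		n = write(pfd[1], buf + page - 3, 16);
		assert(n > 0 && n <= 3);
		return 0;
	}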

Will


* Re: [PATCH] arm64: Avoid premature usercopy failure
From: Catalin Marinas @ 2021-07-13 17:20 UTC (permalink / raw)
  To: Will Deacon
  Cc: Robin Murphy, linux-arm-kernel, linux-mm, linux-kernel, stable,
	Chen Huang, Al Viro

On Tue, Jul 13, 2021 at 05:59:58PM +0100, Will Deacon wrote:
> On Mon, Jul 12, 2021 at 03:27:46PM +0100, Robin Murphy wrote:
> > Al reminds us that the usercopy API must only return complete failure
> > if absolutely nothing could be copied.
> > 
> > [...]
> 
> Is it possible to add/extend a selftest for this, please? I think Catalin
> mentioned that before, but not sure if he got anywhere with it.

It's on my to-do list, but I'm going on holiday soon. If Robin is keen
on this, I don't really mind ;).

-- 
Catalin


* Re: [PATCH] arm64: Avoid premature usercopy failure
From: Will Deacon @ 2021-07-15 17:39 UTC (permalink / raw)
  To: Robin Murphy, catalin.marinas
  Cc: kernel-team, Will Deacon, linux-arm-kernel, linux-kernel,
	Chen Huang, linux-mm, Al Viro, stable

On Mon, 12 Jul 2021 15:27:46 +0100, Robin Murphy wrote:
> Al reminds us that the usercopy API must only return complete failure
> if absolutely nothing could be copied. Currently, if userspace does
> something silly like giving us an unaligned pointer to Device memory,
> or a size which overruns MTE tag bounds, we may fail to honour that
> requirement when faulting on a multi-byte access even though a smaller
> access could have succeeded.
> 
> [...]

Applied to arm64 (for-next/fixes), thanks!

[1/1] arm64: Avoid premature usercopy failure
      https://git.kernel.org/arm64/c/295cf156231c

Cheers,
-- 
Will

https://fixes.arm64.dev
https://next.arm64.dev
https://will.arm64.dev
