From: Viresh Kumar <viresh.kumar@linaro.org>
To: linux-arm-kernel@lists.infradead.org,
	Julien Thierry <Julien.Thierry@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	Viresh Kumar <viresh.kumar@linaro.org>,
	Will Deacon <will.deacon@arm.com>,
	stable@vger.kernel.org, mark.brown@arm.com,
	Catalin Marinas <catalin.marinas@arm.com>,
	Russell King <rmk+kernel@arm.linux.org.uk>
Subject: [PATCH v4.4 05/45] arm64: Make USER_DS an inclusive limit
Date: Fri, 14 Jun 2019 08:37:48 +0530	[thread overview]
Message-ID: <86a5655ffd342f6f62ae1280cd5131868abfa6de.1560480942.git.viresh.kumar@linaro.org> (raw)
In-Reply-To: <cover.1560480942.git.viresh.kumar@linaro.org>

From: Robin Murphy <robin.murphy@arm.com>

commit 51369e398d0d33e8f524314e672b07e8cf870e79 upstream.

Currently, USER_DS represents an exclusive limit while KERNEL_DS is
inclusive. In order to do some clever trickery for speculation-safe
masking, we need them both to behave equivalently - there aren't enough
bits to make KERNEL_DS exclusive, so we have precisely one option. This
also happens to correct a longstanding false negative for a range
ending on the very top byte of kernel memory.

Mark Rutland points out that we've actually got the semantics of
addresses vs. segments muddled up in most of the places we need to
amend, so shuffle the {USER,KERNEL}_DS definitions around such that we
can correct those properly instead of just pasting "-1"s everywhere.
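
To make the arithmetic concrete, here is a rough userspace model of the new
test (illustration only, not kernel code; the helper name and the 48-bit VA
value behind the modelled USER_DS are assumptions, and the 65-bit sum is
emulated with unsigned __int128):

	#include <stdio.h>

	/* Illustrative limits mirroring the patch: USER_DS inclusive at the
	 * last user byte (assuming 48-bit VA), KERNEL_DS inclusive at ~0. */
	#define MODEL_USER_DS		((1UL << 48) - 1)
	#define MODEL_KERNEL_DS		(~0UL)

	/* Models "(u65)addr + (u65)size <= (u65)limit + 1" exactly. */
	static int model_range_ok(unsigned long addr, unsigned long size,
				  unsigned long limit)
	{
		return (unsigned __int128)addr + size <=
		       (unsigned __int128)limit + 1;
	}

	int main(void)
	{
		/* Range ending on the very top byte of kernel memory:
		 * rejected by the old exclusive test, accepted once
		 * KERNEL_DS is inclusive. */
		printf("%d\n", model_range_ok(~0UL, 1, MODEL_KERNEL_DS));	/* 1 */
		/* The last user byte is still valid under USER_DS... */
		printf("%d\n", model_range_ok(MODEL_USER_DS, 1, MODEL_USER_DS)); /* 1 */
		/* ...but the first byte past it is not. */
		printf("%d\n", model_range_ok(MODEL_USER_DS + 1, 1, MODEL_USER_DS)); /* 0 */
		return 0;
	}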

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[ 4.4: Dropped changes from fault.c and fixed minor rebase conflict ]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 arch/arm64/include/asm/processor.h |  3 ++
 arch/arm64/include/asm/uaccess.h   | 45 +++++++++++++++++-------------
 arch/arm64/kernel/entry.S          |  4 +--
 3 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 12d5b2b97f04..c49597ae529d 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -21,6 +21,9 @@
 
 #define TASK_SIZE_64		(UL(1) << VA_BITS)
 
+#define KERNEL_DS	UL(-1)
+#define USER_DS		(TASK_SIZE_64 - 1)
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 829fa6d3e561..c625cc5531fc 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -56,10 +56,7 @@ struct exception_table_entry
 
 extern int fixup_exception(struct pt_regs *regs);
 
-#define KERNEL_DS	(-1UL)
 #define get_ds()	(KERNEL_DS)
-
-#define USER_DS		TASK_SIZE_64
 #define get_fs()	(current_thread_info()->addr_limit)
 
 static inline void set_fs(mm_segment_t fs)
@@ -87,22 +84,32 @@ static inline void set_fs(mm_segment_t fs)
  * Returns 1 if the range is valid, 0 otherwise.
  *
  * This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
  */
-#define __range_ok(addr, size)						\
-({									\
-	unsigned long __addr = (unsigned long __force)(addr);		\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
-		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (__addr), "Ir" (size),				\
-		  "r" (current_thread_info()->addr_limit)		\
-		: "cc");						\
-	flag;								\
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+	unsigned long limit = current_thread_info()->addr_limit;
+
+	__chk_user_ptr(addr);
+	asm volatile(
+	// A + B <= C + 1 for all A,B,C, in four easy steps:
+	// 1: X = A + B; X' = X % 2^64
+	"	adds	%0, %0, %2\n"
+	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+	"	csel	%1, xzr, %1, hi\n"
+	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+	//    to compensate for the carry flag being set in step 4. For
+	//    X > 2^64, X' merely has to remain nonzero, which it does.
+	"	csinv	%0, %0, xzr, cc\n"
+	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+	//    comes from the carry in being clear. Otherwise, we are
+	//    testing X' - C == 0, subject to the previous adjustments.
+	"	sbcs	xzr, %0, %1\n"
+	"	cset	%0, ls\n"
+	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+	return addr;
+}
 
 /*
  * When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -111,7 +118,7 @@ static inline void set_fs(mm_segment_t fs)
  */
 #define untagged_addr(addr)		sign_extend64(addr, 55)
 
-#define access_ok(type, addr, size)	__range_ok(addr, size)
+#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
 #define user_addr_max			get_fs
 
 /*
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index c849be9231bb..4c5013b09dcb 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -96,10 +96,10 @@
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_thread_info tsk
-	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+	/* Save the task's original addr_limit and set USER_DS */
 	ldr	x20, [tsk, #TI_ADDR_LIMIT]
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
-	mov	x20, #TASK_SIZE_64
+	mov	x20, #USER_DS
 	str	x20, [tsk, #TI_ADDR_LIMIT]
 	.endif /* \el == 0 */
 	mrs	x22, elr_el1
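
As a cross-check on the adds/csel/csinv/sbcs/cset sequence added to
__range_ok() above, the flag dance can be read as the following rough
userspace C model (illustration only; the function name is made up and the
65-bit sum plus the carry/borrow behaviour are emulated with
unsigned __int128):

	/* Userspace model of the conditional-select sequence in __range_ok(). */
	unsigned long model_range_ok_asm(unsigned long addr, unsigned long size,
					 unsigned long limit)
	{
		unsigned __int128 sum = (unsigned __int128)addr + size; /* 65-bit X */
		unsigned long x = (unsigned long)sum;	/* 1: adds  -> X' = X mod 2^64 */
		int carry = (int)(sum >> 64);		/* C flag left by the adds */

		if (sum > ((unsigned __int128)1 << 64))	/* 2: csel ..., hi  (X > 2^64) */
			limit = 0;
		if (carry)				/* 3: csinv ..., cc (X >= 2^64) */
			x = ~0UL;

		/* 4: sbcs xzr, x, limit computes x - limit - !carry; "ls" is
		 *    true when that subtraction borrows or its 64-bit result
		 *    is zero. */
		unsigned __int128 diff = (unsigned __int128)x - limit - !carry;
		return (diff >> 64) != 0 || (unsigned long)diff == 0; /* cset ..., ls */
	}

For any addr, size and limit this returns the same result as the simpler
128-bit comparison sketched in the commit message above, which makes the
per-step comments in the inline asm easy to spot-check.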
-- 
2.21.0.rc0.269.g1a574e7a288b


Thread overview: 57+ messages
2019-06-14  3:07 [PATCH v4.4 00/45] V4.4 backport of arm64 Spectre patches Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 01/45] arm64: barrier: Add CSDB macros to control data-value prediction Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 02/45] arm64: Implement array_index_mask_nospec() Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 03/45] arm64: remove duplicate macro __KERNEL__ check Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 04/45] arm64: move TASK_* definitions to <asm/processor.h> Viresh Kumar
2019-06-14  3:07 ` Viresh Kumar [this message]
2019-06-14  3:07 ` [PATCH v4.4 06/45] arm64: Use pointer masking to limit uaccess speculation Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 07/45] arm64: entry: Ensure branch through syscall table is bounded under speculation Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 08/45] arm64: uaccess: Prevent speculative use of the current addr_limit Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 09/45] arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 10/45] mm/kasan: add API to check memory regions Viresh Kumar
2019-07-04 14:15   ` Julien Thierry
2019-07-05  3:13     ` Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 11/45] arm64: kasan: instrument user memory access API Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 12/45] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 13/45] arm64: cpufeature: Pass capability structure to ->enable callback Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 14/45] drivers/firmware: Expose psci_get_version through psci_ops structure Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 15/45] arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 16/45] arm64: Move post_ttbr_update_workaround to C code Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 17/45] arm64: cpufeature: Add scope for capability check Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 18/45] arm64: Add skeleton to harden the branch predictor against aliasing attacks Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 19/45] arm64: Move BP hardening to check_and_switch_context Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 20/45] mm: Introduce lm_alias Viresh Kumar
2019-06-17 12:33   ` Julien Thierry
2019-06-18  5:00     ` Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 21/45] arm64: entry: Apply BP hardening for high-priority synchronous exceptions Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 22/45] arm64: entry: Apply BP hardening for suspicious interrupts from EL0 Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 23/45] arm64: cputype: Add missing MIDR values for Cortex-A72 and Cortex-A75 Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 24/45] arm64: cpu_errata: Allow an erratum to be match for all revisions of a core Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 25/45] arm64: Implement branch predictor hardening for affected Cortex-A CPUs Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 26/45] arm64: cputype info for Broadcom Vulcan Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 27/45] arm64: cputype: Add MIDR values for Cavium ThunderX2 CPUs Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 28/45] arm64: Branch predictor hardening for Cavium ThunderX2 Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 29/45] arm64: KVM: Increment PC after handling an SMC trap Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 30/45] arm/arm64: KVM: Consolidate the PSCI include files Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 31/45] arm/arm64: KVM: Add PSCI_VERSION helper Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 32/45] arm/arm64: KVM: Add smccc accessors to PSCI code Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 33/45] ARM: 8478/2: arm/arm64: add arm-smccc Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 34/45] arm/arm64: KVM: Implement PSCI 1.0 support Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 35/45] arm/arm64: KVM: Advertise SMCCC v1.1 Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 36/45] arm/arm64: KVM: Turn kvm_psci_version into a static inline Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 37/45] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 38/45] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 39/45] firmware/psci: Expose PSCI conduit Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 40/45] firmware/psci: Expose SMCCC version through psci_ops Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 41/45] arm/arm64: smccc: Make function identifiers an unsigned quantity Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 42/45] arm/arm64: smccc: Implement SMCCC v1.1 inline primitive Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 43/45] arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 44/45] arm64: Kill PSCI_GET_VERSION as a variant-2 workaround Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 45/45] arm64: futex: Mask __user pointers prior to dereference Viresh Kumar
2019-06-17 12:10 ` [PATCH v4.4 00/45] V4.4 backport of arm64 Spectre patches Greg KH
2019-06-17 16:03 ` Julien Thierry
2019-06-18 10:21   ` Viresh Kumar
2019-06-19 11:03     ` Julien Thierry
2019-06-19 11:20       ` Viresh Kumar
2019-06-17 16:30 ` Julien Thierry
2019-07-11 13:57 ` Julien Thierry
