All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes
@ 2015-08-25 15:40 Russell King - ARM Linux
  2015-08-25 15:41 ` [PATCH v2 01/10] ARM: domains: switch to keeping domain value in register Russell King
                   ` (10 more replies)
  0 siblings, 11 replies; 24+ messages in thread
From: Russell King - ARM Linux @ 2015-08-25 15:40 UTC (permalink / raw)
  To: linux-arm-kernel

Here's v2 of the uaccess patches, with the comments raised so far
addressed.  As such, it's grown a tad more than the previous set of
patches.

 arch/arm/Kconfig                            | 15 +++++
 arch/arm/include/asm/assembler.h            | 47 ++++++++++++++++
 arch/arm/include/asm/domain.h               | 53 ++++++++++++++----
 arch/arm/include/asm/futex.h                | 19 ++++++-
 arch/arm/include/asm/pgtable-2level-hwdef.h |  1 +
 arch/arm/include/asm/thread_info.h          |  3 -
 arch/arm/include/asm/uaccess.h              | 85 +++++++++++++++++++++++++++--
 arch/arm/kernel/armksyms.c                  |  6 +-
 arch/arm/kernel/entry-armv.S                | 32 ++++++++---
 arch/arm/kernel/entry-common.S              |  2 +
 arch/arm/kernel/entry-header.S              |  5 ++
 arch/arm/kernel/head.S                      |  5 +-
 arch/arm/kernel/process.c                   | 37 ++++++++++---
 arch/arm/kernel/traps.c                     |  1 -
 arch/arm/lib/clear_user.S                   |  6 +-
 arch/arm/lib/copy_from_user.S               |  6 +-
 arch/arm/lib/copy_to_user.S                 |  6 +-
 arch/arm/lib/csumpartialcopyuser.S          | 14 +++++
 arch/arm/lib/uaccess_with_memcpy.c          |  4 +-
 arch/arm/mm/abort-ev4.S                     |  1 +
 arch/arm/mm/abort-ev5t.S                    |  4 +-
 arch/arm/mm/abort-ev5tj.S                   |  4 +-
 arch/arm/mm/abort-ev6.S                     |  8 ++-
 arch/arm/mm/abort-ev7.S                     |  1 +
 arch/arm/mm/abort-lv4t.S                    |  2 +
 arch/arm/mm/abort-macro.S                   | 14 ++---
 arch/arm/mm/mmu.c                           |  4 +-
 arch/arm/mm/pgd.c                           | 10 ++++
 28 files changed, 324 insertions(+), 71 deletions(-)

-- 
FTTC broadband for 0.8mile line: currently at 10.5Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 01/10] ARM: domains: switch to keeping domain value in register
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
@ 2015-08-25 15:41 ` Russell King
  2015-08-25 15:41 ` [PATCH v2 02/10] ARM: domains: provide domain_mask() Russell King
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:41 UTC (permalink / raw)
  To: linux-arm-kernel

Rather than modifying both the domain access control register and our
per-thread copy, modify only the domain access control register, and
use the per-thread copy to save and restore the register over context
switches.  We can also avoid the explicit initialisation of the
init thread_info structure.

This allows us to avoid needing to gain access to the thread information
at the uaccess control sites.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/include/asm/domain.h      | 20 +++++++++++++++-----
 arch/arm/include/asm/thread_info.h |  3 ---
 arch/arm/kernel/entry-armv.S       |  2 ++
 arch/arm/kernel/process.c          | 13 ++++++++++---
 4 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..7f2941905714 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -59,6 +59,17 @@
 
 #ifndef __ASSEMBLY__
 
+static inline unsigned int get_domain(void)
+{
+	unsigned int domain;
+
+	asm(
+	"mrc	p15, 0, %0, c3, c0	@ get domain"
+	 : "=r" (domain));
+
+	return domain;
+}
+
 #ifdef CONFIG_CPU_USE_DOMAINS
 static inline void set_domain(unsigned val)
 {
@@ -70,11 +81,10 @@ static inline void set_domain(unsigned val)
 
 #define modify_domain(dom,type)					\
 	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
+		unsigned int domain = get_domain();		\
+		domain &= ~domain_val(dom, DOMAIN_MANAGER);	\
+		domain = domain | domain_val(dom, type);	\
+		set_domain(domain);				\
 	} while (0)
 
 #else
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32eded3e50..0a0aec410d8c 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@ struct thread_info {
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7dac3086e361..d19adcf6c580 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -770,6 +770,8 @@ ENTRY(__switch_to)
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a41719..e722f9b3c9b1 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -146,10 +146,9 @@ void __show_regs(struct pt_regs *regs)
 		buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 		{
-			unsigned int transbase, dac;
+			unsigned int transbase, dac = get_domain();
 			asm("mrc p15, 0, %0, c2, c0\n\t"
-			    "mrc p15, 0, %1, c3, c0\n"
-			    : "=r" (transbase), "=r" (dac));
+			    : "=r" (transbase));
 			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 			  	transbase, dac);
 		}
@@ -210,6 +209,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+	/*
+	 * Copy the initial value of the domain access control register
+	 * from the current thread: thread->addr_limit will have been
+	 * copied from the current thread via setup_thread_stack() in
+	 * kernel/fork.c
+	 */
+	thread->cpu_domain = get_domain();
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 02/10] ARM: domains: provide domain_mask()
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
  2015-08-25 15:41 ` [PATCH v2 01/10] ARM: domains: switch to keeping domain value in register Russell King
@ 2015-08-25 15:41 ` Russell King
  2015-08-25 15:41 ` [PATCH v2 03/10] ARM: domains: move initial domain setting value to asm/domains.h Russell King
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:41 UTC (permalink / raw)
  To: linux-arm-kernel

Provide a macro to generate the mask for a domain, rather than using
domain_val(, DOMAIN_MANAGER) which won't work when CPU_USE_DOMAINS
is turned off.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/include/asm/domain.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 7f2941905714..045b9b453bcd 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -55,7 +55,8 @@
 #define DOMAIN_MANAGER	1
 #endif
 
-#define domain_val(dom,type)	((type) << (2*(dom)))
+#define domain_mask(dom)	((3) << (2 * (dom)))
+#define domain_val(dom,type)	((type) << (2 * (dom)))
 
 #ifndef __ASSEMBLY__
 
@@ -82,7 +83,7 @@ static inline void set_domain(unsigned val)
 #define modify_domain(dom,type)					\
 	do {							\
 		unsigned int domain = get_domain();		\
-		domain &= ~domain_val(dom, DOMAIN_MANAGER);	\
+		domain &= ~domain_mask(dom);			\
 		domain = domain | domain_val(dom, type);	\
 		set_domain(domain);				\
 	} while (0)
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 03/10] ARM: domains: move initial domain setting value to asm/domains.h
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
  2015-08-25 15:41 ` [PATCH v2 01/10] ARM: domains: switch to keeping domain value in register Russell King
  2015-08-25 15:41 ` [PATCH v2 02/10] ARM: domains: provide domain_mask() Russell King
@ 2015-08-25 15:41 ` Russell King
  2015-08-25 15:41 ` [PATCH v2 04/10] ARM: domains: get rid of manager mode for user domain Russell King
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:41 UTC (permalink / raw)
  To: linux-arm-kernel

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/include/asm/domain.h | 6 ++++++
 arch/arm/kernel/head.S        | 5 +----
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 045b9b453bcd..4218f88e8f7e 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -58,6 +58,12 @@
 #define domain_mask(dom)	((3) << (2 * (dom)))
 #define domain_val(dom,type)	((type) << (2 * (dom)))
 
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+
 #ifndef __ASSEMBLY__
 
 static inline unsigned int get_domain(void)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d97e459..d56e5e9a9e1e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -461,10 +461,7 @@ __enable_mmu:
 #ifdef CONFIG_ARM_LPAE
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
 #else
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mov	r5, #DACR_INIT
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 04/10] ARM: domains: get rid of manager mode for user domain
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
                   ` (2 preceding siblings ...)
  2015-08-25 15:41 ` [PATCH v2 03/10] ARM: domains: move initial domain setting value to asm/domains.h Russell King
@ 2015-08-25 15:41 ` Russell King
  2015-08-25 15:41 ` [PATCH v2 05/10] ARM: domains: keep vectors in separate domain Russell King
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:41 UTC (permalink / raw)
  To: linux-arm-kernel

Since we switched to early trap initialisation in 94e5a85b3be0
("ARM: earlier initialization of vectors page") we haven't been writing
directly to the vectors page, and so there's no need for this domain
to be in manager mode.  Switch it to client mode.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/include/asm/domain.h | 2 +-
 arch/arm/kernel/traps.c       | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 4218f88e8f7e..08b601e69ddc 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -59,7 +59,7 @@
 #define domain_val(dom,type)	((type) << (2 * (dom)))
 
 #define DACR_INIT \
-	(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
 	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 	 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
 	 domain_val(DOMAIN_IO, DOMAIN_CLIENT))
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226236f2..969f9d9e665f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
 	kuser_init(vectors_base);
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 05/10] ARM: domains: keep vectors in separate domain
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
                   ` (3 preceding siblings ...)
  2015-08-25 15:41 ` [PATCH v2 04/10] ARM: domains: get rid of manager mode for user domain Russell King
@ 2015-08-25 15:41 ` Russell King
  2015-08-25 15:41 ` [PATCH v2 06/10] ARM: domains: remove DOMAIN_TABLE Russell King
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:41 UTC (permalink / raw)
  To: linux-arm-kernel

Keep the machine vectors in its own domain to avoid software based
user access control from making the vector code inaccessible, and
thereby deadlocking the machine.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/include/asm/domain.h               |  4 +++-
 arch/arm/include/asm/pgtable-2level-hwdef.h |  1 +
 arch/arm/mm/mmu.c                           |  4 ++--
 arch/arm/mm/pgd.c                           | 10 ++++++++++
 4 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 08b601e69ddc..396a12e486fe 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -43,6 +43,7 @@
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
+#define DOMAIN_VECTORS	3
 
 /*
  * Domain types
@@ -62,7 +63,8 @@
 	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
 	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 	 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-	 domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e953e..d0131ee6f6af 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
  *   - section
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6ca7d9aa896f..a016de248034 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f11dd9f..e683db1b90a3 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		if (!new_pte)
 			goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Modify the PTE pointer to have the correct domain.  This
+		 * needs to be the vectors domain to avoid the low vectors
+		 * being unmapped.
+		 */
+		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 		init_pud = pud_offset(init_pgd, 0);
 		init_pmd = pmd_offset(init_pud, 0);
 		init_pte = pte_offset_map(init_pmd, 0);
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 06/10] ARM: domains: remove DOMAIN_TABLE
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
                   ` (4 preceding siblings ...)
  2015-08-25 15:41 ` [PATCH v2 05/10] ARM: domains: keep vectors in separate domain Russell King
@ 2015-08-25 15:41 ` Russell King
  2015-08-25 15:41 ` [PATCH v2 07/10] ARM: mm: improve do_ldrd_abort macro Russell King
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:41 UTC (permalink / raw)
  To: linux-arm-kernel

DOMAIN_TABLE is not used; in any case, it aliases to the kernel domain.
Remove this definition.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/include/asm/domain.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 396a12e486fe..2be929549938 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,12 +34,10 @@
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL	0
-#define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
 #define DOMAIN_IO	2
 #else
 #define DOMAIN_KERNEL	2
-#define DOMAIN_TABLE	2
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
@@ -62,7 +60,6 @@
 #define DACR_INIT \
 	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
 	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-	 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
 	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
 	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
 
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 07/10] ARM: mm: improve do_ldrd_abort macro
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
                   ` (5 preceding siblings ...)
  2015-08-25 15:41 ` [PATCH v2 06/10] ARM: domains: remove DOMAIN_TABLE Russell King
@ 2015-08-25 15:41 ` Russell King
  2015-08-25 15:41 ` [PATCH v2 08/10] ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore() Russell King
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:41 UTC (permalink / raw)
  To: linux-arm-kernel

Improve the do_ldrd_abort macro code - firstly, it inefficiently checks
for the LDRD encoding by doing a multi-stage test of various bits.  This
can be simplified by generating a mask, bitmasking the instruction and
then comparing the result.

Secondly, we want to be able to test the result rather than branching
to do_DataAbort, so remove the branch at the end and rename the macro
to 'teq_ldrd' to reflect its new usage.  teq_ldrd macro returns 'eq'
if the instruction was a LDRD.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/mm/abort-ev5t.S  |  3 ++-
 arch/arm/mm/abort-ev5tj.S |  3 ++-
 arch/arm/mm/abort-ev6.S   |  3 ++-
 arch/arm/mm/abort-macro.S | 13 +++++--------
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4653a3..c913031b79cc 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -22,7 +22,8 @@ ENTRY(v5t_early_abort)
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
 	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ check write
 	orreq	r1, r1, #1 << 11
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a61264..1b80d71adb0f 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,8 @@ ENTRY(v5tj_early_abort)
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c22a33..113704f30e9f 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -34,7 +34,8 @@ ENTRY(v6_early_abort)
 	ldr	r3, [r4]			@ read aborted ARM instruction
  ARM_BE8(rev	r3, r3)
 
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68ef0e83..50d6c0a900b1 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -29,12 +29,9 @@ not_thumb:
  *   [7:4] == 1101
  *    [20] == 0
  */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000		@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
-
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 08/10] ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore()
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
                   ` (6 preceding siblings ...)
  2015-08-25 15:41 ` [PATCH v2 07/10] ARM: mm: improve do_ldrd_abort macro Russell King
@ 2015-08-25 15:41 ` Russell King
  2015-08-25 15:42 ` [PATCH v2 09/10] ARM: entry: provide uaccess assembly macro hooks Russell King
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:41 UTC (permalink / raw)
  To: linux-arm-kernel

Provide uaccess_save_and_enable() and uaccess_restore() to permit
control of userspace visibility to the kernel, and hook these into
the appropriate places in the kernel where we need to access
userspace.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/include/asm/futex.h       | 19 ++++++++--
 arch/arm/include/asm/uaccess.h     | 71 +++++++++++++++++++++++++++++++++++---
 arch/arm/kernel/armksyms.c         |  6 ++--
 arch/arm/lib/clear_user.S          |  6 ++--
 arch/arm/lib/copy_from_user.S      |  6 ++--
 arch/arm/lib/copy_to_user.S        |  6 ++--
 arch/arm/lib/uaccess_with_memcpy.c |  4 +--
 7 files changed, 97 insertions(+), 21 deletions(-)

diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed82809d82..6795368ad023 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags;				\
 	smp_mb();						\
 	prefetchw(uaddr);					\
+	__ua_flags = uaccess_save_and_enable();			\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;
 
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	smp_mb();
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();
 
 	*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags = uaccess_save_and_enable();	\
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	u32 val;
 
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 
 	*uval = val;
 	preempt_enable();
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d09ef7a..82880132f941 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,21 @@ struct exception_table_entry
 extern int fixup_exception(struct pt_regs *regs);
 
 /*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+	return 0;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+}
+
+/*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
  */
@@ -165,6 +180,7 @@ extern int __get_user_64t_4(void *);
 		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			if (sizeof((x)) >= 8)				\
@@ -192,6 +208,7 @@ extern int __get_user_64t_4(void *);
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		x = (typeof(*(p))) __r2;				\
 		__e;							\
 	})
@@ -224,6 +241,7 @@ extern int __put_user_8(void *, unsigned long long);
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			__put_user_x(__r2, __p, __e, __l, 1);		\
@@ -239,6 +257,7 @@ extern int __put_user_8(void *, unsigned long long);
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		__e;							\
 	})
 
@@ -300,14 +319,17 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
 
@@ -381,9 +403,11 @@ do {									\
 #define __put_user_err(x, ptr, err)					\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -391,6 +415,7 @@ do {									\
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)
 
 #define __put_user_asm_byte(x, __pu_addr, err)			\
@@ -474,11 +499,46 @@ do {									\
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -511,6 +571,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 	return n;
 }
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strlen_user(const char __user *str);
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index a88671cfe1ff..a35d72d30b56 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7db2d5..970d6c043774 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
 
 		.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 
 		.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9952be..1512bebfbf1b 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -89,11 +89,11 @@
 
 	.text
 
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
 
 	.pushsection .fixup,"ax"
 	.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b0675a3e..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -93,11 +93,11 @@
 	.text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
 
 	.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d710013c..77f020e75ccd 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -136,7 +136,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	/*
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 	return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
 	if (n < 64)
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 09/10] ARM: entry: provide uaccess assembly macro hooks
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
                   ` (7 preceding siblings ...)
  2015-08-25 15:41 ` [PATCH v2 08/10] ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore() Russell King
@ 2015-08-25 15:42 ` Russell King
  2015-08-25 15:42 ` [PATCH v2 10/10] ARM: software-based priviledged-no-access support Russell King
  2015-08-25 16:37 ` [PATCH v2 11/10] ARM: fix swp-emulate Russell King
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:42 UTC (permalink / raw)
  To: linux-arm-kernel

Provide hooks into the kernel entry and exit paths to permit control
of userspace visibility to the kernel.  The intended use is:

- on entry to kernel from user, uaccess_disable will be called to
  disable userspace visibility
- on exit from kernel to user, uaccess_enable will be called to
  enable userspace visibility
- on entry from a kernel exception, uaccess_save_and_disable will be
  called to save the current userspace visibility setting, and disable
  access
- on exit from a kernel exception, uaccess_restore will be called to
  restore the userspace visibility as it was before the exception
  occurred.

These hooks allow us to keep userspace visibility disabled for the
vast majority of the kernel, except for localised regions where we
want to explicitly access userspace.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/include/asm/assembler.h | 17 +++++++++++++++++
 arch/arm/kernel/entry-armv.S     | 30 ++++++++++++++++++++++--------
 arch/arm/kernel/entry-common.S   |  2 ++
 arch/arm/kernel/entry-header.S   |  5 +++++
 arch/arm/mm/abort-ev4.S          |  1 +
 arch/arm/mm/abort-ev5t.S         |  1 +
 arch/arm/mm/abort-ev5tj.S        |  1 +
 arch/arm/mm/abort-ev6.S          |  7 ++++---
 arch/arm/mm/abort-ev7.S          |  1 +
 arch/arm/mm/abort-lv4t.S         |  2 ++
 arch/arm/mm/abort-macro.S        |  1 +
 11 files changed, 57 insertions(+), 11 deletions(-)

diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe57279c66..a91177043467 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -445,6 +445,23 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm
 
+	.macro	uaccess_disable, tmp, isb=1
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+	.endm
+
+	.macro	uaccess_save, tmp
+	.endm
+
+	.macro	uaccess_restore
+	.endm
+
+	.macro	uaccess_save_and_disable, tmp
+	uaccess_save \tmp
+	uaccess_disable \tmp
+	.endm
+
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d19adcf6c580..61f00a3f3047 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
 #define SPFIX(code...)
 #endif
 
-	.macro	svc_entry, stack_hole=0, trace=1
+	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
  SPFIX(	mov	r0, sp		)
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
 	ldmia	r0, {r3 - r5}
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
  SPFIX(	addeq	r2, r2, #4	)
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
 	@
 	stmia	r7, {r2 - r6}
 
+	uaccess_save r0
+	.if \uaccess
+	uaccess_disable r0
+	.endif
+
 	.if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
 
 	.align	5
 __dabt_svc:
-	svc_entry
+	svc_entry uaccess=0
 	mov	r2, sp
 	dabt_helper
  THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif
 
-	.macro	usr_entry, trace=1
+	.macro	usr_entry, trace=1, uaccess=1
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
  ARM(	stmdb	r0, {sp, lr}^			)
  THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
+	.if \uaccess
+	uaccess_disable ip
+	.endif
+
 	@ Enable the alignment trap while in kernel mode
  ATRAP(	teq	r8, r7)
  ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
 
 	.align	5
 __dabt_usr:
-	usr_entry
+	usr_entry uaccess=0
 	kuser_cmpxchg_check
 	mov	r2, sp
 	dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
 
 	.align	5
 __und_usr:
-	usr_entry
+	usr_entry uaccess=0
 
 	mov	r2, r4
 	mov	r3, r5
@@ -484,6 +493,8 @@ __und_usr:
 1:	ldrt	r0, [r4]
  ARM_BE8(rev	r0, r0)				@ little endian instruction
 
+	uaccess_disable ip
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
 2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16		@ 16bit undefined instruction
+	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
 3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
+	uaccess_disable ip
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
 __und_usr_fault_32:
 	mov	r1, #4
 	b	1f
+__und_usr_fault_16_pan:
+	uaccess_disable ip
 __und_usr_fault_16:
 	mov	r1, #2
 1:	mov	r0, sp
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1dec80..189154980703 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -173,6 +173,8 @@ ENTRY(vector_swi)
  USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 #endif
 
+	uaccess_disable tbl
+
 	adr	tbl, sys_call_table		@ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045abead7..220ac24900cc 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -215,6 +215,7 @@
 	blne	trace_hardirqs_off
 #endif
 	.endif
+	uaccess_restore
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -241,6 +242,7 @@
 	@ on the stack remains correct).
 	@
 	.macro  svc_exit_via_fiq
+	uaccess_restore
 	mov	r0, sp
 	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
 				@ clobber state restored below)
@@ -253,6 +255,7 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
@@ -329,6 +332,7 @@
 	 * part of each exception entry and exit sequence.
 	 */
 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
 	.if	\offset
 	add	sp, #\offset
 	.endif
@@ -336,6 +340,7 @@
 	.endm
 #else	/* ifdef CONFIG_CPU_V7M */
 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4aba9..b3b31e30cadd 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	ldr	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r3, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index c913031b79cc..a6a381a6caa5 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,6 +21,7 @@ ENTRY(v5t_early_abort)
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable user access
 	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
 	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
 	beq	do_DataAbort			@ yes
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 1b80d71adb0f..00ab011bef58 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,6 +24,7 @@ ENTRY(v5tj_early_abort)
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
 	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 113704f30e9f..8801a15aa105 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,17 +26,18 @@ ENTRY(v6_early_abort)
 	ldr	ip, =0x4107b36
 	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
 	teq	ip, r3, lsr #4			@ r0 ARM1136?
-	bne	do_DataAbort
+	bne	1f
 	tst	r5, #PSR_J_BIT			@ Java?
 	tsteq	r5, #PSR_T_BIT			@ Thumb?
-	bne	do_DataAbort
+	bne	1f
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
  ARM_BE8(rev	r3, r3)
 
 	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
-	beq	do_DataAbort			@ yes
+	beq	1f				@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
+1:	uaccess_disable ip			@ disable userspace access
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad054214..e8d0e08c227f 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	uaccess_disable ip			@ disable userspace access
 
 	/*
 	 * V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f3982580c273..6d8e8e3365d1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]			@ read arm instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
 
 .data_thumb_abort:
 	ldrh	r8, [r4]			@ read instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 11			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8			@ yes
 	and	r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 50d6c0a900b1..4509bee4e081 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
+	uaccess_disable ip			@ disable userspace access
 	and	\tmp, \tmp, # 0xfe00		@ Mask opcode field
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
                   ` (8 preceding siblings ...)
  2015-08-25 15:42 ` [PATCH v2 09/10] ARM: entry: provide uaccess assembly macro hooks Russell King
@ 2015-08-25 15:42 ` Russell King
  2015-08-25 16:53   ` Will Deacon
                     ` (2 more replies)
  2015-08-25 16:37 ` [PATCH v2 11/10] ARM: fix swp-emulate Russell King
  10 siblings, 3 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 15:42 UTC (permalink / raw)
  To: linux-arm-kernel

Provide a software-based implementation of the priviledged no access
support found in ARMv8.1.

Userspace pages are mapped using a different domain number from the
kernel and IO mappings.  If we switch the user domain to "no access"
when we enter the kernel, we can prevent the kernel from touching
userspace.

However, the kernel needs to be able to access userspace via the
various user accessor functions.  With the wrapping in the previous
patch, we can temporarily enable access when the kernel needs user
access, and re-disable it afterwards.

This allows us to trap non-intended accesses to userspace, eg, caused
by an inadvertent dereference of the LIST_POISON* values, which, with
appropriate user mappings setup, can be made to succeed.  This in turn
can allow use-after-free bugs to be further exploited than would
otherwise be possible.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/Kconfig                   | 15 +++++++++++++++
 arch/arm/include/asm/assembler.h   | 30 ++++++++++++++++++++++++++++++
 arch/arm/include/asm/domain.h      | 21 +++++++++++++++++++--
 arch/arm/include/asm/uaccess.h     | 14 ++++++++++++++
 arch/arm/kernel/process.c          | 24 ++++++++++++++++++------
 arch/arm/lib/csumpartialcopyuser.S | 14 ++++++++++++++
 6 files changed, 110 insertions(+), 8 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a750c1425c3a..a898eb72da51 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1694,6 +1694,21 @@ config HIGHPTE
 	bool "Allocate 2nd-level pagetables from highmem"
 	depends on HIGHMEM
 
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement priviledged no-access"
+	depends on MMU && !ARM_LPAE
+	default y
+	help
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-efforts implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
 	depends on PERF_EVENTS
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index a91177043467..ff74f0b54b0e 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -446,15 +446,45 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	.endm
 
 	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	isb
+	.endif
+#endif
 	.endm
 
 	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	isb
+	.endif
+#endif
 	.endm
 
 	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
 	.endm
 
 	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
 	.endm
 
 	.macro	uaccess_save_and_disable, tmp
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 2be929549938..e878129f2fee 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -57,11 +57,29 @@
 #define domain_mask(dom)	((3) << (2 * (dom)))
 #define domain_val(dom,type)	((type) << (2 * (dom)))
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
 #define DACR_INIT \
 	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
 	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
 	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
@@ -76,7 +94,6 @@ static inline unsigned int get_domain(void)
 	return domain;
 }
 
-#ifdef CONFIG_CPU_USE_DOMAINS
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
@@ -85,6 +102,7 @@ static inline void set_domain(unsigned val)
 	isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
 		unsigned int domain = get_domain();		\
@@ -94,7 +112,6 @@ static inline void set_domain(unsigned val)
 	} while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
 
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 82880132f941..01bae13b2cea 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -57,11 +57,25 @@ extern int fixup_exception(struct pt_regs *regs);
  */
 static inline unsigned int uaccess_save_and_enable(void)
 {
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
 	return 0;
+#endif
 }
 
 static inline void uaccess_restore(unsigned int flags)
 {
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
 }
 
 /*
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e722f9b3c9b1..b407cc7a7b55 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -129,12 +129,24 @@ void __show_regs(struct pt_regs *regs)
 	buf[4] = '\0';
 
 #ifndef CONFIG_CPU_V7M
-	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-		buf, interrupts_enabled(regs) ? "n" : "ff",
-		fast_interrupts_enabled(regs) ? "n" : "ff",
-		processor_modes[processor_mode(regs)],
-		isa_modes[isa_mode(regs)],
-		get_fs() == get_ds() ? "kernel" : "user");
+	{
+		unsigned int domain = get_domain();
+		const char *segment;
+
+		if ((domain & domain_mask(DOMAIN_USER)) ==
+		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+			segment = "none";
+		else if (get_fs() == get_ds())
+			segment = "kernel";
+		else
+			segment = "user";
+
+		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+			buf, interrupts_enabled(regs) ? "n" : "ff",
+			fast_interrupts_enabled(regs) ? "n" : "ff",
+			processor_modes[processor_mode(regs)],
+			isa_modes[isa_mode(regs)], segment);
+	}
 #else
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e61f89..52784f6f1086 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
 
 		.text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, r3, c3, c0, 0
+		stmfd	sp!, {r1 - r8, lr}
+		uaccess_enable r3
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1 - r8, lr}
+		mcr	p15, 0, r3, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+#endif
 
 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 11/10] ARM: fix swp-emulate
  2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
                   ` (9 preceding siblings ...)
  2015-08-25 15:42 ` [PATCH v2 10/10] ARM: software-based priviledged-no-access support Russell King
@ 2015-08-25 16:37 ` Russell King
  10 siblings, 0 replies; 24+ messages in thread
From: Russell King @ 2015-08-25 16:37 UTC (permalink / raw)
  To: linux-arm-kernel

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
Local testing found this direct userspace access from the kernel.  I'll
be rolling this into patch 10 tomorrow, but for the time being I'm adding
it on top of the set.

 arch/arm/kernel/swp_emulate.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756782c7..5b26e7efa9ea 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 
 	while (1) {
 		unsigned long temp;
+		unsigned int __ua_flags;
 
+		__ua_flags = uaccess_save_and_enable();
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
 			__user_swp_asm(*data, address, res, temp);
+		uaccess_restore(__ua_flags);
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
-- 
2.1.0

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-08-25 15:42 ` [PATCH v2 10/10] ARM: software-based priviledged-no-access support Russell King
@ 2015-08-25 16:53   ` Will Deacon
  2015-08-25 17:07   ` Nicolas Schichan
  2015-10-09  8:28   ` Linus Walleij
  2 siblings, 0 replies; 24+ messages in thread
From: Will Deacon @ 2015-08-25 16:53 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Russell,

On Tue, Aug 25, 2015 at 04:42:08PM +0100, Russell King wrote:
> Provide a software-based implementation of the priviledged no access
> support found in ARMv8.1.
> 
> Userspace pages are mapped using a different domain number from the
> kernel and IO mappings.  If we switch the user domain to "no access"
> when we enter the kernel, we can prevent the kernel from touching
> userspace.
> 
> However, the kernel needs to be able to access userspace via the
> various user accessor functions.  With the wrapping in the previous
> patch, we can temporarily enable access when the kernel needs user
> access, and re-disable it afterwards.
> 
> This allows us to trap non-intended accesses to userspace, eg, caused
> by an inadvertent dereference of the LIST_POISON* values, which, with
> appropriate user mappings setup, can be made to succeed.  This in turn
> can allow use-after-free bugs to be further exploited than would
> otherwise be possible.
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  arch/arm/Kconfig                   | 15 +++++++++++++++
>  arch/arm/include/asm/assembler.h   | 30 ++++++++++++++++++++++++++++++
>  arch/arm/include/asm/domain.h      | 21 +++++++++++++++++++--
>  arch/arm/include/asm/uaccess.h     | 14 ++++++++++++++
>  arch/arm/kernel/process.c          | 24 ++++++++++++++++++------
>  arch/arm/lib/csumpartialcopyuser.S | 14 ++++++++++++++
>  6 files changed, 110 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> index a750c1425c3a..a898eb72da51 100644
> --- a/arch/arm/Kconfig
> +++ b/arch/arm/Kconfig
> @@ -1694,6 +1694,21 @@ config HIGHPTE
>  	bool "Allocate 2nd-level pagetables from highmem"
>  	depends on HIGHMEM
>  
> +config CPU_SW_DOMAIN_PAN
> +	bool "Enable use of CPU domains to implement priviledged no-access"

Minor comment, but you've consistently misspelt "privileged".

Anyway, I tried this on my TC2 board running Debian Jessie armhf and,
whilst it boots to a shell on the console, ssh connections appear to
hang on the client before even trying to auth. I don't see anything
like a domain fault and the machine is still responsive on the console.
Disabling this option gets things working again for me.

Note that I *do* see undefined instruction exceptions from sshd
regardless of this patch, however I think they're triggered from
something like libcrypto which is prepared to handle the SIGILL.

FWIW, I'm using your ten patches from this series on top of 4.2-rc8 and
I've put the .config here:

  http://www.willdeacon.ukfsn.org/bitbucket/oopsen/pan/pan-tc2.config

Will

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-08-25 15:42 ` [PATCH v2 10/10] ARM: software-based priviledged-no-access support Russell King
  2015-08-25 16:53   ` Will Deacon
@ 2015-08-25 17:07   ` Nicolas Schichan
  2015-08-25 17:48     ` Russell King - ARM Linux
  2015-10-09  8:28   ` Linus Walleij
  2 siblings, 1 reply; 24+ messages in thread
From: Nicolas Schichan @ 2015-08-25 17:07 UTC (permalink / raw)
  To: linux-arm-kernel

On 08/25/2015 05:42 PM, Russell King wrote:
> Provide a software-based implementation of the priviledged no access
> support found in ARMv8.1.
[...]
> diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
> index a91177043467..ff74f0b54b0e 100644
> --- a/arch/arm/include/asm/assembler.h
> +++ b/arch/arm/include/asm/assembler.h
> @@ -446,15 +446,45 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
>  	.endm
>  
>  	.macro	uaccess_disable, tmp, isb=1
> +#ifdef CONFIG_CPU_SW_DOMAIN_PAN
> +	/*
> +	 * Whenever we re-enter userspace, the domains should always be
> +	 * set appropriately.
> +	 */
> +	mov	\tmp, #DACR_UACCESS_DISABLE
> +	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
> +	.if	\isb
> +	isb
> +	.endif
> +#endif
>  	.endm
>  
>  	.macro	uaccess_enable, tmp, isb=1
> +#ifdef CONFIG_CPU_SW_DOMAIN_PAN
> +	/*
> +	 * Whenever we re-enter userspace, the domains should always be
> +	 * set appropriately.
> +	 */
> +	mov	\tmp, #DACR_UACCESS_ENABLE
> +	mcr	p15, 0, \tmp, c3, c0, 0
> +	.if	\isb
> +	isb
> +	.endif
> +#endif
>  	.endm

Thanks for the updated series,

on ARMv5, I get the following compile error:

arch/arm/kernel/entry-common.S: Assembler messages:
arch/arm/kernel/entry-common.S:200: Error: selected processor does not support
ARM mode `isb'

replacing those two "isb" occurrences with "instr_sync" fixed it.

With that added, accesses to LIST_POISON are still correctly caught when
CONFIG_CPU_SW_DOMAIN_PAN is set. Also, the transmission of an IPv6 packet no
longer results in a fault. With CONFIG_CPU_SW_DOMAIN_PAN disabled, the
system now boots fine.

This has been tested on Linux 4.1 / kirkwood and Linux 4.2-rc8 / qemu,armv5.

Thanks,

-- 
Nicolas Schichan
Freebox SAS

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-08-25 17:07   ` Nicolas Schichan
@ 2015-08-25 17:48     ` Russell King - ARM Linux
  2015-08-26 13:36       ` Nicolas Schichan
  0 siblings, 1 reply; 24+ messages in thread
From: Russell King - ARM Linux @ 2015-08-25 17:48 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Aug 25, 2015 at 07:07:39PM +0200, Nicolas Schichan wrote:
> arch/arm/kernel/entry-common.S: Assembler messages:
> arch/arm/kernel/entry-common.S:200: Error: selected processor does not support
> ARM mode `isb'
> 
>> replacing those two "isb" occurrences with "instr_sync" fixed it.

Thanks, that's exactly what I've done.  I've pushed that and the other
fixes out for linux-next to pick up, hopefully with fewer failures.

This series passed my own build tests (which included building and
booting SDP4430, LDP4430, Versatile Express and iMX6 platforms.)
Unfortunately, they're all Cortex-A8 or A9 platforms.

Olof's builder is showing some build failures in the boot log, but I'll
assume that they're down to the above - I don't yet have the build log,
and that's going to arrive at some point after I've left for a committee
meeting which will extend for most of the evening.  So, I'm hoping that
tonight's linux-next will see improvement rather than deterioration -
that's all I can do at this point... hope.  I'm out of time to do any
more build checking prior to linux-next pulling my tree.

-- 
FTTC broadband for 0.8mile line: currently at 10.5Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-08-25 17:48     ` Russell King - ARM Linux
@ 2015-08-26 13:36       ` Nicolas Schichan
  0 siblings, 0 replies; 24+ messages in thread
From: Nicolas Schichan @ 2015-08-26 13:36 UTC (permalink / raw)
  To: linux-arm-kernel

On 08/25/2015 07:48 PM, Russell King - ARM Linux wrote:
> On Tue, Aug 25, 2015 at 07:07:39PM +0200, Nicolas Schichan wrote:
>> arch/arm/kernel/entry-common.S: Assembler messages:
>> arch/arm/kernel/entry-common.S:200: Error: selected processor does not support
>> ARM mode `isb'
>>
>> replacing those two "isb" occurences with "instr_sync" fixed it.
> 
> Thanks, that's exactly what I've done.  I've pushed that and the other
> fixes out for linux-next to pick up, hopefully with fewer failures.

For the code in next-20150826:

Tested-by: Nicolas Schichan <nschichan@freebox.fr>

Thanks,

-- 
Nicolas Schichan
Freebox SAS

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-08-25 15:42 ` [PATCH v2 10/10] ARM: software-based priviledged-no-access support Russell King
  2015-08-25 16:53   ` Will Deacon
  2015-08-25 17:07   ` Nicolas Schichan
@ 2015-10-09  8:28   ` Linus Walleij
  2015-10-09 10:53     ` Will Deacon
  2 siblings, 1 reply; 24+ messages in thread
From: Linus Walleij @ 2015-10-09  8:28 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Aug 25, 2015 at 5:42 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:

> Provide a software-based implementation of the priviledged no access
> support found in ARMv8.1.
>
> Userspace pages are mapped using a different domain number from the
> kernel and IO mappings.  If we switch the user domain to "no access"
> when we enter the kernel, we can prevent the kernel from touching
> userspace.
>
> However, the kernel needs to be able to access userspace via the
> various user accessor functions.  With the wrapping in the previous
> patch, we can temporarily enable access when the kernel needs user
> access, and re-disable it afterwards.
>
> This allows us to trap non-intended accesses to userspace, eg, caused
> by an inadvertent dereference of the LIST_POISON* values, which, with
> appropriate user mappings setup, can be made to succeed.  This in turn
> can allow use-after-free bugs to be further exploited than would
> otherwise be possible.
>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

For some reason this patch explodes on my ARM PB11MPCore, it
is a weird beast and corner case machine so I guess that is why
it wasn't noticed. This happens a bit into the boot when freeing
unused pages:

Freeing unused kernel memory: 2672K (c0448000 - c06e4000)
Unable to handle kernel paging request at virtual address b6f069f4
pgd = c6e58000
[b6f069f4] *pgd=76e09831, *pte=77ff759f, *ppte=77ff7e6e
Internal error: Oops: 17 [#1] SMP ARM
Modules linked in:
CPU: 2 PID: 1 Comm: init Not tainted 4.3.0-rc4-00015-gf6702681a0af #48
Hardware name: ARM-RealView PB11MPCore
task: c7827bc0 ti: c782c000 task.ti: c782c000
PC is at v6wbi_flush_user_tlb_range+0x28/0x48
LR is at on_each_cpu_mask+0x58/0x60
pc : [<c001abf0>]    lr : [<c007c18c>]    psr: 20000093
sp : c782deb8  ip : 00000000  fp : 00000000
r10: c6e5adc8  r9 : 00000001  r8 : b6f02000
r7 : c7a17180  r6 : c782ded4  r5 : c0015118  r4 : 20000013
r3 : 00000002  r2 : 00100075  r1 : b6f02000  r0 : b6f01002
Flags: nzCv  IRQs off  FIQs on  Mode SVC_32  ISA ARM  Segment none
Control: 00c5787d  Table: 76e5800a  DAC: 00000051
Process init (pid: 1, stack limit = 0xc782c190)
Stack: (0xc782deb8 to 0xc782e000)
dea0:                                                       b6f02000 c6e09408
dec0: c6e09404 b6f02000 b6f02000 c0015378 706db5df c7988f50 b6f01000 b6f02000
dee0: 706db55f c00ad710 00000001 b6f02000 b6f01fff c7988f50 00000181 706db5df
df00: c7fd313c c6e5adc0 c7a17020 b6f01000 c79885b0 00000000 c7988f50 00100075
df20: b6f01000 b6f02000 00000000 00100077 c7a17020 c00ad84c 00000000 00000000
df40: c78c7aa0 00000056 00000000 c7a17058 c782df8c 00000001 00000000 b6f02000
df60: b6f02000 00000005 00000001 b6f01000 c782c000 00000000 bee4ab2c c00ada8c
df80: 00100075 00000000 ffffffff c7988f50 b6f2ef78 b6f2c490 00000000 0000007d
dfa0: c000f624 c000f460 b6f2ef78 b6f2c490 b6f01000 00001000 00000001 b6f01cd8
dfc0: b6f2ef78 b6f2c490 00000000 0000007d b6f2ef78 00000004 00000004 bee4ab2c
dfe0: b6f2d2a8 bee4ab18 b6f24eb0 b6f2214c 80000010 b6f01000 45355559 dd550555
[<c001abf0>] (v6wbi_flush_user_tlb_range) from [<b6f01000>] (0xb6f01000)
Code: e20330ff e1830600 e1a01601 e5922028 (ee080f36)
---[ end trace c90cca4faa737700 ]---
Kernel panic - not syncing: Fatal exception
CPU3: stopping
CPU: 3 PID: 0 Comm: swapper/3 Tainted: G      D
4.3.0-rc4-00015-gf6702681a0af #48
Hardware name: ARM-RealView PB11MPCore
[<c0015f64>] (unwind_backtrace) from [<c0012dc0>] (show_stack+0x10/0x14)
[<c0012dc0>] (show_stack) from [<c01778c4>] (dump_stack+0x84/0x9c)
[<c01778c4>] (dump_stack) from [<c0014f24>] (handle_IPI+0x174/0x1b4)
[<c0014f24>] (handle_IPI) from [<c00094b0>] (gic_handle_irq+0x80/0x8c)
[<c00094b0>] (gic_handle_irq) from [<c00138f4>] (__irq_svc+0x54/0x70)
Exception stack(0xc785bf90 to 0xc785bfd8)
bf80:                                     00003228 00000000 00000000 00000000
bfa0: c785a000 c06edac4 00000000 c06eda78 c06e1284 c785bfe8 c033d738 00000001
bfc0: 00000000 c785bfe0 c000ff58 c000ff5c 60000113 ffffffff
[<c00138f4>] (__irq_svc) from [<c000ff5c>] (arch_cpu_idle+0x28/0x30)
[<c000ff5c>] (arch_cpu_idle) from [<c0052c24>] (cpu_startup_entry+0xf8/0x184)
[<c0052c24>] (cpu_startup_entry) from [<70009548>] (0x70009548)
CPU0: stopping
CPU: 0 PID: 0 Comm: swapper/0 Tainted: G      D
4.3.0-rc4-00015-gf6702681a0af #48
Hardware name: ARM-RealView PB11MPCore
[<c0015f64>] (unwind_backtrace) from [<c0012dc0>] (show_stack+0x10/0x14)
[<c0012dc0>] (show_stack) from [<c01778c4>] (dump_stack+0x84/0x9c)
[<c01778c4>] (dump_stack) from [<c0014f24>] (handle_IPI+0x174/0x1b4)
[<c0014f24>] (handle_IPI) from [<c00094b0>] (gic_handle_irq+0x80/0x8c)
[<c00094b0>] (gic_handle_irq) from [<c00138f4>] (__irq_svc+0x54/0x70)
Exception stack(0xc06e5f58 to 0xc06e5fa0)
5f40:                                                       00002fa4 00000000
5f60: 00000000 00000000 c06e4000 c06edac4 00000000 c06eda78 c06e1284 c06e5fb0
5f80: c033d738 00000001 00000000 c06e5fa8 c000ff58 c000ff5c 60000013 ffffffff
[<c00138f4>] (__irq_svc) from [<c000ff5c>] (arch_cpu_idle+0x28/0x30)
[<c000ff5c>] (arch_cpu_idle) from [<c0052c24>] (cpu_startup_entry+0xf8/0x184)
[<c0052c24>] (cpu_startup_entry) from [<c0448bec>] (start_kernel+0x32c/0x3a0)
CPU1: stopping
CPU: 1 PID: 0 Comm: swapper/1 Tainted: G      D
4.3.0-rc4-00015-gf6702681a0af #48
Hardware name: ARM-RealView PB11MPCore
[<c0015f64>] (unwind_backtrace) from [<c0012dc0>] (show_stack+0x10/0x14)
[<c0012dc0>] (show_stack) from [<c01778c4>] (dump_stack+0x84/0x9c)
[<c01778c4>] (dump_stack) from [<c0014f24>] (handle_IPI+0x174/0x1b4)
[<c0014f24>] (handle_IPI) from [<c00094b0>] (gic_handle_irq+0x80/0x8c)
[<c00094b0>] (gic_handle_irq) from [<c00138f4>] (__irq_svc+0x54/0x70)
Exception stack(0xc7857f90 to 0xc7857fd8)
7f80:                                     0000290a 00000000 00000000 00000000
7fa0: c7856000 c06edac4 00000000 c06eda78 c06e1284 c7857fe8 c033d738 00000001
7fc0: 00000000 c7857fe0 c000ff58 c000ff5c 60000113 ffffffff
[<c00138f4>] (__irq_svc) from [<c000ff5c>] (arch_cpu_idle+0x28/0x30)
[<c000ff5c>] (arch_cpu_idle) from [<c0052c24>] (cpu_startup_entry+0xf8/0x184)
[<c0052c24>] (cpu_startup_entry) from [<70009548>] (0x70009548)
---[ end Kernel panic - not syncing: Fatal exception

(I configured to treat oops as panic so it takes down all CPUs.)

Sometimes I get this instead, earlier:

INFO: rcu_sched detected stalls on CPUs/tasks:
        1: (0 ticks this GP) idle=8af/140000000000000/0 softirq=242/244 fqs=1373
        (detected by 0, t=2103 jiffies, g=-256, c=-257, q=235)
Task dump for CPU 1:
modprobe        R running      0   351    350 0x00000002
[<c032eab4>] (__schedule) from [<c00a2734>] (handle_mm_fault+0x978/0xa9c)
[<c00a2734>] (handle_mm_fault) from [<c0017218>] (do_page_fault+0x1e0/0x2a4)
[<c0017218>] (do_page_fault) from [<c0009310>] (do_DataAbort+0x34/0xb4)
[<c0009310>] (do_DataAbort) from [<c001361c>] (__dabt_usr+0x3c/0x40)
Exception stack(0xc698dfb0 to 0xc698dff8)
dfa0:                                     b6f7cd2c 00000020 0000eed4 b6f7d450
dfc0: b6f7c000 b6f8af78 00000000 00000000 b6f82040 6defe040 be903ec4 be903ebc
dfe0: 00000108 be903cf0 00000021 b6f81490 20000010 ffffffff

Reverting the patch makes everything boot smoothly again.

Feeling kind of clueless on where the problem may be, the first backtrace
seems to be in pure assembly so I'm a bit lost. The second one from
RCU is a bit more clear but I don't know the context of how this is
affected by the patch. Been scratching my head for a while...

Any ideas?

Yours,
Linus Walleij

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-10-09  8:28   ` Linus Walleij
@ 2015-10-09 10:53     ` Will Deacon
  2015-10-09 11:24       ` Russell King - ARM Linux
  0 siblings, 1 reply; 24+ messages in thread
From: Will Deacon @ 2015-10-09 10:53 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Oct 09, 2015 at 10:28:14AM +0200, Linus Walleij wrote:
> On Tue, Aug 25, 2015 at 5:42 PM, Russell King
> <rmk+kernel@arm.linux.org.uk> wrote:
> 
> > Provide a software-based implementation of the priviledged no access
> > support found in ARMv8.1.
> >
> > Userspace pages are mapped using a different domain number from the
> > kernel and IO mappings.  If we switch the user domain to "no access"
> > when we enter the kernel, we can prevent the kernel from touching
> > userspace.
> >
> > However, the kernel needs to be able to access userspace via the
> > various user accessor functions.  With the wrapping in the previous
> > patch, we can temporarily enable access when the kernel needs user
> > access, and re-disable it afterwards.
> >
> > This allows us to trap non-intended accesses to userspace, eg, caused
> > by an inadvertent dereference of the LIST_POISON* values, which, with
> > appropriate user mappings setup, can be made to succeed.  This in turn
> > can allow use-after-free bugs to be further exploited than would
> > otherwise be possible.
> >
> > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> 
> For some reason this patch explodes on my ARM PB11MPCore, it
> is a weird beast and corner case machine so I guess that is why
> it wasn't noticed. This happens a bit into the boot when freeing
> unused pages:
> 
> Freeing unused kernel memory: 2672K (c0448000 - c06e4000)
> Unable to handle kernel paging request at virtual address b6f069f4
> pgd = c6e58000
> [b6f069f4] *pgd=76e09831, *pte=77ff759f, *ppte=77ff7e6e
> Internal error: Oops: 17 [#1] SMP ARM
> Modules linked in:
> CPU: 2 PID: 1 Comm: init Not tainted 4.3.0-rc4-00015-gf6702681a0af #48
> Hardware name: ARM-RealView PB11MPCore
> task: c7827bc0 ti: c782c000 task.ti: c782c000
> PC is at v6wbi_flush_user_tlb_range+0x28/0x48
> LR is at on_each_cpu_mask+0x58/0x60
> pc : [<c001abf0>]    lr : [<c007c18c>]    psr: 20000093
> sp : c782deb8  ip : 00000000  fp : 00000000
> r10: c6e5adc8  r9 : 00000001  r8 : b6f02000
> r7 : c7a17180  r6 : c782ded4  r5 : c0015118  r4 : 20000013
> r3 : 00000002  r2 : 00100075  r1 : b6f02000  r0 : b6f01002
> Flags: nzCv  IRQs off  FIQs on  Mode SVC_32  ISA ARM  Segment none
> Control: 00c5787d  Table: 76e5800a  DAC: 00000051

It looks like we're faulting on the TLBI instruction, because it's
targeting a userspace address (r0 == 0xb6f01002) and the DAC prohibits
access to userspace. It's weird that this only seems to happen on 11MPCore
though; if this core was one of the guys getting cross-called, then I
could understand the bug, but the lr suggests that CPU 2 is initiating
the flush, so I'd expect the same problem to appear on any ARMv6 part.

Russell, have you tried the s/w PAN stuff on any v6 CPUs?

Will

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-10-09 10:53     ` Will Deacon
@ 2015-10-09 11:24       ` Russell King - ARM Linux
  2015-10-09 12:32         ` Will Deacon
                           ` (2 more replies)
  0 siblings, 3 replies; 24+ messages in thread
From: Russell King - ARM Linux @ 2015-10-09 11:24 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Oct 09, 2015 at 11:53:09AM +0100, Will Deacon wrote:
> On Fri, Oct 09, 2015 at 10:28:14AM +0200, Linus Walleij wrote:
> > On Tue, Aug 25, 2015 at 5:42 PM, Russell King
> > <rmk+kernel@arm.linux.org.uk> wrote:
> > 
> > > Provide a software-based implementation of the priviledged no access
> > > support found in ARMv8.1.
> > >
> > > Userspace pages are mapped using a different domain number from the
> > > kernel and IO mappings.  If we switch the user domain to "no access"
> > > when we enter the kernel, we can prevent the kernel from touching
> > > userspace.
> > >
> > > However, the kernel needs to be able to access userspace via the
> > > various user accessor functions.  With the wrapping in the previous
> > > patch, we can temporarily enable access when the kernel needs user
> > > access, and re-disable it afterwards.
> > >
> > > This allows us to trap non-intended accesses to userspace, eg, caused
> > > by an inadvertent dereference of the LIST_POISON* values, which, with
> > > appropriate user mappings setup, can be made to succeed.  This in turn
> > > can allow use-after-free bugs to be further exploited than would
> > > otherwise be possible.
> > >
> > > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> > 
> > For some reason this patch explodes on my ARM PB11MPCore, it
> > is a weird beast and corner case machine so I guess that is why
> > it wasn't noticed. This happens a bit into the boot when freeing
> > unused pages:
> > 
> > Freeing unused kernel memory: 2672K (c0448000 - c06e4000)
> > Unable to handle kernel paging request at virtual address b6f069f4
> > pgd = c6e58000
> > [b6f069f4] *pgd=76e09831, *pte=77ff759f, *ppte=77ff7e6e
> > Internal error: Oops: 17 [#1] SMP ARM
> > Modules linked in:
> > CPU: 2 PID: 1 Comm: init Not tainted 4.3.0-rc4-00015-gf6702681a0af #48
> > Hardware name: ARM-RealView PB11MPCore
> > task: c7827bc0 ti: c782c000 task.ti: c782c000
> > PC is at v6wbi_flush_user_tlb_range+0x28/0x48
> > LR is at on_each_cpu_mask+0x58/0x60
> > pc : [<c001abf0>]    lr : [<c007c18c>]    psr: 20000093
> > sp : c782deb8  ip : 00000000  fp : 00000000
> > r10: c6e5adc8  r9 : 00000001  r8 : b6f02000
> > r7 : c7a17180  r6 : c782ded4  r5 : c0015118  r4 : 20000013
> > r3 : 00000002  r2 : 00100075  r1 : b6f02000  r0 : b6f01002
> > Flags: nzCv  IRQs off  FIQs on  Mode SVC_32  ISA ARM  Segment none
> > Control: 00c5787d  Table: 76e5800a  DAC: 00000051
> 
> It looks like we're faulting on the TLBI instruction, because it's
> targetting a userspace address (r0 == 0xb6f01002) and the DAC prohibits
> access to userspace.

That's kind'a weird - I'd have expected _cache_ operations to fault,
but not TLB operations.  From a quick search of the ARM ARM, I can't
find anything that says that TLB operations are allowed to fault.

> It's weird that this only seems to happen on 11MPCore
> though; if this core was one of the guys getting cross-called, then I
> could understand the bug, but the lr suggests that CPU 2 is initiating
> the flush, so I'd expect the same problem to appear on any ARMv6 part.

It sounds to me like a CPU bug, but one which we need to work around.
ipi_flush_tlb_range() will be the function concerned, we need to
save-and-enable, and then restore the user access state around that
call.

> Russell, have you tried the s/w PAN stuff on any v6 CPUs?

No.  I have considered having the Realview EB board as part of the test
farm, but as that board is hassle to get going, I deem the hardware to
be too unreliable for that.  (I reported the problem at the time.)

Linus, can you try the patch below to see if it resolves the problem
you're seeing please?

 arch/arm/kernel/smp_tlb.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 2e72be4f623e..7cb079e74010 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -9,6 +9,7 @@
  */
 #include <linux/preempt.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
 static inline void ipi_flush_tlb_page(void *arg)
 {
 	struct tlb_args *ta = (struct tlb_args *)arg;
+	unsigned int __ua_flags = uaccess_save_and_enable();
 
 	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
+	uaccess_restore(__ua_flags);
 }
 
 static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
 static inline void ipi_flush_tlb_range(void *arg)
 {
 	struct tlb_args *ta = (struct tlb_args *)arg;
+	unsigned int __ua_flags = uaccess_save_and_enable();
 
 	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
+	uaccess_restore(__ua_flags);
 }
 
 static inline void ipi_flush_tlb_kernel_range(void *arg)


-- 
FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-10-09 11:24       ` Russell King - ARM Linux
@ 2015-10-09 12:32         ` Will Deacon
  2015-10-12  7:51         ` Linus Walleij
  2015-10-23  8:05         ` Linus Walleij
  2 siblings, 0 replies; 24+ messages in thread
From: Will Deacon @ 2015-10-09 12:32 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Oct 09, 2015 at 12:24:18PM +0100, Russell King - ARM Linux wrote:
> On Fri, Oct 09, 2015 at 11:53:09AM +0100, Will Deacon wrote:
> > It looks like we're faulting on the TLBI instruction, because it's
> > targetting a userspace address (r0 == 0xb6f01002) and the DAC prohibits
> > access to userspace.
> 
> That's kind'a weird - I'd have expected _cache_ operations to fault,
> but not TLB operations.  From a quick search of the ARM ARM, I can't
> find anything that says that TLB operations are allowed to fault.

Yes, you're right. I was concentrating on the cross-call scenario and
forgot this was TLB not cache maintenance, so faulting is indeed bizarre.

> > It's weird that this only seems to happen on 11MPCore
> > though; if this core was one of the guys getting cross-called, then I
> > could understand the bug, but the lr suggests that CPU 2 is initiating
> > the flush, so I'd expect the same problem to appear on any ARMv6 part.
> 
> It sounds to me like a CPU bug, but one which we need to work around.
> ipi_flush_tlb_range() will be the function concerned, we need to
> save-and-enable, and then restore the user access state around that
> call.
> 
> > Russell, have you tried the s/w PAN stuff on any v6 CPUs?
> 
> No.  I have considered having the Realview EB board as part of the test
> farm, but as that board is hassle to get going, I deem the hardware to
> be too unreliable for that.  (I reported the problem at the time.)
> 
> Linus, can you try the patch below to see if it resolves the problem
> you're seeing please?

If this fixes things, I'll dig up the errata document to see if this is
a known hardware issue.

Will

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-10-09 11:24       ` Russell King - ARM Linux
  2015-10-09 12:32         ` Will Deacon
@ 2015-10-12  7:51         ` Linus Walleij
  2015-10-23  8:05         ` Linus Walleij
  2 siblings, 0 replies; 24+ messages in thread
From: Linus Walleij @ 2015-10-12  7:51 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Oct 9, 2015 at 1:24 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:

> Linus, can you try the patch below to see if it resolves the problem
> you're seeing please?
>
>  arch/arm/kernel/smp_tlb.c | 7 +++++++
>  1 file changed, 7 insertions(+)
>
> diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
> index 2e72be4f623e..7cb079e74010 100644
> --- a/arch/arm/kernel/smp_tlb.c
> +++ b/arch/arm/kernel/smp_tlb.c
> @@ -9,6 +9,7 @@
>   */
>  #include <linux/preempt.h>
>  #include <linux/smp.h>
> +#include <linux/uaccess.h>
>
>  #include <asm/smp_plat.h>
>  #include <asm/tlbflush.h>
> @@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
>  static inline void ipi_flush_tlb_page(void *arg)
>  {
>         struct tlb_args *ta = (struct tlb_args *)arg;
> +       unsigned int __ua_flags = uaccess_save_and_enable();
>
>         local_flush_tlb_page(ta->ta_vma, ta->ta_start);
> +
> +       uaccess_restore(__ua_flags);
>  }
>
>  static inline void ipi_flush_tlb_kernel_page(void *arg)
> @@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
>  static inline void ipi_flush_tlb_range(void *arg)
>  {
>         struct tlb_args *ta = (struct tlb_args *)arg;
> +       unsigned int __ua_flags = uaccess_save_and_enable();
>
>         local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
> +
> +       uaccess_restore(__ua_flags);
>  }

Yes this makes it rock solid again.
Tested-by: Linus Walleij <linus.walleij@linaro.org>

I guess it will only affect ARMv6 SMP (11MPCore) machines and there
are not so many of them AFAICT, so I guess it's a bit annoying to have
that in the ipi_flush_tlb_kernel_page() for all of the ARM CPUs :(
but it does work.

Yours,
Linus Walleij

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-10-09 11:24       ` Russell King - ARM Linux
  2015-10-09 12:32         ` Will Deacon
  2015-10-12  7:51         ` Linus Walleij
@ 2015-10-23  8:05         ` Linus Walleij
  2015-10-23  8:46           ` Russell King - ARM Linux
  2 siblings, 1 reply; 24+ messages in thread
From: Linus Walleij @ 2015-10-23  8:05 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Oct 9, 2015 at 1:24 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
>  [Will]
>> It's weird that this only seems to happen on 11MPCore
>> though; if this core was one of the guys getting cross-called, then I
>> could understand the bug, but the lr suggests that CPU 2 is initiating
>> the flush, so I'd expect the same problem to appear on any ARMv6 part.
>
> It sounds to me like a CPU bug, but one which we need to work around.
> ipi_flush_tlb_range() will be the function concerned, we need to
> save-and-enable, and then restore the user access state around that
> call.
>
>> Russell, have you tried the s/w PAN stuff on any v6 CPUs?
>
> No.  I have considered having the Realview EB board as part of the test
> farm, but as that board is hassle to get going, I deem the hardware to
> be too unreliable for that.  (I reported the problem at the time.)
>
> Linus, can you try the patch below to see if it resolves the problem
> you're seeing please?
>
>  arch/arm/kernel/smp_tlb.c | 7 +++++++
>  1 file changed, 7 insertions(+)
>
> diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
> index 2e72be4f623e..7cb079e74010 100644
> --- a/arch/arm/kernel/smp_tlb.c
> +++ b/arch/arm/kernel/smp_tlb.c
> @@ -9,6 +9,7 @@
>   */
>  #include <linux/preempt.h>
>  #include <linux/smp.h>
> +#include <linux/uaccess.h>
>
>  #include <asm/smp_plat.h>
>  #include <asm/tlbflush.h>
> @@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
>  static inline void ipi_flush_tlb_page(void *arg)
>  {
>         struct tlb_args *ta = (struct tlb_args *)arg;
> +       unsigned int __ua_flags = uaccess_save_and_enable();
>
>         local_flush_tlb_page(ta->ta_vma, ta->ta_start);
> +
> +       uaccess_restore(__ua_flags);
>  }
>
>  static inline void ipi_flush_tlb_kernel_page(void *arg)
> @@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
>  static inline void ipi_flush_tlb_range(void *arg)
>  {
>         struct tlb_args *ta = (struct tlb_args *)arg;
> +       unsigned int __ua_flags = uaccess_save_and_enable();
>
>         local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
> +
> +       uaccess_restore(__ua_flags);
>  }
>
>  static inline void ipi_flush_tlb_kernel_range(void *arg)

Do we have a solution for this?

I'm carrying the patch and v4.3-rc6 is broken on upstream
RealView PB11MPCore, at least for me. :(

Yours,
Linus Walleij

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-10-23  8:05         ` Linus Walleij
@ 2015-10-23  8:46           ` Russell King - ARM Linux
  2015-10-27 17:11             ` Will Deacon
  0 siblings, 1 reply; 24+ messages in thread
From: Russell King - ARM Linux @ 2015-10-23  8:46 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Oct 23, 2015 at 10:05:06AM +0200, Linus Walleij wrote:
> Do we have a solution for this?
> 
> I'm carrying the patch and v4.3-rc6 is broken on upstream
> RealView PB11MPCore, at least for me. :(

Will was going to do some research and get back to me, but what he was
going to look at has been archived off-site.  Will said he wanted to
reproduce it on the latest silicon, but I haven't heard anything
further and he hasn't been around this week.

-- 
FTTC broadband for 0.8mile line: currently at 9.6Mbps down 400kbps up
according to speedtest.net.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH v2 10/10] ARM: software-based priviledged-no-access support
  2015-10-23  8:46           ` Russell King - ARM Linux
@ 2015-10-27 17:11             ` Will Deacon
  0 siblings, 0 replies; 24+ messages in thread
From: Will Deacon @ 2015-10-27 17:11 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Oct 23, 2015 at 09:46:40AM +0100, Russell King - ARM Linux wrote:
> On Fri, Oct 23, 2015 at 10:05:06AM +0200, Linus Walleij wrote:
> > Do we have a solution for this?
> > 
> > I'm carrying the patch and v4.3-rc6 is broken on upstream
> > RealView PB11MPCore, at least for me. :(
> 
> Will was going to do some research and get back to me, but what he was
> going to look at has been archived off-site.  Will said he wanted to
> reproduce it on the latest silicon, but I haven't heard anything
> further and he hasn't been around this week.

I'm afraid my digging didn't prove to be very enlightening. One thing I'd
like to try, is to see if we can reproduce the problem on an 11MPCore
platform with a CPU > r0p0. However, I don't have access to such a platform
that can run mainline, so without that I think we have to go with Russell's
workaround predicated on a check for 11MPCore.

Will

^ permalink raw reply	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2015-10-27 17:11 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-08-25 15:40 [PATCH v2 00/10] Prevent list poison values from being mapped by userspace processes Russell King - ARM Linux
2015-08-25 15:41 ` [PATCH v2 01/10] ARM: domains: switch to keeping domain value in register Russell King
2015-08-25 15:41 ` [PATCH v2 02/10] ARM: domains: provide domain_mask() Russell King
2015-08-25 15:41 ` [PATCH v2 03/10] ARM: domains: move initial domain setting value to asm/domains.h Russell King
2015-08-25 15:41 ` [PATCH v2 04/10] ARM: domains: get rid of manager mode for user domain Russell King
2015-08-25 15:41 ` [PATCH v2 05/10] ARM: domains: keep vectors in separate domain Russell King
2015-08-25 15:41 ` [PATCH v2 06/10] ARM: domains: remove DOMAIN_TABLE Russell King
2015-08-25 15:41 ` [PATCH v2 07/10] ARM: mm: improve do_ldrd_abort macro Russell King
2015-08-25 15:41 ` [PATCH v2 08/10] ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore() Russell King
2015-08-25 15:42 ` [PATCH v2 09/10] ARM: entry: provide uaccess assembly macro hooks Russell King
2015-08-25 15:42 ` [PATCH v2 10/10] ARM: software-based priviledged-no-access support Russell King
2015-08-25 16:53   ` Will Deacon
2015-08-25 17:07   ` Nicolas Schichan
2015-08-25 17:48     ` Russell King - ARM Linux
2015-08-26 13:36       ` Nicolas Schichan
2015-10-09  8:28   ` Linus Walleij
2015-10-09 10:53     ` Will Deacon
2015-10-09 11:24       ` Russell King - ARM Linux
2015-10-09 12:32         ` Will Deacon
2015-10-12  7:51         ` Linus Walleij
2015-10-23  8:05         ` Linus Walleij
2015-10-23  8:46           ` Russell King - ARM Linux
2015-10-27 17:11             ` Will Deacon
2015-08-25 16:37 ` [PATCH v2 11/10] ARM: fix swp-emulate Russell King

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.