linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/4] Kernel Userspace Protection for Radix MMU
@ 2018-11-22 14:04 Russell Currey
  2018-11-22 14:04 ` [PATCH 1/4] powerpc: Track KUAP state in the PACA Russell Currey
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Russell Currey @ 2018-11-22 14:04 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: kernel-hardening, Russell Currey

Back again, this time based on top of Christophe Leroy's series:
http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=74541

With the magic perk of being the snowpatch maintainer, I will try to get
snowpatch to apply this series on top of that series, so that the test
results aren't all failures.

There aren't really any major functional changes, just reworking what I
already did to fit into Christophe's framework.  The biggest change is that
execution prevention is now optional, and all the radix-specific code
is now in the radix-specific place where it should be.

Russell Currey (4):
  powerpc: Track KUAP state in the PACA
  powerpc/64: Setup KUP before feature fixups
  powerpc/mm/radix: Use KUEP API for Radix MMU
  powerpc/64s: Implement KUAP for Radix MMU

 arch/powerpc/include/asm/book3s/64/radix.h   | 43 ++++++++++++++++++++
 arch/powerpc/include/asm/exception-64e.h     |  3 ++
 arch/powerpc/include/asm/exception-64s.h     | 19 ++++++++-
 arch/powerpc/include/asm/mmu.h               |  9 +++-
 arch/powerpc/include/asm/nohash/32/pte-8xx.h |  8 ++--
 arch/powerpc/include/asm/paca.h              |  3 ++
 arch/powerpc/include/asm/reg.h               |  1 +
 arch/powerpc/include/asm/uaccess.h           | 23 ++++++++++-
 arch/powerpc/kernel/asm-offsets.c            |  1 +
 arch/powerpc/kernel/entry_64.S               | 16 +++++++-
 arch/powerpc/kernel/setup_64.c               |  7 +++-
 arch/powerpc/mm/pgtable-radix.c              | 21 ++++++++--
 arch/powerpc/mm/pkeys.c                      |  7 +++-
 arch/powerpc/platforms/Kconfig.cputype       |  2 +
 14 files changed, 148 insertions(+), 15 deletions(-)

-- 
2.19.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 1/4] powerpc: Track KUAP state in the PACA
  2018-11-22 14:04 [PATCH 0/4] Kernel Userspace Protection for Radix MMU Russell Currey
@ 2018-11-22 14:04 ` Russell Currey
  2018-11-28  9:38   ` Christophe Leroy
  2018-11-22 14:04 ` [PATCH 2/4] powerpc/64: Setup KUP before feature fixups Russell Currey
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Russell Currey @ 2018-11-22 14:04 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: kernel-hardening, Russell Currey

Necessary for subsequent patches that enable KUAP support for radix.
Could plausibly be useful for other platforms too if, as in the
radix case, reading the register that manages these accesses is
costly.

Has the unfortunate downside of another layer of abstraction for
platforms that implement the locks and unlocks, but this could be
useful in future for other things too, like counters for benchmarking
or smartly handling lots of small accesses at once.

Signed-off-by: Russell Currey <ruscur@russell.cc>
---
this is all because I can't do PACA things from radix.h and I spent
an hour figuring this out at midnight
---
 arch/powerpc/include/asm/nohash/32/pte-8xx.h |  8 +++----
 arch/powerpc/include/asm/paca.h              |  3 +++
 arch/powerpc/include/asm/uaccess.h           | 23 +++++++++++++++++++-
 arch/powerpc/kernel/asm-offsets.c            |  1 +
 4 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index f1ec7cf949d5..7bc0955a56e9 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -137,22 +137,22 @@ static inline pte_t pte_mkhuge(pte_t pte)
 #define pte_mkhuge pte_mkhuge
 
 #ifdef CONFIG_PPC_KUAP
-static inline void lock_user_wr_access(void)
+static inline void __lock_user_wr_access(void)
 {
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 
-static inline void unlock_user_wr_access(void)
+static inline void __unlock_user_wr_access(void)
 {
 	mtspr(SPRN_MD_AP, MD_APG_INIT);
 }
 
-static inline void lock_user_rd_access(void)
+static inline void __lock_user_rd_access(void)
 {
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 
-static inline void unlock_user_rd_access(void)
+static inline void __unlock_user_rd_access(void)
 {
 	mtspr(SPRN_MD_AP, MD_APG_INIT);
 }
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e843bc5d1a0f..56236f6d8c89 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -169,6 +169,9 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls or PM or EE=0 */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u16 trap_save;			/* Used when bad stack is encountered */
+#ifdef CONFIG_PPC_KUAP
+	u8 user_access_allowed;		/* can the kernel access user memory? */
+#endif
 	u8 irq_soft_mask;		/* mask for irq soft masking */
 	u8 irq_happened;		/* irq happened while soft-disabled */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 2f3625cbfcee..76dae1095f7e 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -63,7 +63,28 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
 
 #endif
 
-#ifndef CONFIG_PPC_KUAP
+#ifdef CONFIG_PPC_KUAP
+static inline void unlock_user_rd_access(void)
+{
+	__unlock_user_rd_access();
+	get_paca()->user_access_allowed = 1;
+}
+static inline void lock_user_rd_access(void)
+{
+	__lock_user_rd_access();
+	get_paca()->user_access_allowed = 0;
+}
+static inline void unlock_user_wr_access(void)
+{
+	__unlock_user_wr_access();
+	get_paca()->user_access_allowed = 1;
+}
+static inline void lock_user_wr_access(void)
+{
+	__lock_user_wr_access();
+	get_paca()->user_access_allowed = 0;
+}
+#else
 static inline void unlock_user_rd_access(void) { }
 static inline void lock_user_rd_access(void) { }
 static inline void unlock_user_wr_access(void) { }
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index da2f5d011ddb..899e9835b45f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -260,6 +260,7 @@ int main(void)
 	OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
 	OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
 	OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
+	OFFSET(PACA_USER_ACCESS_ALLOWED, paca_struct, user_access_allowed);
 	OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
 	OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost);
 	OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
-- 
2.19.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 2/4] powerpc/64: Setup KUP before feature fixups
  2018-11-22 14:04 [PATCH 0/4] Kernel Userspace Protection for Radix MMU Russell Currey
  2018-11-22 14:04 ` [PATCH 1/4] powerpc: Track KUAP state in the PACA Russell Currey
@ 2018-11-22 14:04 ` Russell Currey
  2018-11-28  9:38   ` Christophe Leroy
  2018-11-22 14:04 ` [PATCH 3/4] powerpc/mm/radix: Use KUEP API for Radix MMU Russell Currey
  2018-11-22 14:04 ` [PATCH 4/4] powerpc/64s: Implement KUAP " Russell Currey
  3 siblings, 1 reply; 8+ messages in thread
From: Russell Currey @ 2018-11-22 14:04 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: kernel-hardening, Russell Currey

The subsequent implementation of KUAP for radix makes use of an MMU
feature in order to patch out assembly when KUAP is disabled or
unsupported.  This won't work unless there's an entry point for
KUP support before the feature magic happens, so relocate
setup_kup() earlier in setup.

Signed-off-by: Russell Currey <ruscur@russell.cc>
---
 arch/powerpc/kernel/setup_64.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0f4e06ab70a5..cc20dc3e7b69 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -331,6 +331,12 @@ void __init early_setup(unsigned long dt_ptr)
 	 */
 	configure_exceptions();
 
+	/*
+	 * Configure Kernel Userspace Protection. This needs to happen before
+	 * feature fixups for platforms that implement this using features.
+	 */
+	setup_kup();
+
 	/* Apply all the dynamic patching */
 	apply_feature_fixups();
 	setup_feature_keys();
@@ -372,7 +378,6 @@ void __init early_setup(unsigned long dt_ptr)
 	 */
 	btext_map();
 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
-	setup_kup();
 }
 
 #ifdef CONFIG_SMP
-- 
2.19.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 3/4] powerpc/mm/radix: Use KUEP API for Radix MMU
  2018-11-22 14:04 [PATCH 0/4] Kernel Userspace Protection for Radix MMU Russell Currey
  2018-11-22 14:04 ` [PATCH 1/4] powerpc: Track KUAP state in the PACA Russell Currey
  2018-11-22 14:04 ` [PATCH 2/4] powerpc/64: Setup KUP before feature fixups Russell Currey
@ 2018-11-22 14:04 ` Russell Currey
  2018-11-22 14:04 ` [PATCH 4/4] powerpc/64s: Implement KUAP " Russell Currey
  3 siblings, 0 replies; 8+ messages in thread
From: Russell Currey @ 2018-11-22 14:04 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: kernel-hardening, Russell Currey

Execution protection already exists on radix, this just refactors
the radix init to provide the KUEP setup function instead.

Thus, the only functional change is that it can now be disabled.

Signed-off-by: Russell Currey <ruscur@russell.cc>
---
 arch/powerpc/mm/pgtable-radix.c        | 9 ++++++---
 arch/powerpc/platforms/Kconfig.cputype | 1 +
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 931156069a81..f08a459b4255 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -535,8 +535,13 @@ static void radix_init_amor(void)
 	mtspr(SPRN_AMOR, (3ul << 62));
 }
 
-static void radix_init_iamr(void)
+void setup_kuep(bool disabled)
 {
+	if (disabled)
+		return;
+
+	pr_warn("Activating Kernel Userspace Execution Prevention\n");
+
 	/*
 	 * Radix always uses key0 of the IAMR to determine if an access is
 	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
@@ -605,7 +610,6 @@ void __init radix__early_init_mmu(void)
 
 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 
-	radix_init_iamr();
 	radix_init_pgtable();
 	/* Switch to the guard PID before turning on MMU */
 	radix__switch_mmu_context(NULL, &init_mm);
@@ -627,7 +631,6 @@ void radix__early_init_mmu_secondary(void)
 		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
 		radix_init_amor();
 	}
-	radix_init_iamr();
 
 	radix__switch_mmu_context(NULL, &init_mm);
 	if (cpu_has_feature(CPU_FTR_HVMODE))
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a20669a9ec13..e6831d0ec159 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -334,6 +334,7 @@ config PPC_RADIX_MMU
 	bool "Radix MMU Support"
 	depends on PPC_BOOK3S_64
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+	select PPC_HAVE_KUEP
 	default y
 	help
 	  Enable support for the Power ISA 3.0 Radix style MMU. Currently this
-- 
2.19.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 4/4] powerpc/64s: Implement KUAP for Radix MMU
  2018-11-22 14:04 [PATCH 0/4] Kernel Userspace Protection for Radix MMU Russell Currey
                   ` (2 preceding siblings ...)
  2018-11-22 14:04 ` [PATCH 3/4] powerpc/mm/radix: Use KUEP API for Radix MMU Russell Currey
@ 2018-11-22 14:04 ` Russell Currey
  2018-11-28  9:39   ` Christophe Leroy
  3 siblings, 1 reply; 8+ messages in thread
From: Russell Currey @ 2018-11-22 14:04 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: kernel-hardening, Russell Currey

Kernel Userspace Access Prevention utilises a feature of
the Radix MMU which disallows read and write access to userspace
addresses.  By utilising this, the kernel is prevented from accessing
user data from outside of trusted paths that perform proper safety checks,
such as copy_{to/from}_user() and friends.

Userspace access is disabled from early boot and is only enabled when:

        - exiting the kernel and entering userspace
        - performing an operation like copy_{to/from}_user()
        - context switching to a process that has access enabled

and similarly, access is disabled again when exiting userspace and entering
the kernel.

This feature has a slight performance impact which I roughly measured to be
3% slower in the worst case (performing 1GB of 1 byte read()/write()
syscalls), and is gated behind the CONFIG_PPC_KUAP option for
performance-critical builds.

This feature can be tested by using the lkdtm driver (CONFIG_LKDTM=y) and
performing the following:

        echo ACCESS_USERSPACE > [debugfs]/provoke-crash/DIRECT

If enabled, this should send SIGSEGV to the thread.

Signed-off-by: Russell Currey <ruscur@russell.cc>
---
 arch/powerpc/include/asm/book3s/64/radix.h | 43 ++++++++++++++++++++++
 arch/powerpc/include/asm/exception-64e.h   |  3 ++
 arch/powerpc/include/asm/exception-64s.h   | 19 +++++++++-
 arch/powerpc/include/asm/mmu.h             |  9 ++++-
 arch/powerpc/include/asm/reg.h             |  1 +
 arch/powerpc/kernel/entry_64.S             | 16 +++++++-
 arch/powerpc/mm/pgtable-radix.c            | 12 ++++++
 arch/powerpc/mm/pkeys.c                    |  7 +++-
 arch/powerpc/platforms/Kconfig.cputype     |  1 +
 9 files changed, 105 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 7d1a3d1543fc..9af93d05e6fa 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -284,5 +284,48 @@ static inline unsigned long radix__get_tree_size(void)
 int radix__create_section_mapping(unsigned long start, unsigned long end, int nid);
 int radix__remove_section_mapping(unsigned long start, unsigned long end);
 #endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_PPC_KUAP
+#include <asm/reg.h>
+/*
+ * We do have the ability to individually lock/unlock reads and writes rather
+ * than both at once, however it's a significant performance hit due to needing
+ * to do a read-modify-write, which adds a mfspr, which is slow.  As a result,
+ * locking/unlocking both at once is preferred.
+ */
+static inline void __unlock_user_rd_access(void)
+{
+	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
+		return;
+
+	mtspr(SPRN_AMR, 0);
+	isync();
+}
+
+static inline void __lock_user_rd_access(void)
+{
+	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
+		return;
+
+	mtspr(SPRN_AMR, AMR_LOCKED);
+}
+
+static inline void __unlock_user_wr_access(void)
+{
+	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
+		return;
+
+	mtspr(SPRN_AMR, 0);
+	isync();
+}
+
+static inline void __lock_user_wr_access(void)
+{
+	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
+		return;
+
+	mtspr(SPRN_AMR, AMR_LOCKED);
+}
+#endif /* CONFIG_PPC_KUAP */
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 555e22d5e07f..bf25015834ee 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -215,5 +215,8 @@ exc_##label##_book3e:
 #define RFI_TO_USER							\
 	rfi
 
+#define UNLOCK_USER_ACCESS(reg)
+#define LOCK_USER_ACCESS(reg)
+
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 3b4767ed3ec5..d92614c66d87 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -264,6 +264,19 @@ BEGIN_FTR_SECTION_NESTED(943)						\
 	std	ra,offset(r13);						\
 END_FTR_SECTION_NESTED(ftr,ftr,943)
 
+#define LOCK_USER_ACCESS(reg)							\
+BEGIN_MMU_FTR_SECTION_NESTED(944)					\
+	LOAD_REG_IMMEDIATE(reg,AMR_LOCKED);				\
+	mtspr	SPRN_AMR,reg;						\
+END_MMU_FTR_SECTION_NESTED(MMU_FTR_RADIX_KUAP,MMU_FTR_RADIX_KUAP,944)
+
+#define UNLOCK_USER_ACCESS(reg)							\
+BEGIN_MMU_FTR_SECTION_NESTED(945)					\
+	li	reg,0;							\
+	mtspr	SPRN_AMR,reg;						\
+	isync;								\
+END_MMU_FTR_SECTION_NESTED(MMU_FTR_RADIX_KUAP,MMU_FTR_RADIX_KUAP,945)
+
 #define EXCEPTION_PROLOG_0(area)					\
 	GET_PACA(r13);							\
 	std	r9,area+EX_R9(r13);	/* save r9 */			\
@@ -500,7 +513,11 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	beq	4f;			/* if from kernel mode		*/ \
 	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);				   \
 	SAVE_PPR(area, r9);						   \
-4:	EXCEPTION_PROLOG_COMMON_2(area)					   \
+4:	lbz	r9,PACA_USER_ACCESS_ALLOWED(r13);			   \
+	cmpwi	cr1,r9,0;						   \
+	beq	5f;							   \
+	LOCK_USER_ACCESS(r9);						   \
+5:	EXCEPTION_PROLOG_COMMON_2(area)					\
 	EXCEPTION_PROLOG_COMMON_3(n)					   \
 	ACCOUNT_STOLEN_TIME
 
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 5631a906af55..a1450d56d0db 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -107,6 +107,10 @@
  */
 #define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)
 
+/* Supports KUAP (key 0 controlling userspace addresses) on radix
+ */
+#define MMU_FTR_RADIX_KUAP		ASM_CONST(0x80000000)
+
 /* MMU feature bit sets for various CPUs */
 #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
 	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
@@ -143,7 +147,10 @@ enum {
 		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
 #ifdef CONFIG_PPC_RADIX_MMU
 		MMU_FTR_TYPE_RADIX |
-#endif
+#ifdef CONFIG_PPC_KUAP
+		MMU_FTR_RADIX_KUAP |
+#endif /* CONFIG_PPC_KUAP */
+#endif /* CONFIG_PPC_RADIX_MMU */
 		0,
 };
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index de52c3166ba4..d9598e6790d8 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -246,6 +246,7 @@
 #define SPRN_DSCR	0x11
 #define SPRN_CFAR	0x1c	/* Come From Address Register */
 #define SPRN_AMR	0x1d	/* Authority Mask Register */
+#define   AMR_LOCKED	0xC000000000000000UL /* Read & Write disabled */
 #define SPRN_UAMOR	0x9d	/* User Authority Mask Override Register */
 #define SPRN_AMOR	0x15d	/* Authority Mask Override Register */
 #define SPRN_ACOP	0x1F	/* Available Coprocessor Register */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 7b1693adff2a..d5879f32bd34 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -297,7 +297,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	b	.	/* prevent speculative execution */
 
 	/* exit to kernel */
-1:	ld	r2,GPR2(r1)
+1:	/* if the AMR was unlocked before, unlock it again */
+	lbz	r2,PACA_USER_ACCESS_ALLOWED(r13)
+	cmpwi	cr1,0
+	bne	2f
+	UNLOCK_USER_ACCESS(r2)
+2:	ld	r2,GPR2(r1)
 	ld	r1,GPR1(r1)
 	mtlr	r4
 	mtcr	r5
@@ -983,7 +988,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	RFI_TO_USER
 	b	.	/* prevent speculative execution */
 
-1:	mtspr	SPRN_SRR1,r3
+1:	/* exit to kernel */
+	/* if the AMR was unlocked before, unlock it again */
+	lbz	r2,PACA_USER_ACCESS_ALLOWED(r13)
+	cmpwi	cr1,0
+	bne	2f
+	UNLOCK_USER_ACCESS(r2)
+
+2:	mtspr	SPRN_SRR1,r3
 
 	ld	r2,_CCR(r1)
 	mtcrf	0xFF,r2
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index f08a459b4255..b064b542e09f 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -29,6 +29,7 @@
 #include <asm/powernv.h>
 #include <asm/sections.h>
 #include <asm/trace.h>
+#include <asm/uaccess.h>
 
 #include <trace/events/thp.h>
 
@@ -550,6 +551,17 @@ void setup_kuep(bool disabled)
 	mtspr(SPRN_IAMR, (1ul << 62));
 }
 
+void setup_kuap(bool disabled)
+{
+	if (disabled)
+		return;
+
+	pr_warn("Activating Kernel Userspace Access Prevention\n");
+
+	cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
+	mtspr(SPRN_AMR, AMR_LOCKED);
+}
+
 void __init radix__early_init_mmu(void)
 {
 	unsigned long lpcr;
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index b271b283c785..bb3cf915016f 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -7,6 +7,7 @@
 
 #include <asm/mman.h>
 #include <asm/setup.h>
+#include <asm/uaccess.h>
 #include <linux/pkeys.h>
 #include <linux/of_device.h>
 
@@ -266,7 +267,8 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 
 void thread_pkey_regs_save(struct thread_struct *thread)
 {
-	if (static_branch_likely(&pkey_disabled))
+	if (static_branch_likely(&pkey_disabled) &&
+	    !mmu_has_feature(MMU_FTR_RADIX_KUAP))
 		return;
 
 	/*
@@ -280,7 +282,8 @@ void thread_pkey_regs_save(struct thread_struct *thread)
 void thread_pkey_regs_restore(struct thread_struct *new_thread,
 			      struct thread_struct *old_thread)
 {
-	if (static_branch_likely(&pkey_disabled))
+	if (static_branch_likely(&pkey_disabled) &&
+	    !mmu_has_feature(MMU_FTR_RADIX_KUAP))
 		return;
 
 	if (old_thread->amr != new_thread->amr)
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e6831d0ec159..5fbfa041194d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -335,6 +335,7 @@ config PPC_RADIX_MMU
 	depends on PPC_BOOK3S_64
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
 	select PPC_HAVE_KUEP
+	select PPC_HAVE_KUAP
 	default y
 	help
 	  Enable support for the Power ISA 3.0 Radix style MMU. Currently this
-- 
2.19.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH 1/4] powerpc: Track KUAP state in the PACA
  2018-11-22 14:04 ` [PATCH 1/4] powerpc: Track KUAP state in the PACA Russell Currey
@ 2018-11-28  9:38   ` Christophe Leroy
  0 siblings, 0 replies; 8+ messages in thread
From: Christophe Leroy @ 2018-11-28  9:38 UTC (permalink / raw)
  To: Russell Currey, linuxppc-dev; +Cc: kernel-hardening

On 11/22/2018 02:04 PM, Russell Currey wrote:
> Necessary for subsequent patches that enable KUAP support for radix.
> Could plausibly be useful for other platforms too, if similar to the
> radix case, reading the register that manages these accesses is
> costly.
> 
> Has the unfortunate downside of another layer of abstraction for
> platforms that implement the locks and unlocks, but this could be
> useful in future for other things too, like counters for benchmarking
> or smartly handling lots of small accesses at once.
> 
> Signed-off-by: Russell Currey <ruscur@russell.cc>

Build failure.

[root@po14163vm linux-powerpc]# make mpc885_ads_defconfig
#
# configuration written to .config
#
[root@po14163vm linux-powerpc]# make
scripts/kconfig/conf  --syncconfig Kconfig
   UPD     include/config/kernel.release
   UPD     include/generated/utsrelease.h
   CC      kernel/bounds.s
   CC      arch/powerpc/kernel/asm-offsets.s
In file included from ./include/linux/uaccess.h:14:0,
                  from ./include/linux/compat.h:19,
                  from arch/powerpc/kernel/asm-offsets.c:16:
./arch/powerpc/include/asm/uaccess.h: In function ‘unlock_user_rd_access’:
./arch/powerpc/include/asm/uaccess.h:70:2: error: implicit declaration 
of function ‘get_paca’ [-Werror=implicit-function-declaration]
   get_paca()->user_access_allowed = 1;
   ^
./arch/powerpc/include/asm/uaccess.h:70:12: error: invalid type argument 
of ‘->’ (have ‘int’)
   get_paca()->user_access_allowed = 1;
             ^
./arch/powerpc/include/asm/uaccess.h: In function ‘lock_user_rd_access’:
./arch/powerpc/include/asm/uaccess.h:75:12: error: invalid type argument 
of ‘->’ (have ‘int’)
   get_paca()->user_access_allowed = 0;
             ^
./arch/powerpc/include/asm/uaccess.h: In function ‘unlock_user_wr_access’:
./arch/powerpc/include/asm/uaccess.h:80:12: error: invalid type argument 
of ‘->’ (have ‘int’)
   get_paca()->user_access_allowed = 1;
             ^
./arch/powerpc/include/asm/uaccess.h: In function ‘lock_user_wr_access’:
./arch/powerpc/include/asm/uaccess.h:85:12: error: invalid type argument 
of ‘->’ (have ‘int’)
   get_paca()->user_access_allowed = 0;
             ^
cc1: some warnings being treated as errors
make[1]: *** [arch/powerpc/kernel/asm-offsets.s] Error 1
make: *** [prepare0] Error 2

Christophe

> ---
> this is all because I can't do PACA things from radix.h and I spent
> an hour figuring this out at midnight
> ---
>   arch/powerpc/include/asm/nohash/32/pte-8xx.h |  8 +++----
>   arch/powerpc/include/asm/paca.h              |  3 +++
>   arch/powerpc/include/asm/uaccess.h           | 23 +++++++++++++++++++-
>   arch/powerpc/kernel/asm-offsets.c            |  1 +
>   4 files changed, 30 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> index f1ec7cf949d5..7bc0955a56e9 100644
> --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
> @@ -137,22 +137,22 @@ static inline pte_t pte_mkhuge(pte_t pte)
>   #define pte_mkhuge pte_mkhuge
>   
>   #ifdef CONFIG_PPC_KUAP
> -static inline void lock_user_wr_access(void)
> +static inline void __lock_user_wr_access(void)
>   {
>   	mtspr(SPRN_MD_AP, MD_APG_KUAP);
>   }
>   
> -static inline void unlock_user_wr_access(void)
> +static inline void __unlock_user_wr_access(void)
>   {
>   	mtspr(SPRN_MD_AP, MD_APG_INIT);
>   }
>   
> -static inline void lock_user_rd_access(void)
> +static inline void __lock_user_rd_access(void)
>   {
>   	mtspr(SPRN_MD_AP, MD_APG_KUAP);
>   }
>   
> -static inline void unlock_user_rd_access(void)
> +static inline void __unlock_user_rd_access(void)
>   {
>   	mtspr(SPRN_MD_AP, MD_APG_INIT);
>   }
> diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
> index e843bc5d1a0f..56236f6d8c89 100644
> --- a/arch/powerpc/include/asm/paca.h
> +++ b/arch/powerpc/include/asm/paca.h
> @@ -169,6 +169,9 @@ struct paca_struct {
>   	u64 saved_r1;			/* r1 save for RTAS calls or PM or EE=0 */
>   	u64 saved_msr;			/* MSR saved here by enter_rtas */
>   	u16 trap_save;			/* Used when bad stack is encountered */
> +#ifdef CONFIG_PPC_KUAP
> +	u8 user_access_allowed;		/* can the kernel access user memory? */
> +#endif
>   	u8 irq_soft_mask;		/* mask for irq soft masking */
>   	u8 irq_happened;		/* irq happened while soft-disabled */
>   	u8 io_sync;			/* writel() needs spin_unlock sync */
> diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
> index 2f3625cbfcee..76dae1095f7e 100644
> --- a/arch/powerpc/include/asm/uaccess.h
> +++ b/arch/powerpc/include/asm/uaccess.h
> @@ -63,7 +63,28 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
>   
>   #endif
>   
> -#ifndef CONFIG_PPC_KUAP
> +#ifdef CONFIG_PPC_KUAP
> +static inline void unlock_user_rd_access(void)
> +{
> +	__unlock_user_rd_access();
> +	get_paca()->user_access_allowed = 1;
> +}
> +static inline void lock_user_rd_access(void)
> +{
> +	__lock_user_rd_access();
> +	get_paca()->user_access_allowed = 0;
> +}
> +static inline void unlock_user_wr_access(void)
> +{
> +	__unlock_user_wr_access();
> +	get_paca()->user_access_allowed = 1;
> +}
> +static inline void lock_user_wr_access(void)
> +{
> +	__lock_user_wr_access();
> +	get_paca()->user_access_allowed = 0;
> +}
> +#else
>   static inline void unlock_user_rd_access(void) { }
>   static inline void lock_user_rd_access(void) { }
>   static inline void unlock_user_wr_access(void) { }
> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
> index da2f5d011ddb..899e9835b45f 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -260,6 +260,7 @@ int main(void)
>   	OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
>   	OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
>   	OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
> +	OFFSET(PACA_USER_ACCESS_ALLOWED, paca_struct, user_access_allowed);
>   	OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
>   	OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost);
>   	OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
> 

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 2/4] powerpc/64: Setup KUP before feature fixups
  2018-11-22 14:04 ` [PATCH 2/4] powerpc/64: Setup KUP before feature fixups Russell Currey
@ 2018-11-28  9:38   ` Christophe Leroy
  0 siblings, 0 replies; 8+ messages in thread
From: Christophe Leroy @ 2018-11-28  9:38 UTC (permalink / raw)
  To: Russell Currey, linuxppc-dev; +Cc: kernel-hardening



On 11/22/2018 02:04 PM, Russell Currey wrote:
> The subsequent implementation of KUAP for radix makes use of a MMU
> feature in order to patch out assembly when KUAP is disabled or
> unsupported.  This won't work unless there's an entry point for
> KUP support before the feature magic happens, so relocate
> setup_kup() earlier in setup.
> 
> Signed-off-by: Russell Currey <ruscur@russell.cc>

I squashed it in my RFC v2

Christophe

> ---
>   arch/powerpc/kernel/setup_64.c | 7 ++++++-
>   1 file changed, 6 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
> index 0f4e06ab70a5..cc20dc3e7b69 100644
> --- a/arch/powerpc/kernel/setup_64.c
> +++ b/arch/powerpc/kernel/setup_64.c
> @@ -331,6 +331,12 @@ void __init early_setup(unsigned long dt_ptr)
>   	 */
>   	configure_exceptions();
>   
> +	/*
> +	 * Configure Kernel Userspace Protection. This needs to happen before
> +	 * feature fixups for platforms that implement this using features.
> +	 */
> +	setup_kup();
> +
>   	/* Apply all the dynamic patching */
>   	apply_feature_fixups();
>   	setup_feature_keys();
> @@ -372,7 +378,6 @@ void __init early_setup(unsigned long dt_ptr)
>   	 */
>   	btext_map();
>   #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
> -	setup_kup();
>   }
>   
>   #ifdef CONFIG_SMP
> 

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 4/4] powerpc/64s: Implement KUAP for Radix MMU
  2018-11-22 14:04 ` [PATCH 4/4] powerpc/64s: Implement KUAP " Russell Currey
@ 2018-11-28  9:39   ` Christophe Leroy
  0 siblings, 0 replies; 8+ messages in thread
From: Christophe Leroy @ 2018-11-28  9:39 UTC (permalink / raw)
  To: Russell Currey, linuxppc-dev; +Cc: kernel-hardening



On 11/22/2018 02:04 PM, Russell Currey wrote:
> Kernel Userspace Access Prevention utilises a feature of
> the Radix MMU which disallows read and write access to userspace
> addresses.  By utilising this, the kernel is prevented from accessing
> user data from outside of trusted paths that perform proper safety checks,
> such as copy_{to/from}_user() and friends.
> 
> Userspace access is disabled from early boot and is only enabled when:
> 
>          - exiting the kernel and entering userspace
>          - performing an operation like copy_{to/from}_user()
>          - context switching to a process that has access enabled
> 
> and similarly, access is disabled again when exiting userspace and entering
> the kernel.
> 
> This feature has a slight performance impact which I roughly measured to be
> 3% slower in the worst case (performing 1GB of 1 byte read()/write()
> syscalls), and is gated behind the CONFIG_PPC_KUAP option for
> performance-critical builds.
> 
> This feature can be tested by using the lkdtm driver (CONFIG_LKDTM=y) and
> performing the following:
> 
>          echo ACCESS_USERSPACE > [debugfs]/provoke-crash/DIRECT
> 
> if enabled, this should send SIGSEGV to the thread.
> 
> Signed-off-by: Russell Currey <ruscur@russell.cc>

I squashed the paca thing into this one in RFC v2

Christophe

> ---
>   arch/powerpc/include/asm/book3s/64/radix.h | 43 ++++++++++++++++++++++
>   arch/powerpc/include/asm/exception-64e.h   |  3 ++
>   arch/powerpc/include/asm/exception-64s.h   | 19 +++++++++-
>   arch/powerpc/include/asm/mmu.h             |  9 ++++-
>   arch/powerpc/include/asm/reg.h             |  1 +
>   arch/powerpc/kernel/entry_64.S             | 16 +++++++-
>   arch/powerpc/mm/pgtable-radix.c            | 12 ++++++
>   arch/powerpc/mm/pkeys.c                    |  7 +++-
>   arch/powerpc/platforms/Kconfig.cputype     |  1 +
>   9 files changed, 105 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
> index 7d1a3d1543fc..9af93d05e6fa 100644
> --- a/arch/powerpc/include/asm/book3s/64/radix.h
> +++ b/arch/powerpc/include/asm/book3s/64/radix.h
> @@ -284,5 +284,48 @@ static inline unsigned long radix__get_tree_size(void)
>   int radix__create_section_mapping(unsigned long start, unsigned long end, int nid);
>   int radix__remove_section_mapping(unsigned long start, unsigned long end);
>   #endif /* CONFIG_MEMORY_HOTPLUG */
> +
> +#ifdef CONFIG_PPC_KUAP
> +#include <asm/reg.h>
> +/*
> + * We do have the ability to individually lock/unlock reads and writes rather
> + * than both at once, however it's a significant performance hit due to needing
> + * to do a read-modify-write, which adds a mfspr, which is slow.  As a result,
> + * locking/unlocking both at once is preferred.
> + */
> +static inline void __unlock_user_rd_access(void)
> +{
> +	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
> +		return;
> +
> +	mtspr(SPRN_AMR, 0);
> +	isync();
> +}
> +
> +static inline void __lock_user_rd_access(void)
> +{
> +	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
> +		return;
> +
> +	mtspr(SPRN_AMR, AMR_LOCKED);
> +}
> +
> +static inline void __unlock_user_wr_access(void)
> +{
> +	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
> +		return;
> +
> +	mtspr(SPRN_AMR, 0);
> +	isync();
> +}
> +
> +static inline void __lock_user_wr_access(void)
> +{
> +	if (!mmu_has_feature(MMU_FTR_RADIX_KUAP))
> +		return;
> +
> +	mtspr(SPRN_AMR, AMR_LOCKED);
> +}
> +#endif /* CONFIG_PPC_KUAP */
>   #endif /* __ASSEMBLY__ */
>   #endif
> diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
> index 555e22d5e07f..bf25015834ee 100644
> --- a/arch/powerpc/include/asm/exception-64e.h
> +++ b/arch/powerpc/include/asm/exception-64e.h
> @@ -215,5 +215,8 @@ exc_##label##_book3e:
>   #define RFI_TO_USER							\
>   	rfi
>   
> +#define UNLOCK_USER_ACCESS(reg)
> +#define LOCK_USER_ACCESS(reg)
> +
>   #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
>   
> diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
> index 3b4767ed3ec5..d92614c66d87 100644
> --- a/arch/powerpc/include/asm/exception-64s.h
> +++ b/arch/powerpc/include/asm/exception-64s.h
> @@ -264,6 +264,19 @@ BEGIN_FTR_SECTION_NESTED(943)						\
>   	std	ra,offset(r13);						\
>   END_FTR_SECTION_NESTED(ftr,ftr,943)
>   
> +#define LOCK_USER_ACCESS(reg)							\
> +BEGIN_MMU_FTR_SECTION_NESTED(944)					\
> +	LOAD_REG_IMMEDIATE(reg,AMR_LOCKED);				\
> +	mtspr	SPRN_AMR,reg;						\
> +END_MMU_FTR_SECTION_NESTED(MMU_FTR_RADIX_KUAP,MMU_FTR_RADIX_KUAP,944)
> +
> +#define UNLOCK_USER_ACCESS(reg)							\
> +BEGIN_MMU_FTR_SECTION_NESTED(945)					\
> +	li	reg,0;							\
> +	mtspr	SPRN_AMR,reg;						\
> +	isync;								\
> +END_MMU_FTR_SECTION_NESTED(MMU_FTR_RADIX_KUAP,MMU_FTR_RADIX_KUAP,945)
> +
>   #define EXCEPTION_PROLOG_0(area)					\
>   	GET_PACA(r13);							\
>   	std	r9,area+EX_R9(r13);	/* save r9 */			\
> @@ -500,7 +513,11 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
>   	beq	4f;			/* if from kernel mode		*/ \
>   	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);				   \
>   	SAVE_PPR(area, r9);						   \
> -4:	EXCEPTION_PROLOG_COMMON_2(area)					   \
> +4:	lbz	r9,PACA_USER_ACCESS_ALLOWED(r13);			   \
> +	cmpwi	cr1,r9,0;						   \
> +	beq	cr1,5f;						   \
> +	LOCK_USER_ACCESS(r9);						   \
> +5:	EXCEPTION_PROLOG_COMMON_2(area)					\
>   	EXCEPTION_PROLOG_COMMON_3(n)					   \
>   	ACCOUNT_STOLEN_TIME
>   
> diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
> index 5631a906af55..a1450d56d0db 100644
> --- a/arch/powerpc/include/asm/mmu.h
> +++ b/arch/powerpc/include/asm/mmu.h
> @@ -107,6 +107,10 @@
>    */
>   #define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)
>   
> +/* Supports KUAP (key 0 controlling userspace addresses) on radix */
> +#define MMU_FTR_RADIX_KUAP		ASM_CONST(0x80000000)
> +
>   /* MMU feature bit sets for various CPUs */
>   #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
>   	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
> @@ -143,7 +147,10 @@ enum {
>   		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
>   #ifdef CONFIG_PPC_RADIX_MMU
>   		MMU_FTR_TYPE_RADIX |
> -#endif
> +#ifdef CONFIG_PPC_KUAP
> +		MMU_FTR_RADIX_KUAP |
> +#endif /* CONFIG_PPC_KUAP */
> +#endif /* CONFIG_PPC_RADIX_MMU */
>   		0,
>   };
>   
> diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
> index de52c3166ba4..d9598e6790d8 100644
> --- a/arch/powerpc/include/asm/reg.h
> +++ b/arch/powerpc/include/asm/reg.h
> @@ -246,6 +246,7 @@
>   #define SPRN_DSCR	0x11
>   #define SPRN_CFAR	0x1c	/* Come From Address Register */
>   #define SPRN_AMR	0x1d	/* Authority Mask Register */
> +#define   AMR_LOCKED	0xC000000000000000UL /* Read & Write disabled */
>   #define SPRN_UAMOR	0x9d	/* User Authority Mask Override Register */
>   #define SPRN_AMOR	0x15d	/* Authority Mask Override Register */
>   #define SPRN_ACOP	0x1F	/* Available Coprocessor Register */
> diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
> index 7b1693adff2a..d5879f32bd34 100644
> --- a/arch/powerpc/kernel/entry_64.S
> +++ b/arch/powerpc/kernel/entry_64.S
> @@ -297,7 +297,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>   	b	.	/* prevent speculative execution */
>   
>   	/* exit to kernel */
> -1:	ld	r2,GPR2(r1)
> +1:	/* if the AMR was unlocked before, unlock it again */
> +	lbz	r2,PACA_USER_ACCESS_ALLOWED(r13)
> +	cmpwi	cr1,r2,0
> +	beq	cr1,2f
> +	UNLOCK_USER_ACCESS(r2)
> +2:	ld	r2,GPR2(r1)
>   	ld	r1,GPR1(r1)
>   	mtlr	r4
>   	mtcr	r5
> @@ -983,7 +988,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>   	RFI_TO_USER
>   	b	.	/* prevent speculative execution */
>   
> -1:	mtspr	SPRN_SRR1,r3
> +1:	/* exit to kernel */
> +	/* if the AMR was unlocked before, unlock it again */
> +	lbz	r2,PACA_USER_ACCESS_ALLOWED(r13)
> +	cmpwi	cr1,r2,0
> +	beq	cr1,2f
> +	UNLOCK_USER_ACCESS(r2)
> +
> +2:	mtspr	SPRN_SRR1,r3
>   
>   	ld	r2,_CCR(r1)
>   	mtcrf	0xFF,r2
> diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
> index f08a459b4255..b064b542e09f 100644
> --- a/arch/powerpc/mm/pgtable-radix.c
> +++ b/arch/powerpc/mm/pgtable-radix.c
> @@ -29,6 +29,7 @@
>   #include <asm/powernv.h>
>   #include <asm/sections.h>
>   #include <asm/trace.h>
> +#include <asm/uaccess.h>
>   
>   #include <trace/events/thp.h>
>   
> @@ -550,6 +551,17 @@ void setup_kuep(bool disabled)
>   	mtspr(SPRN_IAMR, (1ul << 62));
>   }
>   
> +void setup_kuap(bool disabled)
> +{
> +	if (disabled)
> +		return;
> +
> +	pr_warn("Activating Kernel Userspace Access Prevention\n");
> +
> +	cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
> +	mtspr(SPRN_AMR, AMR_LOCKED);
> +}
> +
>   void __init radix__early_init_mmu(void)
>   {
>   	unsigned long lpcr;
> diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
> index b271b283c785..bb3cf915016f 100644
> --- a/arch/powerpc/mm/pkeys.c
> +++ b/arch/powerpc/mm/pkeys.c
> @@ -7,6 +7,7 @@
>   
>   #include <asm/mman.h>
>   #include <asm/setup.h>
> +#include <asm/uaccess.h>
>   #include <linux/pkeys.h>
>   #include <linux/of_device.h>
>   
> @@ -266,7 +267,8 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
>   
>   void thread_pkey_regs_save(struct thread_struct *thread)
>   {
> -	if (static_branch_likely(&pkey_disabled))
> +	if (static_branch_likely(&pkey_disabled) &&
> +	    !mmu_has_feature(MMU_FTR_RADIX_KUAP))
>   		return;
>   
>   	/*
> @@ -280,7 +282,8 @@ void thread_pkey_regs_save(struct thread_struct *thread)
>   void thread_pkey_regs_restore(struct thread_struct *new_thread,
>   			      struct thread_struct *old_thread)
>   {
> -	if (static_branch_likely(&pkey_disabled))
> +	if (static_branch_likely(&pkey_disabled) &&
> +	    !mmu_has_feature(MMU_FTR_RADIX_KUAP))
>   		return;
>   
>   	if (old_thread->amr != new_thread->amr)
> diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
> index e6831d0ec159..5fbfa041194d 100644
> --- a/arch/powerpc/platforms/Kconfig.cputype
> +++ b/arch/powerpc/platforms/Kconfig.cputype
> @@ -335,6 +335,7 @@ config PPC_RADIX_MMU
>   	depends on PPC_BOOK3S_64
>   	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
>   	select PPC_HAVE_KUEP
> +	select PPC_HAVE_KUAP
>   	default y
>   	help
>   	  Enable support for the Power ISA 3.0 Radix style MMU. Currently this
> 

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2018-11-28  9:58 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-11-22 14:04 [PATCH 0/4] Kernel Userspace Protection for Radix MMU Russell Currey
2018-11-22 14:04 ` [PATCH 1/4] powerpc: Track KUAP state in the PACA Russell Currey
2018-11-28  9:38   ` Christophe Leroy
2018-11-22 14:04 ` [PATCH 2/4] powerpc/64: Setup KUP before feature fixups Russell Currey
2018-11-28  9:38   ` Christophe Leroy
2018-11-22 14:04 ` [PATCH 3/4] powerpc/mm/radix: Use KUEP API for Radix MMU Russell Currey
2018-11-22 14:04 ` [PATCH 4/4] powerpc/64s: Implement KUAP " Russell Currey
2018-11-28  9:39   ` Christophe Leroy

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).