* [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines
@ 2021-03-16  3:17 Jordan Niethe
  2021-03-16  3:17 ` [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier Jordan Niethe
                   ` (7 more replies)
  0 siblings, 8 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-16  3:17 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, naveen.n.rao, Jordan Niethe, dja

From: Russell Currey <ruscur@russell.cc>

The set_memory_{ro/rw/nx/x}() functions are required for STRICT_MODULE_RWX,
and are generally useful primitives to have.  This implementation is
designed to be completely generic across powerpc's many MMUs.

It's possible that this could be optimised to be faster for specific
MMUs, but the focus is on having a generic and safe implementation for
now.

This implementation does not handle cases where the caller is attempting
to change the mapping of the page it is executing from, or if another
CPU is concurrently using the page being altered.  These cases likely
shouldn't happen, but a more complex implementation with MMU-specific code
could safely handle them, so that is left as a TODO for now.

These functions do nothing if STRICT_KERNEL_RWX is not enabled.
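
For illustration only (not part of this patch), a hypothetical caller
making a freshly vmalloc()ed page read-only and executable could do:

	unsigned long addr = (unsigned long)vmalloc(PAGE_SIZE);	/* hypothetical buffer */

	if (addr) {
		set_memory_ro(addr, 1);		/* one page becomes read-only */
		set_memory_x(addr, 1);		/* and executable */
	}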

Reviewed-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[jpn: rebase on next plus "powerpc/mm/64s: Allow STRICT_KERNEL_RWX again"]
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/Kconfig                  |  1 +
 arch/powerpc/include/asm/set_memory.h | 32 +++++++++++
 arch/powerpc/mm/Makefile              |  2 +-
 arch/powerpc/mm/pageattr.c            | 81 +++++++++++++++++++++++++++
 4 files changed, 115 insertions(+), 1 deletion(-)
 create mode 100644 arch/powerpc/include/asm/set_memory.h
 create mode 100644 arch/powerpc/mm/pageattr.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fc7f5c5933e6..4498a27ac9db 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,7 @@ config PPC
 	select ARCH_HAS_MEMBARRIER_CALLBACKS
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
+	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..64011ea444b4
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO	0
+#define SET_MEMORY_RW	1
+#define SET_MEMORY_NX	2
+#define SET_MEMORY_X	3
+
+int change_memory_attr(unsigned long addr, int numpages, long action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+#endif
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 3b4e9e4e25ea..d8a08abde1ae 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,7 +5,7 @@
 
 ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)
 
-obj-y				:= fault.o mem.o pgtable.o mmap.o maccess.o \
+obj-y				:= fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
 				   init_$(BITS).o pgtable_$(BITS).o \
 				   pgtable-frag.o ioremap.o ioremap_$(BITS).o \
 				   init-common.o mmu_context.o drmem.o
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
new file mode 100644
index 000000000000..2da3fbab6ff7
--- /dev/null
+++ b/arch/powerpc/mm/pageattr.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * MMU-generic set_memory implementation for powerpc
+ *
+ * Copyright 2019, IBM Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/set_memory.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+/*
+ * Updates the attributes of a page in three steps:
+ *
+ * 1. invalidate the page table entry
+ * 2. flush the TLB
+ * 3. install the new entry with the updated attributes
+ *
+ * This is unsafe if the caller is attempting to change the mapping of the
+ * page it is executing from, or if another CPU is concurrently using the
+ * page being altered.
+ *
+ * TODO make the implementation resistant to this.
+ *
+ * NOTE: can be dangerous to call without STRICT_KERNEL_RWX
+ */
+static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+	long action = (long)data;
+	pte_t pte;
+
+	spin_lock(&init_mm.page_table_lock);
+
+	/* invalidate the PTE so it's safe to modify */
+	pte = ptep_get_and_clear(&init_mm, addr, ptep);
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+	/* modify the PTE bits as desired, then apply */
+	switch (action) {
+	case SET_MEMORY_RO:
+		pte = pte_wrprotect(pte);
+		break;
+	case SET_MEMORY_RW:
+		pte = pte_mkwrite(pte);
+		break;
+	case SET_MEMORY_NX:
+		pte = pte_exprotect(pte);
+		break;
+	case SET_MEMORY_X:
+		pte = pte_mkexec(pte);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	set_pte_at(&init_mm, addr, ptep, pte);
+	spin_unlock(&init_mm.page_table_lock);
+
+	return 0;
+}
+
+int change_memory_attr(unsigned long addr, int numpages, long action)
+{
+	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+	unsigned long sz = numpages * PAGE_SIZE;
+
+	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+		return 0;
+
+	if (numpages <= 0)
+		return 0;
+
+	return apply_to_existing_page_range(&init_mm, start, sz,
+					    change_page_attr, (void *)action);
+}
-- 
2.25.1


* [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier
  2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
@ 2021-03-16  3:17 ` Jordan Niethe
  2021-03-16  3:36   ` Russell Currey
  2021-03-16  6:32   ` Christophe Leroy
  2021-03-16  3:17 ` [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO Jordan Niethe
                   ` (6 subsequent siblings)
  7 siblings, 2 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-16  3:17 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, Jordan Niethe, naveen.n.rao, dja

setup_text_poke_area() is a late init call so it runs before
mark_rodata_ro() and after the init calls. This lets all the init code
patching simply write to their locations. In the future, kprobes is
going to allocate its instruction pages RO which means they will need
setup_text_poke_area() to have already been called for their code
patching. However, init_kprobes() (which allocates and patches some
instruction pages) is an early init call so it happens before
setup_text_poke_area().

start_kernel() calls poking_init() before any of the init calls. On
powerpc, poking_init() is currently a nop. setup_text_poke_area() relies
on kernel virtual memory, cpu hotplug and per_cpu_areas being setup.
setup_per_cpu_areas(), boot_cpu_hotplug_init() and mm_init() are called
before poking_init().

Turn setup_text_poke_area() into poking_init().
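
For reference, a simplified sketch of that ordering (condensed from the
above and mainline init/main.c; not an exact listing):

	start_kernel()
		setup_per_cpu_areas();
		boot_cpu_hotplug_init();
		mm_init();
		...
		poking_init();		/* currently a nop on powerpc */
		...
		/* init calls, including init_kprobes(), and finally
		 * mark_rodata_ro() run later from kernel_init() */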

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v9: New to series
---
 arch/powerpc/lib/code-patching.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 2333625b5e31..b28afa1133db 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -65,14 +65,11 @@ static int text_area_cpu_down(unsigned int cpu)
 }
 
 /*
- * Run as a late init call. This allows all the boot time patching to be done
- * simply by patching the code, and then we're called here prior to
- * mark_rodata_ro(), which happens after all init calls are run. Although
- * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
- * it as being preferable to a kernel that will crash later when someone tries
- * to use patch_instruction().
+ * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
+ * we judge it as being preferable to a kernel that will crash later when
+ * someone tries to use patch_instruction().
  */
-static int __init setup_text_poke_area(void)
+int __init poking_init(void)
 {
 	BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 		"powerpc/text_poke:online", text_area_cpu_up,
@@ -80,7 +77,6 @@ static int __init setup_text_poke_area(void)
 
 	return 0;
 }
-late_initcall(setup_text_poke_area);
 
 /*
  * This can be called for kernel text or a module.
-- 
2.25.1


* [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO
  2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
  2021-03-16  3:17 ` [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier Jordan Niethe
@ 2021-03-16  3:17 ` Jordan Niethe
  2021-03-16  6:44   ` Christophe Leroy
  2021-03-17  6:12   ` Christophe Leroy
  2021-03-16  3:17 ` [PATCH v9 4/8] powerpc/mm/ptdump: debugfs handler for W+X checks at runtime Jordan Niethe
                   ` (5 subsequent siblings)
  7 siblings, 2 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-16  3:17 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, naveen.n.rao, Jordan Niethe, dja

From: Russell Currey <ruscur@russell.cc>

With CONFIG_STRICT_KERNEL_RWX=y and CONFIG_KPROBES=y, there will be one
W+X page at boot by default.  This can be tested with
CONFIG_PPC_PTDUMP=y and CONFIG_PPC_DEBUG_WX=y set, and checking the
kernel log during boot.

Add an arch specific insn page allocator which returns RO pages if
STRICT_KERNEL_RWX is enabled. This page is only written to with
patch_instruction() which is able to write RO pages.
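
For illustration, the write happens via patch_instruction() in
arch_prepare_kprobe(), roughly (sketch only; exact types vary between
kernel versions):

	patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);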

Reviewed-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[jpn: Reword commit message, switch from vmalloc_exec(), add
      free_insn_page()]
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v9: - vmalloc_exec() no longer exists
    - Set the page to RW before freeing it
---
 arch/powerpc/kernel/kprobes.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 01ab2163659e..bb7e4d321988 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -25,6 +25,8 @@
 #include <asm/sections.h>
 #include <asm/inst.h>
 #include <linux/uaccess.h>
+#include <linux/set_memory.h>
+#include <linux/vmalloc.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -103,6 +105,26 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
 	return addr;
 }
 
+void *alloc_insn_page(void)
+{
+	void *page = vmalloc(PAGE_SIZE);
+
+	if (!page)
+		return NULL;
+
+	set_memory_ro((unsigned long)page, 1);
+	set_memory_x((unsigned long)page, 1);
+
+	return page;
+}
+
+void free_insn_page(void *page)
+{
+	set_memory_nx((unsigned long)page, 1);
+	set_memory_rw((unsigned long)page, 1);
+	vfree(page);
+}
+
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	int ret = 0;
-- 
2.25.1


* [PATCH v9 4/8] powerpc/mm/ptdump: debugfs handler for W+X checks at runtime
  2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
  2021-03-16  3:17 ` [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier Jordan Niethe
  2021-03-16  3:17 ` [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO Jordan Niethe
@ 2021-03-16  3:17 ` Jordan Niethe
  2021-03-16  6:47   ` Christophe Leroy
  2021-03-16  3:17 ` [PATCH v9 5/8] powerpc: Set ARCH_HAS_STRICT_MODULE_RWX Jordan Niethe
                   ` (4 subsequent siblings)
  7 siblings, 1 reply; 24+ messages in thread
From: Jordan Niethe @ 2021-03-16  3:17 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: christophe.leroy, ajd, Kees Cook, npiggin, naveen.n.rao,
	Jordan Niethe, dja

From: Russell Currey <ruscur@russell.cc>

Very rudimentary, just

	echo 1 > [debugfs]/check_wx_pages

and check the kernel log.  Useful for testing strict module RWX.

Updated the Kconfig entry to reflect this.

Also fixed a typo.
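
For illustration, a successful run logs the "passed" message printed by
ptdump_check_wx() (see the diff below):

	# echo 1 > /sys/kernel/debug/check_wx_pages
	# dmesg | tail -1
	Checked W+X mappings: passed, no W+X pages found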

Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/Kconfig.debug      |  6 ++++--
 arch/powerpc/mm/ptdump/ptdump.c | 21 ++++++++++++++++++++-
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index ae084357994e..56e99e9a30d9 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -371,7 +371,7 @@ config PPC_PTDUMP
 	  If you are unsure, say N.
 
 config PPC_DEBUG_WX
-	bool "Warn on W+X mappings at boot"
+	bool "Warn on W+X mappings at boot & enable manual checks at runtime"
 	depends on PPC_PTDUMP && STRICT_KERNEL_RWX
 	help
 	  Generate a warning if any W+X mappings are found at boot.
@@ -385,7 +385,9 @@ config PPC_DEBUG_WX
 	  of other unfixed kernel bugs easier.
 
 	  There is no runtime or memory usage effect of this option
-	  once the kernel has booted up - it's a one time check.
+	  once the kernel has booted up; it only automatically checks once.
+
+	  Enables the "check_wx_pages" debugfs entry for checking at runtime.
 
 	  If in doubt, say "Y".
 
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index aca354fb670b..78497d57b66b 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -4,7 +4,7 @@
  *
  * This traverses the kernel pagetables and dumps the
  * information about the used sections of memory to
- * /sys/kernel/debug/kernel_pagetables.
+ * /sys/kernel/debug/kernel_page_tables.
  *
  * Derived from the arm64 implementation:
  * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
@@ -459,6 +459,25 @@ void ptdump_check_wx(void)
 	else
 		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
 }
+
+static int check_wx_debugfs_set(void *data, u64 val)
+{
+	if (val != 1ULL)
+		return -EINVAL;
+
+	ptdump_check_wx();
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(check_wx_fops, NULL, check_wx_debugfs_set, "%llu\n");
+
+static int ptdump_check_wx_init(void)
+{
+	return debugfs_create_file("check_wx_pages", 0200, NULL,
+				   NULL, &check_wx_fops) ? 0 : -ENOMEM;
+}
+device_initcall(ptdump_check_wx_init);
 #endif
 
 static int ptdump_init(void)
-- 
2.25.1


* [PATCH v9 5/8] powerpc: Set ARCH_HAS_STRICT_MODULE_RWX
  2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
                   ` (2 preceding siblings ...)
  2021-03-16  3:17 ` [PATCH v9 4/8] powerpc/mm/ptdump: debugfs handler for W+X checks at runtime Jordan Niethe
@ 2021-03-16  3:17 ` Jordan Niethe
  2021-03-16  6:51   ` Christophe Leroy
  2021-03-16  3:17 ` [PATCH v9 6/8] powerpc/configs: Enable STRICT_MODULE_RWX in skiroot_defconfig Jordan Niethe
                   ` (3 subsequent siblings)
  7 siblings, 1 reply; 24+ messages in thread
From: Jordan Niethe @ 2021-03-16  3:17 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, naveen.n.rao, Jordan Niethe, dja

From: Russell Currey <ruscur@russell.cc>

To enable strict module RWX on powerpc, set:

    CONFIG_STRICT_MODULE_RWX=y

You should also have CONFIG_STRICT_KERNEL_RWX=y set to have any real
security benefit.

ARCH_HAS_STRICT_MODULE_RWX is set to require ARCH_HAS_STRICT_KERNEL_RWX.
This is due to a quirk in arch/Kconfig and arch/powerpc/Kconfig that
makes STRICT_MODULE_RWX *on by default* in configurations where
STRICT_KERNEL_RWX is *unavailable*.

Since this doesn't make much sense, and module RWX without kernel RWX
doesn't make much sense, having the same dependencies as kernel RWX
works around this problem.

Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4498a27ac9db..d9cadc4212d0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -137,6 +137,7 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+	select ARCH_HAS_STRICT_MODULE_RWX	if ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
 	select ARCH_HAS_COPY_MC			if PPC64
-- 
2.25.1


* [PATCH v9 6/8] powerpc/configs: Enable STRICT_MODULE_RWX in skiroot_defconfig
  2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
                   ` (3 preceding siblings ...)
  2021-03-16  3:17 ` [PATCH v9 5/8] powerpc: Set ARCH_HAS_STRICT_MODULE_RWX Jordan Niethe
@ 2021-03-16  3:17 ` Jordan Niethe
  2021-03-16  3:17 ` [PATCH v9 7/8] powerpc/mm: implement set_memory_attr() Jordan Niethe
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-16  3:17 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: christophe.leroy, ajd, Joel Stanley, npiggin, naveen.n.rao,
	Jordan Niethe, dja

From: Russell Currey <ruscur@russell.cc>

skiroot_defconfig is the only powerpc defconfig with STRICT_KERNEL_RWX
enabled, and if you want memory protection for kernel text you'd want it
for modules too, so enable STRICT_MODULE_RWX there.

Acked-by: Joel Stanley <joel@jms.id.au>
Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/configs/skiroot_defconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index b806a5d3a695..50fe06cb3a31 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -50,6 +50,7 @@ CONFIG_CMDLINE="console=tty0 console=hvc0 ipr.fast_reboot=1 quiet"
 # CONFIG_PPC_MEM_KEYS is not set
 CONFIG_JUMP_LABEL=y
 CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_STRICT_MODULE_RWX=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_SIG_FORCE=y
-- 
2.25.1


* [PATCH v9 7/8] powerpc/mm: implement set_memory_attr()
  2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
                   ` (4 preceding siblings ...)
  2021-03-16  3:17 ` [PATCH v9 6/8] powerpc/configs: Enable STRICT_MODULE_RWX in skiroot_defconfig Jordan Niethe
@ 2021-03-16  3:17 ` Jordan Niethe
  2021-03-16  7:25   ` Christophe Leroy
  2021-03-16  3:17 ` [PATCH v9 8/8] powerpc/32: use set_memory_attr() Jordan Niethe
  2021-03-19  1:19 ` [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Michael Ellerman
  7 siblings, 1 reply; 24+ messages in thread
From: Jordan Niethe @ 2021-03-16  3:17 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, kbuild test robot, naveen.n.rao,
	Jordan Niethe, dja

From: Christophe Leroy <christophe.leroy@c-s.fr>

In addition to the set_memory_xx() functions, which allow changing the
memory attributes of not (yet) used memory regions, implement a
set_memory_attr() function to:
- set the final memory protection after init on currently used
kernel regions.
- enable/disable kernel memory regions in the scope of DEBUG_PAGEALLOC.

Unlike the set_memory_xx() functions, which can act in three steps as
the regions are unused, this function must modify the pages 'on the
fly' as the kernel is
executing from them. At the moment only PPC32 will use it and changing
page attributes on the fly is not an issue.
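
For example, patch 8 of this series uses it on PPC32 like:

	set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);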

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Reported-by: kbuild test robot <lkp@intel.com>
[ruscur: cast "data" to unsigned long instead of int]
Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/include/asm/set_memory.h |  2 ++
 arch/powerpc/mm/pageattr.c            | 33 +++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
index 64011ea444b4..b040094f7920 100644
--- a/arch/powerpc/include/asm/set_memory.h
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -29,4 +29,6 @@ static inline int set_memory_x(unsigned long addr, int numpages)
 	return change_memory_attr(addr, numpages, SET_MEMORY_X);
 }
 
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
+
 #endif
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 2da3fbab6ff7..2fde1b195c85 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -79,3 +79,36 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
 	return apply_to_existing_page_range(&init_mm, start, sz,
 					    change_page_attr, (void *)action);
 }
+
+/*
+ * Set the attributes of a page:
+ *
+ * This function is used by PPC32 at the end of init to set final kernel memory
+ * protection. It includes changing the mapping of the page it is executing from
+ * and data pages it is using.
+ */
+static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+	pgprot_t prot = __pgprot((unsigned long)data);
+
+	spin_lock(&init_mm.page_table_lock);
+
+	set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+	spin_unlock(&init_mm.page_table_lock);
+
+	return 0;
+}
+
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
+{
+	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+	unsigned long sz = numpages * PAGE_SIZE;
+
+	if (numpages <= 0)
+		return 0;
+
+	return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
+					    (void *)pgprot_val(prot));
+}
-- 
2.25.1


* [PATCH v9 8/8] powerpc/32: use set_memory_attr()
  2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
                   ` (5 preceding siblings ...)
  2021-03-16  3:17 ` [PATCH v9 7/8] powerpc/mm: implement set_memory_attr() Jordan Niethe
@ 2021-03-16  3:17 ` Jordan Niethe
  2021-03-19  1:19 ` [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Michael Ellerman
  7 siblings, 0 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-16  3:17 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, naveen.n.rao, Jordan Niethe, dja

From: Christophe Leroy <christophe.leroy@c-s.fr>

Use set_memory_attr() instead of the PPC32 specific change_page_attr()

change_page_attr() was checking that the address was not mapped by
blocks and was handling highmem, but that's unneeded because the
affected pages can't be in highmem and block mapping verification
is already done by the callers.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[ruscur: rebase on powerpc/merge with Christophe's new patches]
Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/mm/pgtable_32.c | 60 ++++++------------------------------
 1 file changed, 10 insertions(+), 50 deletions(-)

diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index e0ec67a16887..dcf5ecca19d9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -23,6 +23,7 @@
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/set_memory.h>
 
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
@@ -132,64 +133,20 @@ void __init mapin_ram(void)
 	}
 }
 
-static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
-{
-	pte_t *kpte;
-	unsigned long address;
-
-	BUG_ON(PageHighMem(page));
-	address = (unsigned long)page_address(page);
-
-	if (v_block_mapped(address))
-		return 0;
-	kpte = virt_to_kpte(address);
-	if (!kpte)
-		return -EINVAL;
-	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
-
-	return 0;
-}
-
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
- */
-static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
-	int i, err = 0;
-	unsigned long flags;
-	struct page *start = page;
-
-	local_irq_save(flags);
-	for (i = 0; i < numpages; i++, page++) {
-		err = __change_page_attr_noflush(page, prot);
-		if (err)
-			break;
-	}
-	wmb();
-	local_irq_restore(flags);
-	flush_tlb_kernel_range((unsigned long)page_address(start),
-			       (unsigned long)page_address(page));
-	return err;
-}
-
 void mark_initmem_nx(void)
 {
-	struct page *page = virt_to_page(_sinittext);
 	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
 				 PFN_DOWN((unsigned long)_sinittext);
 
 	if (v_block_mapped((unsigned long)_sinittext))
 		mmu_mark_initmem_nx();
 	else
-		change_page_attr(page, numpages, PAGE_KERNEL);
+		set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mark_rodata_ro(void)
 {
-	struct page *page;
 	unsigned long numpages;
 
 	if (v_block_mapped((unsigned long)_stext + 1)) {
@@ -198,20 +155,18 @@ void mark_rodata_ro(void)
 		return;
 	}
 
-	page = virt_to_page(_stext);
 	numpages = PFN_UP((unsigned long)_etext) -
 		   PFN_DOWN((unsigned long)_stext);
 
-	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
+	set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
 	/*
 	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
 	 * to cover NOTES and EXCEPTION_TABLE.
 	 */
-	page = virt_to_page(__start_rodata);
 	numpages = PFN_UP((unsigned long)__init_begin) -
 		   PFN_DOWN((unsigned long)__start_rodata);
 
-	change_page_attr(page, numpages, PAGE_KERNEL_RO);
+	set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);
 
 	// mark_initmem_nx() should have already run by now
 	ptdump_check_wx();
@@ -221,9 +176,14 @@ void mark_rodata_ro(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	unsigned long addr = (unsigned long)page_address(page);
+
 	if (PageHighMem(page))
 		return;
 
-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+	if (enable)
+		set_memory_attr(addr, numpages, PAGE_KERNEL);
+	else
+		set_memory_attr(addr, numpages, __pgprot(0));
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
-- 
2.25.1


* Re: [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier
  2021-03-16  3:17 ` [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier Jordan Niethe
@ 2021-03-16  3:36   ` Russell Currey
  2021-03-16  6:32   ` Christophe Leroy
  1 sibling, 0 replies; 24+ messages in thread
From: Russell Currey @ 2021-03-16  3:36 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev
  Cc: christophe.leroy, naveen.n.rao, ajd, npiggin, dja

On Tue, 2021-03-16 at 14:17 +1100, Jordan Niethe wrote:
> setup_text_poke_area() is a late init call so it runs before
> mark_rodata_ro() and after the init calls. This lets all the init
> code
> patching simply write to their locations. In the future, kprobes is
> going to allocate its instruction pages RO which means they will need
> setup_text_poke_area() to have already been called for their code
> patching. However, init_kprobes() (which allocates and patches some
> instruction pages) is an early init call so it happens before
> setup_text_poke_area().
> 
> start_kernel() calls poking_init() before any of the init calls. On
> powerpc, poking_init() is currently a nop. setup_text_poke_area()
> relies
> on kernel virtual memory, cpu hotplug and per_cpu_areas being setup.
> setup_per_cpu_areas(), boot_cpu_hotplug_init() and mm_init() are
> called
> before poking_init().
> 
> Turn setup_text_poke_area() into poking_init().
> 
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>

Good job finding & fixing this bug!

Reviewed-by: Russell Currey <ruscur@russell.cc>

> ---
> v9: New to series
> ---
>  arch/powerpc/lib/code-patching.c | 12 ++++--------
>  1 file changed, 4 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/powerpc/lib/code-patching.c
> b/arch/powerpc/lib/code-patching.c
> index 2333625b5e31..b28afa1133db 100644
> --- a/arch/powerpc/lib/code-patching.c
> +++ b/arch/powerpc/lib/code-patching.c
> @@ -65,14 +65,11 @@ static int text_area_cpu_down(unsigned int cpu)
>  }
>  
>  /*
> - * Run as a late init call. This allows all the boot time patching
> to be done
> - * simply by patching the code, and then we're called here prior to
> - * mark_rodata_ro(), which happens after all init calls are run.
> Although
> - * BUG_ON() is rude, in this case it should only happen if ENOMEM,
> and we judge
> - * it as being preferable to a kernel that will crash later when
> someone tries
> - * to use patch_instruction().
> + * Although BUG_ON() is rude, in this case it should only happen if
> ENOMEM, and
> + * we judge it as being preferable to a kernel that will crash later
> when
> + * someone tries to use patch_instruction().
>   */
> -static int __init setup_text_poke_area(void)
> +int __init poking_init(void)
>  {
>         BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
>                 "powerpc/text_poke:online", text_area_cpu_up,
> @@ -80,7 +77,6 @@ static int __init setup_text_poke_area(void)
>  
>         return 0;
>  }
> -late_initcall(setup_text_poke_area);
>  
>  /*
>   * This can be called for kernel text or a module.



* Re: [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier
  2021-03-16  3:17 ` [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier Jordan Niethe
  2021-03-16  3:36   ` Russell Currey
@ 2021-03-16  6:32   ` Christophe Leroy
  2021-03-17  0:38     ` Jordan Niethe
  1 sibling, 1 reply; 24+ messages in thread
From: Christophe Leroy @ 2021-03-16  6:32 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, naveen.n.rao, dja



On 16/03/2021 at 04:17, Jordan Niethe wrote:
> setup_text_poke_area() is a late init call so it runs before
> mark_rodata_ro() and after the init calls. This lets all the init code
> patching simply write to their locations. In the future, kprobes is
> going to allocate its instruction pages RO which means they will need
> setup_text_poke_area() to have already been called for their code
> patching. However, init_kprobes() (which allocates and patches some
> instruction pages) is an early init call so it happens before
> setup_text_poke_area().
> 
> start_kernel() calls poking_init() before any of the init calls. On
> powerpc, poking_init() is currently a nop. setup_text_poke_area() relies
> on kernel virtual memory, cpu hotplug and per_cpu_areas being setup.
> setup_per_cpu_areas(), boot_cpu_hotplug_init() and mm_init() are called
> before poking_init().
> 
> Turn setup_text_poke_area() into poking_init().
> 
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
> v9: New to series
> ---
>   arch/powerpc/lib/code-patching.c | 12 ++++--------
>   1 file changed, 4 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
> index 2333625b5e31..b28afa1133db 100644
> --- a/arch/powerpc/lib/code-patching.c
> +++ b/arch/powerpc/lib/code-patching.c
> @@ -65,14 +65,11 @@ static int text_area_cpu_down(unsigned int cpu)
>   }
>   
>   /*
> - * Run as a late init call. This allows all the boot time patching to be done
> - * simply by patching the code, and then we're called here prior to
> - * mark_rodata_ro(), which happens after all init calls are run. Although
> - * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
> - * it as being preferable to a kernel that will crash later when someone tries
> - * to use patch_instruction().
> + * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
> + * we judge it as being preferable to a kernel that will crash later when
> + * someone tries to use patch_instruction().

Please use WARN_ON(), see why at https://www.kernel.org/doc/html/latest/process/deprecated.html
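
For instance, a sketch of what that could look like (illustrative only):

	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				    "powerpc/text_poke:online",
				    text_area_cpu_up, text_area_cpu_down);

	/* cpuhp_setup_state() returns a negative errno on failure */
	WARN_ON(ret < 0);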

>    */
> -static int __init setup_text_poke_area(void)
> +int __init poking_init(void)
>   {
>   	BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
>   		"powerpc/text_poke:online", text_area_cpu_up,
> @@ -80,7 +77,6 @@ static int __init setup_text_poke_area(void)
>   
>   	return 0;
>   }
> -late_initcall(setup_text_poke_area);
>   
>   /*
>    * This can be called for kernel text or a module.
> 

* Re: [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO
  2021-03-16  3:17 ` [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO Jordan Niethe
@ 2021-03-16  6:44   ` Christophe Leroy
  2021-03-17  0:50     ` Jordan Niethe
  2021-03-17  0:52     ` Jordan Niethe
  2021-03-17  6:12   ` Christophe Leroy
  1 sibling, 2 replies; 24+ messages in thread
From: Christophe Leroy @ 2021-03-16  6:44 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, naveen.n.rao, dja



On 16/03/2021 at 04:17, Jordan Niethe wrote:
> From: Russell Currey <ruscur@russell.cc>
> 
> With CONFIG_STRICT_KERNEL_RWX=y and CONFIG_KPROBES=y, there will be one
> W+X page at boot by default.  This can be tested with
> CONFIG_PPC_PTDUMP=y and CONFIG_PPC_DEBUG_WX=y set, and checking the
> kernel log during boot.
> 

This text is confusing. I don't understand what the status is before the patch, and what the 
status is after.

"there will be one ...", does it mean after the patch ?

> Add an arch specific insn page allocator which returns RO pages if
> STRICT_KERNEL_RWX is enabled. This page is only written to with
> patch_instruction() which is able to write RO pages.

"an" or "the" arch specific insn page allocator ?

> 
> Reviewed-by: Daniel Axtens <dja@axtens.net>
> Signed-off-by: Russell Currey <ruscur@russell.cc>
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> [jpn: Reword commit message, switch from vmalloc_exec(), add
>        free_insn_page()]
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
> v9: - vmalloc_exec() no longer exists
>      - Set the page to RW before freeing it
> ---
>   arch/powerpc/kernel/kprobes.c | 22 ++++++++++++++++++++++
>   1 file changed, 22 insertions(+)
> 
> diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
> index 01ab2163659e..bb7e4d321988 100644
> --- a/arch/powerpc/kernel/kprobes.c
> +++ b/arch/powerpc/kernel/kprobes.c
> @@ -25,6 +25,8 @@
>   #include <asm/sections.h>
>   #include <asm/inst.h>
>   #include <linux/uaccess.h>
> +#include <linux/set_memory.h>
> +#include <linux/vmalloc.h>
>   
>   DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
>   DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
> @@ -103,6 +105,26 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
>   	return addr;
>   }
>   
> +void *alloc_insn_page(void)
> +{
> +	void *page = vmalloc(PAGE_SIZE);

Can't do that on book3s/32, see https://github.com/linuxppc/linux/commit/6ca05532 and 
https://github.com/linuxppc/linux/commit/7fbc22ce

Should do:
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL,
				    PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
				    __builtin_return_address(0));


To keep it simple, you'll probably need to define MODULES_VADDR and MODULES_END as resp 
VMALLOC_START and VMALLOC_END when they are not defined, maybe in asm/pgtable.h

> +
> +	if (!page)
> +		return NULL;
> +
> +	set_memory_ro((unsigned long)page, 1);
> +	set_memory_x((unsigned long)page, 1);
> +
> +	return page;
> +}
> +
> +void free_insn_page(void *page)
> +{
> +	set_memory_nx((unsigned long)page, 1);
> +	set_memory_rw((unsigned long)page, 1);
> +	vfree(page);
> +}
> +
>   int arch_prepare_kprobe(struct kprobe *p)
>   {
>   	int ret = 0;
> 

* Re: [PATCH v9 4/8] powerpc/mm/ptdump: debugfs handler for W+X checks at runtime
  2021-03-16  3:17 ` [PATCH v9 4/8] powerpc/mm/ptdump: debugfs handler for W+X checks at runtime Jordan Niethe
@ 2021-03-16  6:47   ` Christophe Leroy
  0 siblings, 0 replies; 24+ messages in thread
From: Christophe Leroy @ 2021-03-16  6:47 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev
  Cc: christophe.leroy, ajd, Kees Cook, npiggin, naveen.n.rao, dja



On 16/03/2021 at 04:17, Jordan Niethe wrote:
> From: Russell Currey <ruscur@russell.cc>
> 
> Very rudimentary, just
> 
> 	echo 1 > [debugfs]/check_wx_pages
> 
> and check the kernel log.  Useful for testing strict module RWX.
> 
> Updated the Kconfig entry to reflect this.
> 
> Also fixed a typo.

Why not just perform the test every time someone dumps kernel_page_tables?

> 
> Reviewed-by: Kees Cook <keescook@chromium.org>
> Signed-off-by: Russell Currey <ruscur@russell.cc>
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
>   arch/powerpc/Kconfig.debug      |  6 ++++--
>   arch/powerpc/mm/ptdump/ptdump.c | 21 ++++++++++++++++++++-
>   2 files changed, 24 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
> index ae084357994e..56e99e9a30d9 100644
> --- a/arch/powerpc/Kconfig.debug
> +++ b/arch/powerpc/Kconfig.debug
> @@ -371,7 +371,7 @@ config PPC_PTDUMP
>   	  If you are unsure, say N.
>   
>   config PPC_DEBUG_WX
> -	bool "Warn on W+X mappings at boot"
> +	bool "Warn on W+X mappings at boot & enable manual checks at runtime"
>   	depends on PPC_PTDUMP && STRICT_KERNEL_RWX
>   	help
>   	  Generate a warning if any W+X mappings are found at boot.
> @@ -385,7 +385,9 @@ config PPC_DEBUG_WX
>   	  of other unfixed kernel bugs easier.
>   
>   	  There is no runtime or memory usage effect of this option
> -	  once the kernel has booted up - it's a one time check.
> +	  once the kernel has booted up; it only automatically checks once.
> +
> +	  Enables the "check_wx_pages" debugfs entry for checking at runtime.
>   
>   	  If in doubt, say "Y".
>   
> diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
> index aca354fb670b..78497d57b66b 100644
> --- a/arch/powerpc/mm/ptdump/ptdump.c
> +++ b/arch/powerpc/mm/ptdump/ptdump.c
> @@ -4,7 +4,7 @@
>    *
>    * This traverses the kernel pagetables and dumps the
>    * information about the used sections of memory to
> - * /sys/kernel/debug/kernel_pagetables.
> + * /sys/kernel/debug/kernel_page_tables.
>    *
>    * Derived from the arm64 implementation:
>    * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
> @@ -459,6 +459,25 @@ void ptdump_check_wx(void)
>   	else
>   		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
>   }
> +
> +static int check_wx_debugfs_set(void *data, u64 val)
> +{
> +	if (val != 1ULL)
> +		return -EINVAL;
> +
> +	ptdump_check_wx();
> +
> +	return 0;
> +}
> +
> +DEFINE_SIMPLE_ATTRIBUTE(check_wx_fops, NULL, check_wx_debugfs_set, "%llu\n");
> +
> +static int ptdump_check_wx_init(void)
> +{
> +	return debugfs_create_file("check_wx_pages", 0200, NULL,
> +				   NULL, &check_wx_fops) ? 0 : -ENOMEM;
> +}
> +device_initcall(ptdump_check_wx_init);
>   #endif
>   
>   static int ptdump_init(void)
> 

* Re: [PATCH v9 5/8] powerpc: Set ARCH_HAS_STRICT_MODULE_RWX
  2021-03-16  3:17 ` [PATCH v9 5/8] powerpc: Set ARCH_HAS_STRICT_MODULE_RWX Jordan Niethe
@ 2021-03-16  6:51   ` Christophe Leroy
  2021-03-17  2:15     ` Jordan Niethe
  0 siblings, 1 reply; 24+ messages in thread
From: Christophe Leroy @ 2021-03-16  6:51 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, naveen.n.rao, dja



On 16/03/2021 at 04:17, Jordan Niethe wrote:
> From: Russell Currey <ruscur@russell.cc>
> 
> To enable strict module RWX on powerpc, set:
> 
>      CONFIG_STRICT_MODULE_RWX=y
> 
> You should also have CONFIG_STRICT_KERNEL_RWX=y set to have any real
> security benefit.
> 
> ARCH_HAS_STRICT_MODULE_RWX is set to require ARCH_HAS_STRICT_KERNEL_RWX.
> This is due to a quirk in arch/Kconfig and arch/powerpc/Kconfig that
> makes STRICT_MODULE_RWX *on by default* in configurations where
> STRICT_KERNEL_RWX is *unavailable*.

Not that easy on book3s/32. On it, you can't protect memory against execution on a page basis, you 
can only do it on a segment basis. So in order to do that, we would need to allocate two areas of 
memory: one in module space for text and one in vmalloc space for data.

See https://github.com/linuxppc/linux/commit/6ca05532 and 
https://github.com/linuxppc/linux/commit/7fbc22ce


> 
> Since this doesn't make much sense, and module RWX without kernel RWX
> doesn't make much sense, having the same dependencies as kernel RWX
> works around this problem.
> 
> Signed-off-by: Russell Currey <ruscur@russell.cc>
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
>   arch/powerpc/Kconfig | 1 +
>   1 file changed, 1 insertion(+)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 4498a27ac9db..d9cadc4212d0 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -137,6 +137,7 @@ config PPC
>   	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
>   	select ARCH_HAS_SET_MEMORY
>   	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
> +	select ARCH_HAS_STRICT_MODULE_RWX	if ARCH_HAS_STRICT_KERNEL_RWX
>   	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
>   	select ARCH_HAS_UACCESS_FLUSHCACHE
>   	select ARCH_HAS_COPY_MC			if PPC64
> 

* Re: [PATCH v9 7/8] powerpc/mm: implement set_memory_attr()
  2021-03-16  3:17 ` [PATCH v9 7/8] powerpc/mm: implement set_memory_attr() Jordan Niethe
@ 2021-03-16  7:25   ` Christophe Leroy
  2021-03-17  0:54     ` Jordan Niethe
  0 siblings, 1 reply; 24+ messages in thread
From: Christophe Leroy @ 2021-03-16  7:25 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev
  Cc: christophe.leroy, ajd, npiggin, kbuild test robot, naveen.n.rao, dja



On 16/03/2021 at 04:17, Jordan Niethe wrote:
> From: Christophe Leroy <christophe.leroy@c-s.fr>

Can you please update the whole series with my new email address: christophe.leroy@csgroup.eu



> 
> In addition to the set_memory_xx() functions, which allow changing the
> memory attributes of not (yet) used memory regions, implement a
> set_memory_attr() function to:
> - set the final memory protection after init on currently used
> kernel regions.
> - enable/disable kernel memory regions in the scope of DEBUG_PAGEALLOC.
> 
> Unlike the set_memory_xx() functions, which can act in three steps as
> the regions are unused, this function must modify the pages 'on the
> fly' as the kernel is
> executing from them. At the moment only PPC32 will use it and changing
> page attributes on the fly is not an issue.
> 
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> Reported-by: kbuild test robot <lkp@intel.com>
> [ruscur: cast "data" to unsigned long instead of int]
> Signed-off-by: Russell Currey <ruscur@russell.cc>
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
>   arch/powerpc/include/asm/set_memory.h |  2 ++
>   arch/powerpc/mm/pageattr.c            | 33 +++++++++++++++++++++++++++
>   2 files changed, 35 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
> index 64011ea444b4..b040094f7920 100644
> --- a/arch/powerpc/include/asm/set_memory.h
> +++ b/arch/powerpc/include/asm/set_memory.h
> @@ -29,4 +29,6 @@ static inline int set_memory_x(unsigned long addr, int numpages)
>   	return change_memory_attr(addr, numpages, SET_MEMORY_X);
>   }
>   
> +int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
> +
>   #endif
> diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
> index 2da3fbab6ff7..2fde1b195c85 100644
> --- a/arch/powerpc/mm/pageattr.c
> +++ b/arch/powerpc/mm/pageattr.c
> @@ -79,3 +79,36 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
>   	return apply_to_existing_page_range(&init_mm, start, sz,
>   					    change_page_attr, (void *)action);
>   }
> +
> +/*
> + * Set the attributes of a page:
> + *
> + * This function is used by PPC32 at the end of init to set final kernel memory
> > + * protection. It includes changing the mapping of the page it is executing from
> + * and data pages it is using.
> + */
> +static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
> +{
> +	pgprot_t prot = __pgprot((unsigned long)data);
> +
> +	spin_lock(&init_mm.page_table_lock);
> +
> +	set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
> +	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> +
> +	spin_unlock(&init_mm.page_table_lock);
> +
> +	return 0;
> +}
> +
> +int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
> +{
> +	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
> +	unsigned long sz = numpages * PAGE_SIZE;
> +
> +	if (numpages <= 0)
> +		return 0;
> +
> +	return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
> +					    (void *)pgprot_val(prot));
> +}
> 

* Re: [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier
  2021-03-16  6:32   ` Christophe Leroy
@ 2021-03-17  0:38     ` Jordan Niethe
  2021-03-17 12:04       ` Michael Ellerman
  0 siblings, 1 reply; 24+ messages in thread
From: Jordan Niethe @ 2021-03-17  0:38 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Christophe Leroy, ajd, Nicholas Piggin, naveen.n.rao,
	linuxppc-dev, Daniel Axtens

On Tue, Mar 16, 2021 at 5:32 PM Christophe Leroy
<christophe.leroy@csgroup.eu> wrote:
>
>
>
> On 16/03/2021 at 04:17, Jordan Niethe wrote:
> > setup_text_poke_area() is a late init call so it runs before
> > mark_rodata_ro() and after the init calls. This lets all the init code
> > patching simply write to their locations. In the future, kprobes is
> > going to allocate its instruction pages RO which means they will need
> > setup_text_poke_area() to have already been called for their code
> > patching. However, init_kprobes() (which allocates and patches some
> > instruction pages) is an early init call so it happens before
> > setup_text_poke_area().
> >
> > start_kernel() calls poking_init() before any of the init calls. On
> > powerpc, poking_init() is currently a nop. setup_text_poke_area() relies
> > on kernel virtual memory, cpu hotplug and per_cpu_areas being setup.
> > setup_per_cpu_areas(), boot_cpu_hotplug_init() and mm_init() are called
> > before poking_init().
> >
> > Turn setup_text_poke_area() into poking_init().
> >
> > Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> > ---
> > v9: New to series
> > ---
> >   arch/powerpc/lib/code-patching.c | 12 ++++--------
> >   1 file changed, 4 insertions(+), 8 deletions(-)
> >
> > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
> > index 2333625b5e31..b28afa1133db 100644
> > --- a/arch/powerpc/lib/code-patching.c
> > +++ b/arch/powerpc/lib/code-patching.c
> > @@ -65,14 +65,11 @@ static int text_area_cpu_down(unsigned int cpu)
> >   }
> >
> >   /*
> > - * Run as a late init call. This allows all the boot time patching to be done
> > - * simply by patching the code, and then we're called here prior to
> > - * mark_rodata_ro(), which happens after all init calls are run. Although
> > - * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
> > - * it as being preferable to a kernel that will crash later when someone tries
> > - * to use patch_instruction().
> > + * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
> > + * we judge it as being preferable to a kernel that will crash later when
> > + * someone tries to use patch_instruction().
>
> Please use WARN_ON(), see why at https://www.kernel.org/doc/html/latest/process/deprecated.html
OK, I can include a change to WARN_ON() as a separate patch.
>
> >    */
> > -static int __init setup_text_poke_area(void)
> > +int __init poking_init(void)
> >   {
> >       BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
> >               "powerpc/text_poke:online", text_area_cpu_up,
> > @@ -80,7 +77,6 @@ static int __init setup_text_poke_area(void)
> >
> >       return 0;
> >   }
> > -late_initcall(setup_text_poke_area);
> >
> >   /*
> >    * This can be called for kernel text or a module.
> >

* Re: [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO
  2021-03-16  6:44   ` Christophe Leroy
@ 2021-03-17  0:50     ` Jordan Niethe
  2021-03-17  0:52     ` Jordan Niethe
  1 sibling, 0 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-17  0:50 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Christophe Leroy, ajd, Nicholas Piggin, naveen.n.rao,
	linuxppc-dev, Daniel Axtens

On Tue, Mar 16, 2021 at 5:44 PM Christophe Leroy
<christophe.leroy@csgroup.eu> wrote:
>
>
>
> On 16/03/2021 at 04:17, Jordan Niethe wrote:
> > From: Russell Currey <ruscur@russell.cc>
> >
> > With CONFIG_STRICT_KERNEL_RWX=y and CONFIG_KPROBES=y, there will be one
> > W+X page at boot by default.  This can be tested with
> > CONFIG_PPC_PTDUMP=y and CONFIG_PPC_DEBUG_WX=y set, and checking the
> > kernel log during boot.
> >
>
> This text is confusing. I don't understand what the status is before the patch, and what the
> status is after.
Before the patch kprobes is allocating W+X pages. This can be seen in
the kernel log with those debug options on.
After the patch kprobes no longer allocates W+X pages.
I will reword it to be more clear.
>
> "there will be one ...", does it mean after the patch ?
No, before the patch; after it there will be none.
>
> > Add an arch specific insn page allocator which returns RO pages if
> > STRICT_KERNEL_RWX is enabled. This page is only written to with
> > patch_instruction() which is able to write RO pages.
>
> "an" or "the" arch specific insn page allocator ?
Hmm, will go with "the arch specific insn page allocator for powerpc".
>
> >
> > Reviewed-by: Daniel Axtens <dja@axtens.net>
> > Signed-off-by: Russell Currey <ruscur@russell.cc>
> > Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> > [jpn: Reword commit message, switch from vmalloc_exec(), add
> >        free_insn_page()]
> > Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> > ---
> > v9: - vmalloc_exec() no longer exists
> >      - Set the page to RW before freeing it
> > ---
> >   arch/powerpc/kernel/kprobes.c | 22 ++++++++++++++++++++++
> >   1 file changed, 22 insertions(+)
> >
> > diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
> > index 01ab2163659e..bb7e4d321988 100644
> > --- a/arch/powerpc/kernel/kprobes.c
> > +++ b/arch/powerpc/kernel/kprobes.c
> > @@ -25,6 +25,8 @@
> >   #include <asm/sections.h>
> >   #include <asm/inst.h>
> >   #include <linux/uaccess.h>
> > +#include <linux/set_memory.h>
> > +#include <linux/vmalloc.h>
> >
> >   DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
> >   DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
> > @@ -103,6 +105,26 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
> >       return addr;
> >   }
> >
> > +void *alloc_insn_page(void)
> > +{
> > +     void *page = vmalloc(PAGE_SIZE);
>
> Can't do that on book3s/32, see https://github.com/linuxppc/linux/commit/6ca05532 and
> https://github.com/linuxppc/linux/commit/7fbc22ce
>
> Should do:
>         return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL,
>                                     PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
>                                     __builtin_return_address(0));
>
>
> To keep it simple, you'll probably need to define MODULES_VADDR and MODULES_END as resp
> VMALLOC_START and VMALLOC_END when they are not defined, maybe in asm/pgtable.h
>
> > +
> > +     if (!page)
> > +             return NULL;
> > +
> > +     set_memory_ro((unsigned long)page, 1);
> > +     set_memory_x((unsigned long)page, 1);
> > +
> > +     return page;
> > +}
> > +
> > +void free_insn_page(void *page)
> > +{
> > +     set_memory_nx((unsigned long)page, 1);
> > +     set_memory_rw((unsigned long)page, 1);
> > +     vfree(page);
> > +}
> > +
> >   int arch_prepare_kprobe(struct kprobe *p)
> >   {
> >       int ret = 0;
> >

* Re: [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO
  2021-03-16  6:44   ` Christophe Leroy
  2021-03-17  0:50     ` Jordan Niethe
@ 2021-03-17  0:52     ` Jordan Niethe
  1 sibling, 0 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-17  0:52 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Christophe Leroy, ajd, Nicholas Piggin, naveen.n.rao,
	linuxppc-dev, Daniel Axtens

On Tue, Mar 16, 2021 at 5:44 PM Christophe Leroy
<christophe.leroy@csgroup.eu> wrote:
>
>
>
> On 16/03/2021 at 04:17, Jordan Niethe wrote:
> > From: Russell Currey <ruscur@russell.cc>
> >
> > With CONFIG_STRICT_KERNEL_RWX=y and CONFIG_KPROBES=y, there will be one
> > W+X page at boot by default.  This can be tested with
> > CONFIG_PPC_PTDUMP=y and CONFIG_PPC_DEBUG_WX=y set, and checking the
> > kernel log during boot.
> >
>
> This text is confusing. I don't understand what the status is before the patch, and what the
> status is after.
>
> "there will be one ...", does it mean after the patch ?
>
> > Add an arch specific insn page allocator which returns RO pages if
> > STRICT_KERNEL_RWX is enabled. This page is only written to with
> > patch_instruction() which is able to write RO pages.
>
> "an" or "the" arch specific insn page allocator ?
>
> >
> > Reviewed-by: Daniel Axtens <dja@axtens.net>
> > Signed-off-by: Russell Currey <ruscur@russell.cc>
> > Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> > [jpn: Reword commit message, switch from vmalloc_exec(), add
> >        free_insn_page()]
> > Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> > ---
> > v9: - vmalloc_exec() no longer exists
> >      - Set the page to RW before freeing it
> > ---
> >   arch/powerpc/kernel/kprobes.c | 22 ++++++++++++++++++++++
> >   1 file changed, 22 insertions(+)
> >
> > diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
> > index 01ab2163659e..bb7e4d321988 100644
> > --- a/arch/powerpc/kernel/kprobes.c
> > +++ b/arch/powerpc/kernel/kprobes.c
> > @@ -25,6 +25,8 @@
> >   #include <asm/sections.h>
> >   #include <asm/inst.h>
> >   #include <linux/uaccess.h>
> > +#include <linux/set_memory.h>
> > +#include <linux/vmalloc.h>
> >
> >   DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
> >   DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
> > @@ -103,6 +105,26 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
> >       return addr;
> >   }
> >
> > +void *alloc_insn_page(void)
> > +{
> > +     void *page = vmalloc(PAGE_SIZE);
>
> Can't do that on book3s/32, see https://github.com/linuxppc/linux/commit/6ca05532 and
> https://github.com/linuxppc/linux/commit/7fbc22ce
>
> Should do:
>         return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL,
>                                     PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
>                                     __builtin_return_address(0));
>
>
> To keep it simple, you'll probably need to define MODULES_VADDR and MODULES_END as resp
> VMALLOC_START and VMALLOC_END when they are not defined, maybe in asm/pgtable.h
Thank you, I had overlooked that. I will do it like that in the next revision.
>
> > +
> > +     if (!page)
> > +             return NULL;
> > +
> > +     set_memory_ro((unsigned long)page, 1);
> > +     set_memory_x((unsigned long)page, 1);
> > +
> > +     return page;
> > +}
> > +
> > +void free_insn_page(void *page)
> > +{
> > +     set_memory_nx((unsigned long)page, 1);
> > +     set_memory_rw((unsigned long)page, 1);
> > +     vfree(page);
> > +}
> > +
> >   int arch_prepare_kprobe(struct kprobe *p)
> >   {
> >       int ret = 0;
> >
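
A minimal sketch of the allocator Christophe suggests above, assuming the
MODULES_VADDR/MODULES_END fallback lands in asm/pgtable.h as proposed
(illustrative, not the actual patch):

	/* fallback for MMUs that do not define a dedicated module area */
	#ifndef MODULES_VADDR
	#define MODULES_VADDR	VMALLOC_START
	#define MODULES_END	VMALLOC_END
	#endif

	void *alloc_insn_page(void)
	{
		/* RO+X from the start; written only via patch_instruction() */
		return __vmalloc_node_range(PAGE_SIZE, 1, MODULES_VADDR, MODULES_END,
					    GFP_KERNEL, PAGE_KERNEL_ROX,
					    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
					    __builtin_return_address(0));
	}

With VM_FLUSH_RESET_PERMS, vfree() resets the page permissions itself, so the
explicit set_memory_rw()/set_memory_nx() calls in free_insn_page() above would
presumably no longer be needed.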


* Re: [PATCH v9 7/8] powerpc/mm: implement set_memory_attr()
  2021-03-16  7:25   ` Christophe Leroy
@ 2021-03-17  0:54     ` Jordan Niethe
  0 siblings, 0 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-17  0:54 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Christophe Leroy, ajd, Nicholas Piggin, naveen.n.rao,
	Daniel Axtens, linuxppc-dev, kbuild test robot

On Tue, Mar 16, 2021 at 6:25 PM Christophe Leroy
<christophe.leroy@csgroup.eu> wrote:
>
>
>
> On 16/03/2021 04:17, Jordan Niethe wrote:
> > From: Christophe Leroy <christophe.leroy@c-s.fr>
>
> Can you please update the whole series with my new email address: christophe.leroy@csgroup.eu
Of course, I shall do that.
>
>
>
> >
> > In addition to the set_memory_xx() functions, which allow changing
> > the memory attributes of not (yet) used memory regions, implement a
> > set_memory_attr() function to:
> > - set the final memory protection after init on currently used
> > kernel regions.
> > - enable/disable kernel memory regions in the scope of DEBUG_PAGEALLOC.
> >
> > Unlike set_memory_xx(), which can act in three steps because the regions
> > are unused, this function must modify pages 'on the fly', as the kernel is
> > executing from them. At the moment only PPC32 will use it, and changing
> > page attributes on the fly is not an issue there.
> >
> > Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> > Reported-by: kbuild test robot <lkp@intel.com>
> > [ruscur: cast "data" to unsigned long instead of int]
> > Signed-off-by: Russell Currey <ruscur@russell.cc>
> > Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> > ---
> >   arch/powerpc/include/asm/set_memory.h |  2 ++
> >   arch/powerpc/mm/pageattr.c            | 33 +++++++++++++++++++++++++++
> >   2 files changed, 35 insertions(+)
> >
> > diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
> > index 64011ea444b4..b040094f7920 100644
> > --- a/arch/powerpc/include/asm/set_memory.h
> > +++ b/arch/powerpc/include/asm/set_memory.h
> > @@ -29,4 +29,6 @@ static inline int set_memory_x(unsigned long addr, int numpages)
> >       return change_memory_attr(addr, numpages, SET_MEMORY_X);
> >   }
> >
> > +int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
> > +
> >   #endif
> > diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
> > index 2da3fbab6ff7..2fde1b195c85 100644
> > --- a/arch/powerpc/mm/pageattr.c
> > +++ b/arch/powerpc/mm/pageattr.c
> > @@ -79,3 +79,36 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
> >       return apply_to_existing_page_range(&init_mm, start, sz,
> >                                           change_page_attr, (void *)action);
> >   }
> > +
> > +/*
> > + * Set the attributes of a page:
> > + *
> > + * This function is used by PPC32 at the end of init to set final kernel memory
> > + * protection. It includes changing the mapping of the page it is executing from
> > + * and data pages it is using.
> > + */
> > +static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
> > +{
> > +     pgprot_t prot = __pgprot((unsigned long)data);
> > +
> > +     spin_lock(&init_mm.page_table_lock);
> > +
> > +     set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
> > +     flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> > +
> > +     spin_unlock(&init_mm.page_table_lock);
> > +
> > +     return 0;
> > +}
> > +
> > +int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
> > +{
> > +     unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
> > +     unsigned long sz = numpages * PAGE_SIZE;
> > +
> > +     if (numpages <= 0)
> > +             return 0;
> > +
> > +     return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
> > +                                         (void *)pgprot_val(prot));
> > +}
> >
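
For illustration, the PPC32 end-of-init usage the commit message describes
might look roughly like this (hypothetical region and protection; patch 8/8
carries the real callers):

	/* e.g. make the rodata region read-only once init is done */
	unsigned long numpages = PFN_UP((unsigned long)__init_begin) -
				 PFN_DOWN((unsigned long)__start_rodata);

	set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);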


* Re: [PATCH v9 5/8] powerpc: Set ARCH_HAS_STRICT_MODULE_RWX
  2021-03-16  6:51   ` Christophe Leroy
@ 2021-03-17  2:15     ` Jordan Niethe
  2021-03-17  5:43       ` Christophe Leroy
  0 siblings, 1 reply; 24+ messages in thread
From: Jordan Niethe @ 2021-03-17  2:15 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Christophe Leroy, ajd, Nicholas Piggin, naveen.n.rao,
	linuxppc-dev, Daniel Axtens

On Tue, Mar 16, 2021 at 5:51 PM Christophe Leroy
<christophe.leroy@csgroup.eu> wrote:
>
>
>
> On 16/03/2021 04:17, Jordan Niethe wrote:
> > From: Russell Currey <ruscur@russell.cc>
> >
> > To enable strict module RWX on powerpc, set:
> >
> >      CONFIG_STRICT_MODULE_RWX=y
> >
> > You should also have CONFIG_STRICT_KERNEL_RWX=y set to have any real
> > security benefit.
> >
> > ARCH_HAS_STRICT_MODULE_RWX is set to require ARCH_HAS_STRICT_KERNEL_RWX.
> > This is due to a quirk in arch/Kconfig and arch/powerpc/Kconfig that
> > makes STRICT_MODULE_RWX *on by default* in configurations where
> > STRICT_KERNEL_RWX is *unavailable*.
>
> Not that easy on book3s/32. On it, you can't protect memory against execution on a page basis, you
> can only do it on a segment basis. So in order to do that, we would need to allocate two areas of
> memory: one in module space for text and one in vmalloc space for data.
>
> See https://github.com/linuxppc/linux/commit/6ca05532 and
> https://github.com/linuxppc/linux/commit/7fbc22ce
Would it be ok to just make ARCH_HAS_STRICT_MODULE_RWX conditional on 64s?
>
>
> >
> > Since this doesn't make much sense, and module RWX without kernel RWX
> > doesn't make much sense, having the same dependencies as kernel RWX
> > works around this problem.
> >
> > Signed-off-by: Russell Currey <ruscur@russell.cc>
> > Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> > ---
> >   arch/powerpc/Kconfig | 1 +
> >   1 file changed, 1 insertion(+)
> >
> > diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> > index 4498a27ac9db..d9cadc4212d0 100644
> > --- a/arch/powerpc/Kconfig
> > +++ b/arch/powerpc/Kconfig
> > @@ -137,6 +137,7 @@ config PPC
> >       select ARCH_HAS_SCALED_CPUTIME          if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
> >       select ARCH_HAS_SET_MEMORY
> >       select ARCH_HAS_STRICT_KERNEL_RWX       if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
> > +     select ARCH_HAS_STRICT_MODULE_RWX       if ARCH_HAS_STRICT_KERNEL_RWX
> >       select ARCH_HAS_TICK_BROADCAST          if GENERIC_CLOCKEVENTS_BROADCAST
> >       select ARCH_HAS_UACCESS_FLUSHCACHE
> >       select ARCH_HAS_COPY_MC                 if PPC64
> >


* Re: [PATCH v9 5/8] powerpc: Set ARCH_HAS_STRICT_MODULE_RWX
  2021-03-17  2:15     ` Jordan Niethe
@ 2021-03-17  5:43       ` Christophe Leroy
  0 siblings, 0 replies; 24+ messages in thread
From: Christophe Leroy @ 2021-03-17  5:43 UTC (permalink / raw)
  To: Jordan Niethe
  Cc: ajd, Nicholas Piggin, naveen.n.rao, linuxppc-dev, Daniel Axtens



On 17/03/2021 03:15, Jordan Niethe wrote:
> On Tue, Mar 16, 2021 at 5:51 PM Christophe Leroy
> <christophe.leroy@csgroup.eu> wrote:
>>
>>
>>
>> On 16/03/2021 04:17, Jordan Niethe wrote:
>>> From: Russell Currey <ruscur@russell.cc>
>>>
>>> To enable strict module RWX on powerpc, set:
>>>
>>>       CONFIG_STRICT_MODULE_RWX=y
>>>
>>> You should also have CONFIG_STRICT_KERNEL_RWX=y set to have any real
>>> security benefit.
>>>
>>> ARCH_HAS_STRICT_MODULE_RWX is set to require ARCH_HAS_STRICT_KERNEL_RWX.
>>> This is due to a quirk in arch/Kconfig and arch/powerpc/Kconfig that
>>> makes STRICT_MODULE_RWX *on by default* in configurations where
>>> STRICT_KERNEL_RWX is *unavailable*.
>>
>> Not that easy on book3s/32. On it, you can't protect memory against execution on a page basis, you
>> can only do it on a segment basis. So in order to do that, we would need to allocate two areas of
>> memory: one in module space for text and one in vmalloc space for data.
>>
>> See https://github.com/linuxppc/linux/commit/6ca05532 and
>> https://github.com/linuxppc/linux/commit/7fbc22ce
> Would it be ok to just make ARCH_HAS_STRICT_MODULE_RWX conditional on 64s?

I think making it conditional on !PPC_BOOK3S_604 should be enough.
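
That is, something along these lines (a sketch combining the suggested
condition with the select line from the quoted patch):

	select ARCH_HAS_STRICT_MODULE_RWX	if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_604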

>>
>>
>>>
>>> Since this doesn't make much sense, and module RWX without kernel RWX
>>> doesn't make much sense, having the same dependencies as kernel RWX
>>> works around this problem.
>>>
>>> Signed-off-by: Russell Currey <ruscur@russell.cc>
>>> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
>>> ---
>>>    arch/powerpc/Kconfig | 1 +
>>>    1 file changed, 1 insertion(+)
>>>
>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>> index 4498a27ac9db..d9cadc4212d0 100644
>>> --- a/arch/powerpc/Kconfig
>>> +++ b/arch/powerpc/Kconfig
>>> @@ -137,6 +137,7 @@ config PPC
>>>        select ARCH_HAS_SCALED_CPUTIME          if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
>>>        select ARCH_HAS_SET_MEMORY
>>>        select ARCH_HAS_STRICT_KERNEL_RWX       if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
>>> +     select ARCH_HAS_STRICT_MODULE_RWX       if ARCH_HAS_STRICT_KERNEL_RWX
>>>        select ARCH_HAS_TICK_BROADCAST          if GENERIC_CLOCKEVENTS_BROADCAST
>>>        select ARCH_HAS_UACCESS_FLUSHCACHE
>>>        select ARCH_HAS_COPY_MC                 if PPC64
>>>


* Re: [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO
  2021-03-16  3:17 ` [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO Jordan Niethe
  2021-03-16  6:44   ` Christophe Leroy
@ 2021-03-17  6:12   ` Christophe Leroy
  2021-03-18  2:42     ` Jordan Niethe
  1 sibling, 1 reply; 24+ messages in thread
From: Christophe Leroy @ 2021-03-17  6:12 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev
  Cc: christophe.leroy, naveen.n.rao, ajd, npiggin, dja



On 16/03/2021 04:17, Jordan Niethe wrote:
> From: Russell Currey <ruscur@russell.cc>
> 
> With CONFIG_STRICT_KERNEL_RWX=y and CONFIG_KPROBES=y, there will be one
> W+X page at boot by default.  This can be tested with
> CONFIG_PPC_PTDUMP=y and CONFIG_PPC_DEBUG_WX=y set, and checking the
> kernel log during boot.
> 
> Add an arch specific insn page allocator which returns RO pages if
> STRICT_KERNEL_RWX is enabled. This page is only written to with
> patch_instruction() which is able to write RO pages.
> 

Did you investigate BPF? The problem looks more or less similar to kprobes:

bpf_jit_compile() in arch/powerpc/net/bpf_jit_comp.c calls module_alloc(), which provides it with
PAGE_KERNEL_TEXT memory, ie RWX. That function is only used on PPC32, which still has Classic BPF,
and this is about to go away with a future series:
https://patchwork.ozlabs.org/project/linuxppc-dev/cover/cover.1608112796.git.christophe.leroy@csgroup.eu/

PPC64 has Extended BPF instead, and PPC32 will have it in the future too.
bpf_int_jit_compile() in arch/powerpc/net/bpf_jit_comp64.c calls bpf_jit_binary_alloc() which uses
bpf_jit_alloc_exec().

bpf_jit_alloc_exec() is a weak function that should be redefined for powerpc, I think, more or less
like alloc_insn_page() for kprobes.

Christophe


* Re: [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier
  2021-03-17  0:38     ` Jordan Niethe
@ 2021-03-17 12:04       ` Michael Ellerman
  0 siblings, 0 replies; 24+ messages in thread
From: Michael Ellerman @ 2021-03-17 12:04 UTC (permalink / raw)
  To: Jordan Niethe, Christophe Leroy
  Cc: Christophe Leroy, ajd, Nicholas Piggin, naveen.n.rao,
	linuxppc-dev, Daniel Axtens

Jordan Niethe <jniethe5@gmail.com> writes:
> On Tue, Mar 16, 2021 at 5:32 PM Christophe Leroy
> <christophe.leroy@csgroup.eu> wrote:
>>
>> On 16/03/2021 04:17, Jordan Niethe wrote:
>> > setup_text_poke_area() is a late init call so it runs before
>> > mark_rodata_ro() and after the init calls. This lets all the init code
>> > patching simply write to their locations. In the future, kprobes is
>> > going to allocate its instruction pages RO which means they will need
>> > setup_text_poke_area() to have already been called for their code
>> > patching. However, init_kprobes() (which allocates and patches some
>> > instruction pages) is an early init call so it happens before
>> > setup_text_poke_area().
>> >
>> > start_kernel() calls poking_init() before any of the init calls. On
>> > powerpc, poking_init() is currently a nop. setup_text_poke_area() relies
>> > on kernel virtual memory, cpu hotplug and per_cpu_areas being setup.
>> > setup_per_cpu_areas(), boot_cpu_hotplug_init() and mm_init() are called
>> > before poking_init().
>> >
>> > Turn setup_text_poke_area() into poking_init().
>> >
>> > Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
>> > ---
>> > v9: New to series
>> > ---
>> >   arch/powerpc/lib/code-patching.c | 12 ++++--------
>> >   1 file changed, 4 insertions(+), 8 deletions(-)
>> >
>> > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
>> > index 2333625b5e31..b28afa1133db 100644
>> > --- a/arch/powerpc/lib/code-patching.c
>> > +++ b/arch/powerpc/lib/code-patching.c
>> > @@ -65,14 +65,11 @@ static int text_area_cpu_down(unsigned int cpu)
>> >   }
>> >
>> >   /*
>> > - * Run as a late init call. This allows all the boot time patching to be done
>> > - * simply by patching the code, and then we're called here prior to
>> > - * mark_rodata_ro(), which happens after all init calls are run. Although
>> > - * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
>> > - * it as being preferable to a kernel that will crash later when someone tries
>> > - * to use patch_instruction().
>> > + * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
>> > + * we judge it as being preferable to a kernel that will crash later when
>> > + * someone tries to use patch_instruction().
>>
>> Please use WARN_ON(), see why at https://www.kernel.org/doc/html/latest/process/deprecated.html

> OK, I can include a change to WARN_ON() as a separate patch.

I'm not convinced we should change this to a WARN_ON.

Being able to patch the kernel text is not optional.

Patching jump labels has no ability to return an error, and the code
that uses them has no concept of the jump label not taking the correct
polarity.

Silently failing the patch is like randomly flipping an if condition
somewhere in the kernel and hoping that everything will continue
working.

cheers
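
For context, a sketch of what the resulting hook looks like (the hotplug
callbacks are the existing ones in code-patching.c; the cpuhp state name is
an assumption):

	void __init poking_init(void)
	{
		int ret;

		/* the same CPU-hotplug setup the late initcall used to do */
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke:online",
					text_area_cpu_up, text_area_cpu_down);

		/* failure here means patch_instruction() would crash later */
		BUG_ON(ret < 0);
	}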


* Re: [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO
  2021-03-17  6:12   ` Christophe Leroy
@ 2021-03-18  2:42     ` Jordan Niethe
  0 siblings, 0 replies; 24+ messages in thread
From: Jordan Niethe @ 2021-03-18  2:42 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Christophe Leroy, ajd, Nicholas Piggin, naveen.n.rao,
	linuxppc-dev, Daniel Axtens

On Wed, Mar 17, 2021 at 5:12 PM Christophe Leroy
<christophe.leroy@csgroup.eu> wrote:
>
>
>
> On 16/03/2021 04:17, Jordan Niethe wrote:
> > From: Russell Currey <ruscur@russell.cc>
> >
> > With CONFIG_STRICT_KERNEL_RWX=y and CONFIG_KPROBES=y, there will be one
> > W+X page at boot by default.  This can be tested with
> > CONFIG_PPC_PTDUMP=y and CONFIG_PPC_DEBUG_WX=y set, and checking the
> > kernel log during boot.
> >
> > Add an arch specific insn page allocator which returns RO pages if
> > STRICT_KERNEL_RWX is enabled. This page is only written to with
> > patch_instruction() which is able to write RO pages.
> >
>
> Did you investigate BPF? The problem looks more or less similar to kprobes:
>
> bpf_jit_compile() in arch/powerpc/net/bpf_jit_comp.c calls module_alloc(), which provides it with
> PAGE_KERNEL_TEXT memory, ie RWX. That function is only used on PPC32, which still has Classic BPF,
> and this is about to go away with a future series:
> https://patchwork.ozlabs.org/project/linuxppc-dev/cover/cover.1608112796.git.christophe.leroy@csgroup.eu/
>
> PPC64 has Extended BPF instead, and PPC32 will have it in the future too.
> bpf_int_jit_compile() in arch/powerpc/net/bpf_jit_comp64.c calls bpf_jit_binary_alloc() which uses
> bpf_jit_alloc_exec().
>
> bpf_jit_alloc_exec() is a weak function that should be redefined for powerpc, I think, more or less
> like alloc_insn_page() for kprobes.
Thanks, that is a good point. I will handle bpf with the next revision.
>
> Christophe
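
A sketch of the powerpc override Christophe describes (hypothetical: it
assumes the JIT, like kprobes, would write the image through
patch_instruction(), which the current BPF code does not yet do):

	void *bpf_jit_alloc_exec(unsigned long size)
	{
		/* allocate JIT images RO+X up front, like alloc_insn_page() */
		return __vmalloc_node_range(size, PAGE_SIZE, MODULES_VADDR, MODULES_END,
					    GFP_KERNEL, PAGE_KERNEL_ROX,
					    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
					    __builtin_return_address(0));
	}

	void bpf_jit_free_exec(void *addr)
	{
		vfree(addr);
	}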


* Re: [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines
  2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
                   ` (6 preceding siblings ...)
  2021-03-16  3:17 ` [PATCH v9 8/8] powerpc/32: use set_memory_attr() Jordan Niethe
@ 2021-03-19  1:19 ` Michael Ellerman
  7 siblings, 0 replies; 24+ messages in thread
From: Michael Ellerman @ 2021-03-19  1:19 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev
  Cc: christophe.leroy, ajd, Jordan Niethe, npiggin, naveen.n.rao, dja

Jordan Niethe <jniethe5@gmail.com> writes:
> From: Russell Currey <ruscur@russell.cc>
>
> The set_memory_{ro/rw/nx/x}() functions are required for STRICT_MODULE_RWX,
> and are generally useful primitives to have.  This implementation is
> designed to be completely generic across powerpc's many MMUs.
>
> It's possible that this could be optimised to be faster for specific
> MMUs, but the focus is on having a generic and safe implementation for
> now.

This won't work for the linear mapping with HPT on book3s 64, because
the linear mapping is not in the kernel page tables.

apply_to_existing_page_range() should work that out and return an error.
But I'm not sure if callers handle that well or at all.

We might want to add a WARN_ON_ONCE() in change_memory_attr(), at least
to begin with, to report those errors, so we know when we are failing to
set permissions. Rather than silently failing and then crashing some
time later due to the permissions being wrong for some mapping.

cheers
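
A sketch of the reporting suggested above, as it might sit in
change_memory_attr() (the return-value plumbing is an assumption):

	int change_memory_attr(unsigned long addr, int numpages, long action)
	{
		unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
		unsigned long sz = numpages * PAGE_SIZE;
		int ret;

		if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) || numpages <= 0)
			return 0;

		ret = apply_to_existing_page_range(&init_mm, start, sz,
						   change_page_attr, (void *)action);

		/* catch e.g. HPT linear-map pages missing from the kernel
		 * page tables instead of failing silently */
		WARN_ON_ONCE(ret);

		return ret;
	}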


> This implementation does not handle cases where the caller is attempting
> to change the mapping of the page it is executing from, or if another
> CPU is concurrently using the page being altered.  These cases likely
> shouldn't happen, but a more complex implementation with MMU-specific code
> could safely handle them, so that is left as a TODO for now.
>
> These functions do nothing if STRICT_KERNEL_RWX is not enabled.
>
> Reviewed-by: Daniel Axtens <dja@axtens.net>
> Signed-off-by: Russell Currey <ruscur@russell.cc>
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> [jpn: rebase on next plus "powerpc/mm/64s: Allow STRICT_KERNEL_RWX again"]
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
>  arch/powerpc/Kconfig                  |  1 +
>  arch/powerpc/include/asm/set_memory.h | 32 +++++++++++
>  arch/powerpc/mm/Makefile              |  2 +-
>  arch/powerpc/mm/pageattr.c            | 81 +++++++++++++++++++++++++++
>  4 files changed, 115 insertions(+), 1 deletion(-)
>  create mode 100644 arch/powerpc/include/asm/set_memory.h
>  create mode 100644 arch/powerpc/mm/pageattr.c
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index fc7f5c5933e6..4498a27ac9db 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -135,6 +135,7 @@ config PPC
>  	select ARCH_HAS_MEMBARRIER_CALLBACKS
>  	select ARCH_HAS_MEMBARRIER_SYNC_CORE
>  	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
> +	select ARCH_HAS_SET_MEMORY
>  	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
>  	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
>  	select ARCH_HAS_UACCESS_FLUSHCACHE
> diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
> new file mode 100644
> index 000000000000..64011ea444b4
> --- /dev/null
> +++ b/arch/powerpc/include/asm/set_memory.h
> @@ -0,0 +1,32 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_POWERPC_SET_MEMORY_H
> +#define _ASM_POWERPC_SET_MEMORY_H
> +
> +#define SET_MEMORY_RO	0
> +#define SET_MEMORY_RW	1
> +#define SET_MEMORY_NX	2
> +#define SET_MEMORY_X	3
> +
> +int change_memory_attr(unsigned long addr, int numpages, long action);
> +
> +static inline int set_memory_ro(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
> +}
> +
> +static inline int set_memory_rw(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_RW);
> +}
> +
> +static inline int set_memory_nx(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_NX);
> +}
> +
> +static inline int set_memory_x(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_X);
> +}
> +
> +#endif
> diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
> index 3b4e9e4e25ea..d8a08abde1ae 100644
> --- a/arch/powerpc/mm/Makefile
> +++ b/arch/powerpc/mm/Makefile
> @@ -5,7 +5,7 @@
>  
>  ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)
>  
> -obj-y				:= fault.o mem.o pgtable.o mmap.o maccess.o \
> +obj-y				:= fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
>  				   init_$(BITS).o pgtable_$(BITS).o \
>  				   pgtable-frag.o ioremap.o ioremap_$(BITS).o \
>  				   init-common.o mmu_context.o drmem.o
> diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
> new file mode 100644
> index 000000000000..2da3fbab6ff7
> --- /dev/null
> +++ b/arch/powerpc/mm/pageattr.c
> @@ -0,0 +1,81 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +/*
> + * MMU-generic set_memory implementation for powerpc
> + *
> + * Copyright 2019, IBM Corporation.
> + */
> +
> +#include <linux/mm.h>
> +#include <linux/set_memory.h>
> +
> +#include <asm/mmu.h>
> +#include <asm/page.h>
> +#include <asm/pgtable.h>
> +
> +
> +/*
> + * Updates the attributes of a page in three steps:
> + *
> + * 1. invalidate the page table entry
> + * 2. flush the TLB
> + * 3. install the new entry with the updated attributes
> + *
> + * This is unsafe if the caller is attempting to change the mapping of the
> + * page it is executing from, or if another CPU is concurrently using the
> + * page being altered.
> + *
> + * TODO make the implementation resistant to this.
> + *
> + * NOTE: can be dangerous to call without STRICT_KERNEL_RWX
> + */
> +static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
> +{
> +	long action = (long)data;
> +	pte_t pte;
> +
> +	spin_lock(&init_mm.page_table_lock);
> +
> +	/* invalidate the PTE so it's safe to modify */
> +	pte = ptep_get_and_clear(&init_mm, addr, ptep);
> +	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> +
> +	/* modify the PTE bits as desired, then apply */
> +	switch (action) {
> +	case SET_MEMORY_RO:
> +		pte = pte_wrprotect(pte);
> +		break;
> +	case SET_MEMORY_RW:
> +		pte = pte_mkwrite(pte);
> +		break;
> +	case SET_MEMORY_NX:
> +		pte = pte_exprotect(pte);
> +		break;
> +	case SET_MEMORY_X:
> +		pte = pte_mkexec(pte);
> +		break;
> +	default:
> +		WARN_ON_ONCE(1);
> +		break;
> +	}
> +
> +	set_pte_at(&init_mm, addr, ptep, pte);
> +	spin_unlock(&init_mm.page_table_lock);
> +
> +	return 0;
> +}
> +
> +int change_memory_attr(unsigned long addr, int numpages, long action)
> +{
> +	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
> +	unsigned long sz = numpages * PAGE_SIZE;
> +
> +	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
> +		return 0;
> +
> +	if (numpages <= 0)
> +		return 0;
> +
> +	return apply_to_existing_page_range(&init_mm, start, sz,
> +					    change_page_attr, (void *)action);
> +}
> -- 
> 2.25.1



Thread overview: 24+ messages
2021-03-16  3:17 [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Jordan Niethe
2021-03-16  3:17 ` [PATCH v9 2/8] powerpc/lib/code-patching: Set up Strict RWX patching earlier Jordan Niethe
2021-03-16  3:36   ` Russell Currey
2021-03-16  6:32   ` Christophe Leroy
2021-03-17  0:38     ` Jordan Niethe
2021-03-17 12:04       ` Michael Ellerman
2021-03-16  3:17 ` [PATCH v9 3/8] powerpc/kprobes: Mark newly allocated probes as RO Jordan Niethe
2021-03-16  6:44   ` Christophe Leroy
2021-03-17  0:50     ` Jordan Niethe
2021-03-17  0:52     ` Jordan Niethe
2021-03-17  6:12   ` Christophe Leroy
2021-03-18  2:42     ` Jordan Niethe
2021-03-16  3:17 ` [PATCH v9 4/8] powerpc/mm/ptdump: debugfs handler for W+X checks at runtime Jordan Niethe
2021-03-16  6:47   ` Christophe Leroy
2021-03-16  3:17 ` [PATCH v9 5/8] powerpc: Set ARCH_HAS_STRICT_MODULE_RWX Jordan Niethe
2021-03-16  6:51   ` Christophe Leroy
2021-03-17  2:15     ` Jordan Niethe
2021-03-17  5:43       ` Christophe Leroy
2021-03-16  3:17 ` [PATCH v9 6/8] powerpc/configs: Enable STRICT_MODULE_RWX in skiroot_defconfig Jordan Niethe
2021-03-16  3:17 ` [PATCH v9 7/8] powerpc/mm: implement set_memory_attr() Jordan Niethe
2021-03-16  7:25   ` Christophe Leroy
2021-03-17  0:54     ` Jordan Niethe
2021-03-16  3:17 ` [PATCH v9 8/8] powerpc/32: use set_memory_attr() Jordan Niethe
2021-03-19  1:19 ` [PATCH v9 1/8] powerpc/mm: Implement set_memory() routines Michael Ellerman
