All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v7 0/5] Use per-CPU temporary mappings for patching on Radix MMU
@ 2021-11-10  0:37 Jordan Niethe
  2021-11-10  0:37 ` [PATCH v7 1/5] powerpc: Allow clearing and restoring registers independent of saved breakpoint state Jordan Niethe
                   ` (4 more replies)
  0 siblings, 5 replies; 8+ messages in thread
From: Jordan Niethe @ 2021-11-10  0:37 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, cmr

This is a revision of Chris' series that introduces a per-CPU temporary mm to
be used for patching with strict RWX on Radix MMUs.

The previous version of the series is here:
https://lore.kernel.org/linuxppc-dev/20210911022904.30962-1-cmr@bluescreens.de/

v7: - introduce helper functions for clearing and restoring breakpoint
      registers when using the temporary mm
    - use a new patch_instruction_mm() function instead of needing repeated
      conditionals and a struct to save state to work within
      do_patch_instruction() 
    - include a ptesync after setting the pte

Christopher M. Riedl (4):
  powerpc/64s: Introduce temporary mm for Radix MMU
  powerpc: Rework and improve STRICT_KERNEL_RWX patching
  powerpc: Use WARN_ON and fix check in poking_init
  powerpc/64s: Initialize and use a temporary mm for patching on Radix

Jordan Niethe (1):
  powerpc: Allow clearing and restoring registers independent of saved
    breakpoint state

 arch/powerpc/include/asm/debug.h |   2 +
 arch/powerpc/kernel/process.c    |  36 ++++++-
 arch/powerpc/lib/code-patching.c | 162 +++++++++++++++++++++++++++----
 3 files changed, 176 insertions(+), 24 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v7 1/5] powerpc: Allow clearing and restoring registers independent of saved breakpoint state
  2021-11-10  0:37 [PATCH v7 0/5] Use per-CPU temporary mappings for patching on Radix MMU Jordan Niethe
@ 2021-11-10  0:37 ` Jordan Niethe
  2021-11-10  0:37 ` [PATCH v7 2/5] powerpc/64s: Introduce temporary mm for Radix MMU Jordan Niethe
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Jordan Niethe @ 2021-11-10  0:37 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, cmr

For the coming temporary mm used for instruction patching, the
breakpoint registers need to be cleared to prevent them from
accidentally being triggered. As soon as the patching is done, the
breakpoints will be restored. The breakpoint state is stored in the per
cpu variable current_brk[]. Add a pause_breakpoints() function which will
clear the breakpoint registers without touching the state in
current_brk[]. Add a paired function unpause_breakpoints() which will move
the state in current_brk[] back to the registers.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v7: New to series
---
 arch/powerpc/include/asm/debug.h |  2 ++
 arch/powerpc/kernel/process.c    | 36 +++++++++++++++++++++++++++++---
 2 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 86a14736c76c..83f2dc3785e8 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -46,6 +46,8 @@ static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
 #endif
 
 void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk);
+void pause_breakpoints(void);
+void unpause_breakpoints(void);
 bool ppc_breakpoint_available(void);
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 extern void do_send_trap(struct pt_regs *regs, unsigned long address,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 406d7ee9e322..22ed72430683 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -688,6 +688,7 @@ DEFINE_INTERRUPT_HANDLER(do_break)
 
 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
 
+
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 /*
  * Set the debug registers back to their default "safe" values.
@@ -865,10 +866,8 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
 	return 0;
 }
 
-void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
+static void ____set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
 {
-	memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
-
 	if (dawr_enabled())
 		// Power8 or later
 		set_dawr(nr, brk);
@@ -882,6 +881,12 @@ void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
 		WARN_ON_ONCE(1);
 }
 
+void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
+{
+	memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
+	____set_breakpoint(nr, brk);
+}
+
 /* Check if we have DAWR or DABR hardware */
 bool ppc_breakpoint_available(void)
 {
@@ -894,6 +899,31 @@ bool ppc_breakpoint_available(void)
 }
 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
 
+/* Disable the breakpoint in hardware without touching current_brk[] */
+void pause_breakpoints(void)
+{
+	struct arch_hw_breakpoint brk = {0};
+	int i;
+
+	if (!ppc_breakpoint_available())
+		return;
+
+	for (i = 0; i < nr_wp_slots(); i++)
+		____set_breakpoint(i, &brk);
+}
+
+/* Re-enable the breakpoint in hardware from current_brk[] */
+void unpause_breakpoints(void)
+{
+	int i;
+
+	if (!ppc_breakpoint_available())
+		return;
+
+	for (i = 0; i < nr_wp_slots(); i++)
+		____set_breakpoint(i, this_cpu_ptr(&current_brk[i]));
+}
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 
 static inline bool tm_enabled(struct task_struct *tsk)
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v7 2/5] powerpc/64s: Introduce temporary mm for Radix MMU
  2021-11-10  0:37 [PATCH v7 0/5] Use per-CPU temporary mappings for patching on Radix MMU Jordan Niethe
  2021-11-10  0:37 ` [PATCH v7 1/5] powerpc: Allow clearing and restoring registers independent of saved breakpoint state Jordan Niethe
@ 2021-11-10  0:37 ` Jordan Niethe
  2021-11-10  0:37 ` [PATCH v7 3/5] powerpc: Rework and improve STRICT_KERNEL_RWX patching Jordan Niethe
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Jordan Niethe @ 2021-11-10  0:37 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, cmr

From: "Christopher M. Riedl" <cmr@bluescreens.de>

x86 supports the notion of a temporary mm which restricts access to
temporary PTEs to a single CPU. A temporary mm is useful for situations
where a CPU needs to perform sensitive operations (such as patching a
STRICT_KERNEL_RWX kernel) requiring temporary mappings without exposing
said mappings to other CPUs. Another benefit is that other CPU TLBs do
not need to be flushed when the temporary mm is torn down.

Mappings in the temporary mm can be set in the userspace portion of the
address-space.

Interrupts must be disabled while the temporary mm is in use. HW
breakpoints, which may have been set by userspace as watchpoints on
addresses now within the temporary mm, are saved and disabled when
loading the temporary mm. The HW breakpoints are restored when unloading
the temporary mm. All HW breakpoints are indiscriminately disabled while
the temporary mm is in use - this may include breakpoints set by perf.

Based on x86 implementation:

commit cefa929c034e
("x86/mm: Introduce temporary mm structs")

Signed-off-by: Christopher M. Riedl <cmr@bluescreens.de>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v7: - use pause_breakpoints()/unpause_breakpoints()
    - simplify the temp mm struct, don't need init_temp_mm()
---
 arch/powerpc/lib/code-patching.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c5ed98823835..29a30c3068ff 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -17,6 +17,9 @@
 #include <asm/code-patching.h>
 #include <asm/setup.h>
 #include <asm/inst.h>
+#include <asm/mmu_context.h>
+#include <asm/debug.h>
+#include <asm/tlb.h>
 
 static int __patch_instruction(u32 *exec_addr, struct ppc_inst instr, u32 *patch_addr)
 {
@@ -45,6 +48,32 @@ int raw_patch_instruction(u32 *addr, struct ppc_inst instr)
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
+
+struct temp_mm_state {
+	struct mm_struct *mm;
+};
+
+static inline struct temp_mm_state start_using_temp_mm(struct mm_struct *mm)
+{
+	struct temp_mm_state temp_state;
+
+	lockdep_assert_irqs_disabled();
+	temp_state.mm = current->active_mm;
+	switch_mm_irqs_off(current->active_mm, mm, current);
+
+	WARN_ON(!mm_is_thread_local(mm));
+
+	pause_breakpoints();
+	return temp_state;
+}
+
+static inline void stop_using_temp_mm(struct temp_mm_state prev_state)
+{
+	lockdep_assert_irqs_disabled();
+	switch_mm_irqs_off(current->active_mm, prev_state.mm, current);
+	unpause_breakpoints();
+}
+
 static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
 
 static int text_area_cpu_up(unsigned int cpu)
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v7 3/5] powerpc: Rework and improve STRICT_KERNEL_RWX patching
  2021-11-10  0:37 [PATCH v7 0/5] Use per-CPU temporary mappings for patching on Radix MMU Jordan Niethe
  2021-11-10  0:37 ` [PATCH v7 1/5] powerpc: Allow clearing and restoring registers independent of saved breakpoint state Jordan Niethe
  2021-11-10  0:37 ` [PATCH v7 2/5] powerpc/64s: Introduce temporary mm for Radix MMU Jordan Niethe
@ 2021-11-10  0:37 ` Jordan Niethe
  2022-03-12  7:30   ` Christophe Leroy
  2021-11-10  0:37 ` [PATCH v7 4/5] powerpc: Use WARN_ON and fix check in poking_init Jordan Niethe
  2021-11-10  0:37 ` [PATCH v7 5/5] powerpc/64s: Initialize and use a temporary mm for patching on Radix Jordan Niethe
  4 siblings, 1 reply; 8+ messages in thread
From: Jordan Niethe @ 2021-11-10  0:37 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, cmr

From: "Christopher M. Riedl" <cmr@bluescreens.de>

Rework code-patching with STRICT_KERNEL_RWX to prepare for a later patch
which uses a temporary mm for patching under the Book3s64 Radix MMU.
Make improvements by adding a WARN_ON when the patchsite doesn't match
after patching and return the error from __patch_instruction() properly.

Signed-off-by: Christopher M. Riedl <cmr@bluescreens.de>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v7: still pass addr to map_patch_area()
---
 arch/powerpc/lib/code-patching.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 29a30c3068ff..d586bf9c7581 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -75,6 +75,7 @@ static inline void stop_using_temp_mm(struct temp_mm_state prev_state)
 }
 
 static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
+static DEFINE_PER_CPU(unsigned long, cpu_patching_addr);
 
 static int text_area_cpu_up(unsigned int cpu)
 {
@@ -87,6 +88,7 @@ static int text_area_cpu_up(unsigned int cpu)
 		return -1;
 	}
 	this_cpu_write(text_poke_area, area);
+	this_cpu_write(cpu_patching_addr, (unsigned long)area->addr);
 
 	return 0;
 }
@@ -172,11 +174,10 @@ static inline int unmap_patch_area(unsigned long addr)
 
 static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
 {
-	int err;
+	int err, rc = 0;
 	u32 *patch_addr = NULL;
 	unsigned long flags;
 	unsigned long text_poke_addr;
-	unsigned long kaddr = (unsigned long)addr;
 
 	/*
 	 * During early early boot patch_instruction is called
@@ -188,15 +189,13 @@ static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
 
 	local_irq_save(flags);
 
-	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
-	if (map_patch_area(addr, text_poke_addr)) {
-		err = -1;
+	text_poke_addr = __this_cpu_read(cpu_patching_addr);
+	err = map_patch_area(addr, text_poke_addr);
+	if (err)
 		goto out;
-	}
-
-	patch_addr = (u32 *)(text_poke_addr + (kaddr & ~PAGE_MASK));
 
-	__patch_instruction(addr, instr, patch_addr);
+	patch_addr = (u32 *)(text_poke_addr | offset_in_page(addr));
+	rc = __patch_instruction(addr, instr, patch_addr);
 
 	err = unmap_patch_area(text_poke_addr);
 	if (err)
@@ -204,8 +203,9 @@ static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
 
 out:
 	local_irq_restore(flags);
+	WARN_ON(!ppc_inst_equal(ppc_inst_read(addr), instr));
 
-	return err;
+	return rc ? rc : err;
 }
 #else /* !CONFIG_STRICT_KERNEL_RWX */
 
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v7 4/5] powerpc: Use WARN_ON and fix check in poking_init
  2021-11-10  0:37 [PATCH v7 0/5] Use per-CPU temporary mappings for patching on Radix MMU Jordan Niethe
                   ` (2 preceding siblings ...)
  2021-11-10  0:37 ` [PATCH v7 3/5] powerpc: Rework and improve STRICT_KERNEL_RWX patching Jordan Niethe
@ 2021-11-10  0:37 ` Jordan Niethe
  2021-11-10  0:37 ` [PATCH v7 5/5] powerpc/64s: Initialize and use a temporary mm for patching on Radix Jordan Niethe
  4 siblings, 0 replies; 8+ messages in thread
From: Jordan Niethe @ 2021-11-10  0:37 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, cmr

From: "Christopher M. Riedl" <cmr@bluescreens.de>

The latest kernel docs list BUG_ON() as 'deprecated' and that they
should be replaced with WARN_ON() (or pr_warn()) when possible. The
BUG_ON() in poking_init() warrants a WARN_ON() rather than a pr_warn()
since the error condition is deemed "unreachable".

Also take this opportunity to fix the failure check in the WARN_ON():
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...) returns a positive integer
on success and a negative integer on failure.

Signed-off-by: Christopher M. Riedl <cmr@bluescreens.de>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v7: no change
---
 arch/powerpc/lib/code-patching.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index d586bf9c7581..aa466e4930ec 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -99,16 +99,11 @@ static int text_area_cpu_down(unsigned int cpu)
 	return 0;
 }
 
-/*
- * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
- * we judge it as being preferable to a kernel that will crash later when
- * someone tries to use patch_instruction().
- */
 void __init poking_init(void)
 {
-	BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+	WARN_ON(cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 		"powerpc/text_poke:online", text_area_cpu_up,
-		text_area_cpu_down));
+		text_area_cpu_down) < 0);
 }
 
 /*
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v7 5/5] powerpc/64s: Initialize and use a temporary mm for patching on Radix
  2021-11-10  0:37 [PATCH v7 0/5] Use per-CPU temporary mappings for patching on Radix MMU Jordan Niethe
                   ` (3 preceding siblings ...)
  2021-11-10  0:37 ` [PATCH v7 4/5] powerpc: Use WARN_ON and fix check in poking_init Jordan Niethe
@ 2021-11-10  0:37 ` Jordan Niethe
  4 siblings, 0 replies; 8+ messages in thread
From: Jordan Niethe @ 2021-11-10  0:37 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, cmr

From: "Christopher M. Riedl" <cmr@bluescreens.de>

When code patching a STRICT_KERNEL_RWX kernel the page containing the
address to be patched is temporarily mapped as writeable. Currently, a
per-cpu vmalloc patch area is used for this purpose. While the patch
area is per-cpu, the temporary page mapping is inserted into the kernel
page tables for the duration of patching. The mapping is exposed to CPUs
other than the patching CPU - this is undesirable from a hardening
perspective. Use a temporary mm instead which keeps the mapping local to
the CPU doing the patching.

Use the `poking_init` init hook to prepare a temporary mm and patching
address. Initialize the temporary mm by copying the init mm. Choose a
randomized patching address inside the temporary mm userspace address
space. The patching address is randomized between PAGE_SIZE and
DEFAULT_MAP_WINDOW-PAGE_SIZE.

Bits of entropy with 64K page size on BOOK3S_64:

        bits of entropy = log2(DEFAULT_MAP_WINDOW_USER64 / PAGE_SIZE)

        PAGE_SIZE=64K, DEFAULT_MAP_WINDOW_USER64=128TB
        bits of entropy = log2(128TB / 64K)
	bits of entropy = 31

The upper limit is DEFAULT_MAP_WINDOW due to how the Book3s64 Hash MMU
operates - by default the space above DEFAULT_MAP_WINDOW is not
available. Currently the Hash MMU does not use a temporary mm so
technically this upper limit isn't necessary; however, a larger
randomization range does not further "harden" this overall approach and
future work may introduce patching with a temporary mm on Hash as well.

Randomization occurs only once during initialization at boot for each
possible CPU in the system.

Introduce a new function, patch_instruction_mm(), to perform the
patching with a temporary mapping with write permissions at
patching_addr. Map the page with PAGE_KERNEL to set EAA[0] for the PTE
which ignores the AMR (so no need to unlock/lock KUAP) according to
PowerISA v3.0b Figure 35 on Radix.

Based on x86 implementation:

commit 4fc19708b165
("x86/alternatives: Initialize temporary mm for patching")

and:

commit b3fd8e83ada0
("x86/alternatives: Use temporary mm for text poking")

Signed-off-by: Christopher M. Riedl <cmr@bluescreens.de>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v7: - Change to patch_instruction_mm() instead of map_patch_mm() and
       unmap_patch_mm()
    - include ptesync
---
 arch/powerpc/lib/code-patching.c | 106 +++++++++++++++++++++++++++++--
 1 file changed, 101 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index aa466e4930ec..7722dec4a914 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -11,6 +11,7 @@
 #include <linux/cpuhotplug.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/random.h>
 
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -76,6 +77,7 @@ static inline void stop_using_temp_mm(struct temp_mm_state prev_state)
 
 static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
 static DEFINE_PER_CPU(unsigned long, cpu_patching_addr);
+static DEFINE_PER_CPU(struct mm_struct *, cpu_patching_mm);
 
 static int text_area_cpu_up(unsigned int cpu)
 {
@@ -99,8 +101,48 @@ static int text_area_cpu_down(unsigned int cpu)
 	return 0;
 }
 
+static __always_inline void __poking_init_temp_mm(void)
+{
+	int cpu;
+	spinlock_t *ptl;
+	pte_t *ptep;
+	struct mm_struct *patching_mm;
+	unsigned long patching_addr;
+
+	for_each_possible_cpu(cpu) {
+		patching_mm = copy_init_mm();
+		WARN_ON(!patching_mm);
+		per_cpu(cpu_patching_mm, cpu) = patching_mm;
+
+		/*
+		 * Choose a randomized, page-aligned address from the range:
+		 * [PAGE_SIZE, DEFAULT_MAP_WINDOW - PAGE_SIZE] The lower
+		 * address bound is PAGE_SIZE to avoid the zero-page.  The
+		 * upper address bound is DEFAULT_MAP_WINDOW - PAGE_SIZE to
+		 * stay under DEFAULT_MAP_WINDOW with the Book3s64 Hash MMU.
+		 */
+		patching_addr = PAGE_SIZE + ((get_random_long() & PAGE_MASK) %
+					     (DEFAULT_MAP_WINDOW - 2 * PAGE_SIZE));
+		per_cpu(cpu_patching_addr, cpu) = patching_addr;
+
+		/*
+		 * PTE allocation uses GFP_KERNEL which means we need to
+		 * pre-allocate the PTE here because we cannot do the
+		 * allocation during patching when IRQs are disabled.
+		 */
+		ptep = get_locked_pte(patching_mm, patching_addr, &ptl);
+		WARN_ON(!ptep);
+		pte_unmap_unlock(ptep, ptl);
+	}
+}
+
 void __init poking_init(void)
 {
+	if (radix_enabled()) {
+		__poking_init_temp_mm();
+		return;
+	}
+
 	WARN_ON(cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 		"powerpc/text_poke:online", text_area_cpu_up,
 		text_area_cpu_down) < 0);
@@ -167,6 +209,57 @@ static inline int unmap_patch_area(unsigned long addr)
 	return 0;
 }
 
+/*
+ * This can be called for kernel text or a module.
+ */
+static int patch_instruction_mm(u32 *addr, struct ppc_inst instr)
+{
+	struct mm_struct *patching_mm = __this_cpu_read(cpu_patching_mm);
+	unsigned long text_poke_addr;
+	u32 *patch_addr = NULL;
+	struct temp_mm_state prev;
+	unsigned long flags;
+	struct page *page;
+	spinlock_t *ptl;
+	int rc;
+	pte_t *ptep;
+
+	text_poke_addr = __this_cpu_read(cpu_patching_addr);
+
+	local_irq_save(flags);
+
+	if (is_vmalloc_or_module_addr(addr))
+		page = vmalloc_to_page(addr);
+	else
+		page = virt_to_page(addr);
+
+	ptep = get_locked_pte(patching_mm, text_poke_addr, &ptl);
+	if (unlikely(!ptep)) {
+		pr_warn("map patch: failed to allocate pte for patching\n");
+		return -1;
+	}
+
+	set_pte_at(patching_mm, text_poke_addr, ptep, pte_mkdirty(mk_pte(page, PAGE_KERNEL)));
+	asm volatile("ptesync": : :"memory");
+
+	prev = start_using_temp_mm(patching_mm);
+
+	patch_addr = (u32 *)(text_poke_addr | offset_in_page(addr));
+	rc = __patch_instruction(addr, instr, patch_addr);
+
+	pte_clear(patching_mm, text_poke_addr, ptep);
+
+	local_flush_tlb_mm(patching_mm);
+
+	stop_using_temp_mm(prev);
+	pte_unmap_unlock(ptep, ptl);
+
+	local_irq_restore(flags);
+	WARN_ON(!ppc_inst_equal(ppc_inst_read(addr), instr));
+
+	return rc;
+}
+
 static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
 {
 	int err, rc = 0;
@@ -175,16 +268,19 @@ static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
 	unsigned long text_poke_addr;
 
 	/*
-	 * During early early boot patch_instruction is called
-	 * when text_poke_area is not ready, but we still need
-	 * to allow patching. We just do the plain old patching
+	 * During early boot patch_instruction is called when the
+	 * patching_mm/text_poke_area is not ready, but we still need to allow
+	 * patching. We just do the plain old patching.
 	 */
-	if (!this_cpu_read(text_poke_area))
+	text_poke_addr = __this_cpu_read(cpu_patching_addr);
+	if (!text_poke_addr)
 		return raw_patch_instruction(addr, instr);
 
+	if (radix_enabled())
+		return patch_instruction_mm(addr, instr);
+
 	local_irq_save(flags);
 
-	text_poke_addr = __this_cpu_read(cpu_patching_addr);
 	err = map_patch_area(addr, text_poke_addr);
 	if (err)
 		goto out;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH v7 3/5] powerpc: Rework and improve STRICT_KERNEL_RWX patching
  2021-11-10  0:37 ` [PATCH v7 3/5] powerpc: Rework and improve STRICT_KERNEL_RWX patching Jordan Niethe
@ 2022-03-12  7:30   ` Christophe Leroy
  2022-03-14 23:01     ` Jordan Niethe
  0 siblings, 1 reply; 8+ messages in thread
From: Christophe Leroy @ 2022-03-12  7:30 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev; +Cc: cmr

Hi Jordan

Le 10/11/2021 à 01:37, Jordan Niethe a écrit :
> From: "Christopher M. Riedl" <cmr@bluescreens.de>
> 
> Rework code-patching with STRICT_KERNEL_RWX to prepare for a later patch
> which uses a temporary mm for patching under the Book3s64 Radix MMU.
> Make improvements by adding a WARN_ON when the patchsite doesn't match
> after patching and return the error from __patch_instruction() properly.
> 
> Signed-off-by: Christopher M. Riedl <cmr@bluescreens.de>
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
> v7: still pass addr to map_patch_area()


This patch doesn't apply, can you rebase the series?

Thanks
Christophe

> ---
>   arch/powerpc/lib/code-patching.c | 20 ++++++++++----------
>   1 file changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
> index 29a30c3068ff..d586bf9c7581 100644
> --- a/arch/powerpc/lib/code-patching.c
> +++ b/arch/powerpc/lib/code-patching.c
> @@ -75,6 +75,7 @@ static inline void stop_using_temp_mm(struct temp_mm_state prev_state)
>   }
>   
>   static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
> +static DEFINE_PER_CPU(unsigned long, cpu_patching_addr);
>   
>   static int text_area_cpu_up(unsigned int cpu)
>   {
> @@ -87,6 +88,7 @@ static int text_area_cpu_up(unsigned int cpu)
>   		return -1;
>   	}
>   	this_cpu_write(text_poke_area, area);
> +	this_cpu_write(cpu_patching_addr, (unsigned long)area->addr);
>   
>   	return 0;
>   }
> @@ -172,11 +174,10 @@ static inline int unmap_patch_area(unsigned long addr)
>   
>   static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
>   {
> -	int err;
> +	int err, rc = 0;
>   	u32 *patch_addr = NULL;
>   	unsigned long flags;
>   	unsigned long text_poke_addr;
> -	unsigned long kaddr = (unsigned long)addr;
>   
>   	/*
>   	 * During early early boot patch_instruction is called
> @@ -188,15 +189,13 @@ static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
>   
>   	local_irq_save(flags);
>   
> -	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
> -	if (map_patch_area(addr, text_poke_addr)) {
> -		err = -1;
> +	text_poke_addr = __this_cpu_read(cpu_patching_addr);
> +	err = map_patch_area(addr, text_poke_addr);
> +	if (err)
>   		goto out;
> -	}
> -
> -	patch_addr = (u32 *)(text_poke_addr + (kaddr & ~PAGE_MASK));
>   
> -	__patch_instruction(addr, instr, patch_addr);
> +	patch_addr = (u32 *)(text_poke_addr | offset_in_page(addr));
> +	rc = __patch_instruction(addr, instr, patch_addr);
>   
>   	err = unmap_patch_area(text_poke_addr);
>   	if (err)
> @@ -204,8 +203,9 @@ static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
>   
>   out:
>   	local_irq_restore(flags);
> +	WARN_ON(!ppc_inst_equal(ppc_inst_read(addr), instr));
>   
> -	return err;
> +	return rc ? rc : err;
>   }
>   #else /* !CONFIG_STRICT_KERNEL_RWX */
>   

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v7 3/5] powerpc: Rework and improve STRICT_KERNEL_RWX patching
  2022-03-12  7:30   ` Christophe Leroy
@ 2022-03-14 23:01     ` Jordan Niethe
  0 siblings, 0 replies; 8+ messages in thread
From: Jordan Niethe @ 2022-03-14 23:01 UTC (permalink / raw)
  To: Christophe Leroy; +Cc: linuxppc-dev, cmr

On Sat, Mar 12, 2022 at 6:30 PM Christophe Leroy
<christophe.leroy@csgroup.eu> wrote:
>
> Hi Jordan
>
> Le 10/11/2021 à 01:37, Jordan Niethe a écrit :
> > From: "Christopher M. Riedl" <cmr@bluescreens.de>
> >
> > Rework code-patching with STRICT_KERNEL_RWX to prepare for a later patch
> > which uses a temporary mm for patching under the Book3s64 Radix MMU.
> > Make improvements by adding a WARN_ON when the patchsite doesn't match
> > after patching and return the error from __patch_instruction() properly.
> >
> > Signed-off-by: Christopher M. Riedl <cmr@bluescreens.de>
> > Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> > ---
> > v7: still pass addr to map_patch_area()
>
>
> This patch doesn-t apply, can you rebase the series ?
Yep, will do.
>
> Thanks
> Christophe
>
> > ---
> >   arch/powerpc/lib/code-patching.c | 20 ++++++++++----------
> >   1 file changed, 10 insertions(+), 10 deletions(-)
> >
> > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
> > index 29a30c3068ff..d586bf9c7581 100644
> > --- a/arch/powerpc/lib/code-patching.c
> > +++ b/arch/powerpc/lib/code-patching.c
> > @@ -75,6 +75,7 @@ static inline void stop_using_temp_mm(struct temp_mm_state prev_state)
> >   }
> >
> >   static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
> > +static DEFINE_PER_CPU(unsigned long, cpu_patching_addr);
> >
> >   static int text_area_cpu_up(unsigned int cpu)
> >   {
> > @@ -87,6 +88,7 @@ static int text_area_cpu_up(unsigned int cpu)
> >               return -1;
> >       }
> >       this_cpu_write(text_poke_area, area);
> > +     this_cpu_write(cpu_patching_addr, (unsigned long)area->addr);
> >
> >       return 0;
> >   }
> > @@ -172,11 +174,10 @@ static inline int unmap_patch_area(unsigned long addr)
> >
> >   static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
> >   {
> > -     int err;
> > +     int err, rc = 0;
> >       u32 *patch_addr = NULL;
> >       unsigned long flags;
> >       unsigned long text_poke_addr;
> > -     unsigned long kaddr = (unsigned long)addr;
> >
> >       /*
> >        * During early early boot patch_instruction is called
> > @@ -188,15 +189,13 @@ static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
> >
> >       local_irq_save(flags);
> >
> > -     text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
> > -     if (map_patch_area(addr, text_poke_addr)) {
> > -             err = -1;
> > +     text_poke_addr = __this_cpu_read(cpu_patching_addr);
> > +     err = map_patch_area(addr, text_poke_addr);
> > +     if (err)
> >               goto out;
> > -     }
> > -
> > -     patch_addr = (u32 *)(text_poke_addr + (kaddr & ~PAGE_MASK));
> >
> > -     __patch_instruction(addr, instr, patch_addr);
> > +     patch_addr = (u32 *)(text_poke_addr | offset_in_page(addr));
> > +     rc = __patch_instruction(addr, instr, patch_addr);
> >
> >       err = unmap_patch_area(text_poke_addr);
> >       if (err)
> > @@ -204,8 +203,9 @@ static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
> >
> >   out:
> >       local_irq_restore(flags);
> > +     WARN_ON(!ppc_inst_equal(ppc_inst_read(addr), instr));
> >
> > -     return err;
> > +     return rc ? rc : err;
> >   }
> >   #else /* !CONFIG_STRICT_KERNEL_RWX */
> >

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2022-03-14 23:01 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-11-10  0:37 [PATCH v7 0/5] Use per-CPU temporary mappings for patching on Radix MMU Jordan Niethe
2021-11-10  0:37 ` [PATCH v7 1/5] powerpc: Allow clearing and restoring registers independent of saved breakpoint state Jordan Niethe
2021-11-10  0:37 ` [PATCH v7 2/5] powerpc/64s: Introduce temporary mm for Radix MMU Jordan Niethe
2021-11-10  0:37 ` [PATCH v7 3/5] powerpc: Rework and improve STRICT_KERNEL_RWX patching Jordan Niethe
2022-03-12  7:30   ` Christophe Leroy
2022-03-14 23:01     ` Jordan Niethe
2021-11-10  0:37 ` [PATCH v7 4/5] powerpc: Use WARN_ON and fix check in poking_init Jordan Niethe
2021-11-10  0:37 ` [PATCH v7 5/5] powerpc/64s: Initialize and use a temporary mm for patching on Radix Jordan Niethe

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.