linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
From: Balbir Singh <bsingharora@gmail.com>
To: linuxppc-dev@lists.ozlabs.org, mpe@ellerman.id.au
Cc: naveen.n.rao@linux.vnet.ibm.com, christophe.leroy@c-s.fr,
	paulus@samba.org, rashmica.g@gmail.com, labbott@redhat.com,
	Balbir Singh <bsingharora@gmail.com>
Subject: [PATCH v2 1/9] powerpc/lib/code-patching: Enhance code patching
Date: Sat,  3 Jun 2017 17:18:35 +1000	[thread overview]
Message-ID: <20170603071843.11966-2-bsingharora@gmail.com> (raw)
In-Reply-To: <20170603071843.11966-1-bsingharora@gmail.com>

Today our patching happens via direct copy and
patch_instruction. The patching code is well
contained in the sense that copying bits are limited.

While considering implementation of CONFIG_STRICT_RWX,
the first requirement is to create another mapping
that will allow for patching. We create the window using
text_poke_area, allocated via get_vm_area(), which might
be overkill. text_poke_area is per CPU to avoid locking.
Other arches do similar things, but use fixmaps. The reason
for not using fixmaps is to make use of any randomization in
the future. The code also relies on set_pte_at and pte_clear
to do the appropriate tlb flushing.

Signed-off-by: Balbir Singh <bsingharora@gmail.com>
---
 arch/powerpc/lib/code-patching.c | 137 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 133 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 500b0f6..eb26b16 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -12,23 +12,151 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/cpuhotplug.h>
 #include <asm/page.h>
 #include <asm/code-patching.h>
 #include <linux/uaccess.h>
 #include <linux/kprobes.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
+static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
+static unsigned int text_area_patch_avail;
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int text_area_cpu_up(unsigned int cpu)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
+	if (!area) {
+		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
+			cpu);
+		return -1;
+	}
+	this_cpu_write(text_poke_area, area);
+	return 0;
+}
+
+static int text_area_cpu_down(unsigned int cpu)
+{
+	free_vm_area(this_cpu_read(text_poke_area));
+	return 0;
+}
+
+/*
+ * This is an early_initcall and early_initcalls happen at the right time
+ * for us, after slab is enabled and before we mark ro pages R/O. In the
+ * future if get_vm_area is randomized, this will be more flexible than
+ * fixmap
+ */
+static int __init setup_text_poke_area(void)
 {
+	struct vm_struct *area;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		area = get_vm_area(PAGE_SIZE, VM_ALLOC);
+		if (!area) {
+			WARN_ONCE(1, "Failed to create text area for cpu %d\n",
+				cpu);
+			/* Should we disable strict rwx? */
+			continue;
+		}
+		this_cpu_write(text_poke_area, area);
+	}
+	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+		"powerpc/text_poke:online", text_area_cpu_up,
+		text_area_cpu_down);
+	text_area_patch_avail = 1;
+	/*
+	 * The barrier here ensures the write is visible to
+	 * patch_instruction()
+	 */
+	smp_wmb();
+	pr_info("text_poke area ready...\n");
+	return 0;
+}
+
+/*
+ * This can be called for kernel text or a module.
+ */
+static int kernel_map_addr(void *addr)
+{
+	unsigned long pfn;
 	int err;
 
-	__put_user_size(instr, addr, 4, err);
+	if (is_vmalloc_addr(addr))
+		pfn = vmalloc_to_pfn(addr);
+	else
+		pfn = __pa_symbol(addr) >> PAGE_SHIFT;
+
+	err = map_kernel_page(
+			(unsigned long)__this_cpu_read(text_poke_area)->addr,
+			(pfn << PAGE_SHIFT), _PAGE_KERNEL_RW | _PAGE_PRESENT);
+	pr_devel("Mapped addr %p with pfn %lx\n",
+			__this_cpu_read(text_poke_area)->addr, pfn);
 	if (err)
-		return err;
-	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
+		return -1;
 	return 0;
 }
 
+static inline void kernel_unmap_addr(void *addr)
+{
+	pte_t *pte;
+	unsigned long kaddr = (unsigned long)addr;
+
+	pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(kaddr),
+				kaddr), kaddr), kaddr);
+	pr_devel("clearing mm %p, pte %p, kaddr %lx\n", &init_mm, pte, kaddr);
+	pte_clear(&init_mm, kaddr, pte);
+	flush_tlb_kernel_range(kaddr, kaddr + PAGE_SIZE);
+}
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+	int err;
+	unsigned int *dest = NULL;
+	unsigned long flags;
+	unsigned long kaddr = (unsigned long)addr;
+
+	/*
+	 * Make sure we can see any write of text_area_patch_avail
+	 */
+	smp_rmb();
+
+	/*
+	 * During early boot patch_instruction is called
+	 * when text_poke_area is not ready, but we still need
+	 * to allow patching. We just do the plain old patching.
+	 * We use text_area_patch_avail, since per cpu read
+	 * via __this_cpu_read of text_poke_area might not
+	 * yet be available.
+	 * TODO: Make text_area_patch_avail per cpu?
+	 */
+	if (!text_area_patch_avail) {
+		__put_user_size(instr, addr, 4, err);
+		asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" :: "r" (addr));
+		return 0;
+	}
+
+	local_irq_save(flags);
+	if (kernel_map_addr(addr)) {
+		err = -1;
+		goto out;
+	}
+
+	dest = (unsigned int *)(__this_cpu_read(text_poke_area)->addr) +
+		((kaddr & ~PAGE_MASK) / sizeof(unsigned int));
+	__put_user_size(instr, dest, 4, err);
+	asm ("dcbst 0, %0; sync; icbi 0,%0; icbi 0,%1; sync; isync"
+		::"r" (dest), "r"(addr));
+	kernel_unmap_addr(__this_cpu_read(text_poke_area)->addr);
+out:
+	local_irq_restore(flags);
+	return err;
+}
+NOKPROBE_SYMBOL(patch_instruction);
+
 int patch_branch(unsigned int *addr, unsigned long target, int flags)
 {
 	return patch_instruction(addr, create_branch(addr, target, flags));
@@ -514,3 +642,4 @@ static int __init test_code_patching(void)
 late_initcall(test_code_patching);
 
 #endif /* CONFIG_CODE_PATCHING_SELFTEST */
+early_initcall(setup_text_poke_area);
-- 
2.9.3

  reply	other threads:[~2017-06-03  7:19 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-06-03  7:18 [PATCH v2 0/9] Enable STRICT_KERNEL_RWX Balbir Singh
2017-06-03  7:18 ` Balbir Singh [this message]
2017-06-03 23:45   ` [PATCH v2 1/9] powerpc/lib/code-patching: Enhance code patching kbuild test robot
2017-06-07 10:25     ` Christophe LEROY
2017-06-07 11:04       ` Balbir Singh
2017-06-07 11:52         ` Michael Ellerman
2017-06-03  7:18 ` [PATCH v2 2/9] powerpc/kprobes: Move kprobes over to patch_instruction Balbir Singh
2017-06-03  7:18 ` [PATCH v2 3/9] powerpc/kprobes/optprobes: Move " Balbir Singh
2017-06-03  7:18 ` [PATCH v2 4/9] powerpc/xmon: Add patch_instruction supporf for xmon Balbir Singh
2017-06-03  7:18 ` [PATCH v2 5/9] powerpc/vmlinux.lds: Align __init_begin to 16M Balbir Singh
2017-06-04  4:22   ` Nicholas Piggin
2017-06-04 22:42     ` Balbir Singh
2017-06-05  6:51       ` Nicholas Piggin
2017-06-03  7:18 ` [PATCH v2 6/9] powerpc/platform/pseries/lpar: Fix updatepp and updateboltedpp Balbir Singh
2017-06-03  7:18 ` [PATCH v2 7/9] powerpc/mm/hash: Implement mark_rodata_ro() for hash Balbir Singh
2017-06-03  7:18 ` [PATCH v2 8/9] powerpc/Kconfig: Enable STRICT_KERNEL_RWX Balbir Singh
2017-06-03  7:18 ` [PATCH v2 9/9] powerpc/mm/ptdump: Dump the first entry of the linear mapping as well Balbir Singh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170603071843.11966-2-bsingharora@gmail.com \
    --to=bsingharora@gmail.com \
    --cc=christophe.leroy@c-s.fr \
    --cc=labbott@redhat.com \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=mpe@ellerman.id.au \
    --cc=naveen.n.rao@linux.vnet.ibm.com \
    --cc=paulus@samba.org \
    --cc=rashmica.g@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).