From: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
To: mpe@ellerman.id.au, mikey@neuling.org
Cc: apopple@linux.ibm.com, paulus@samba.org, npiggin@gmail.com,
	christophe.leroy@c-s.fr, naveen.n.rao@linux.vnet.ibm.com,
	peterz@infradead.org, jolsa@kernel.org, oleg@redhat.com,
	fweisbec@gmail.com, mingo@kernel.org,
	linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org,
	Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Subject: [PATCH 13/15] powerpc/watchpoint: Don't allow concurrent perf and ptrace events
Date: Mon,  9 Mar 2020 14:28:04 +0530
Message-ID: <20200309085806.155823-14-ravi.bangoria@linux.ibm.com>
In-Reply-To: <20200309085806.155823-1-ravi.bangoria@linux.ibm.com>

ptrace and perf watchpoints on powerpc behave differently. A ptrace
watchpoint works in one-shot mode and generates a signal before the
instruction is executed; it is the ptrace user's job to single-step
the instruction and re-enable the watchpoint. A perf watchpoint, on
the other hand, is serviced by the kernel, which emulates or
single-steps the instruction and only then generates the event. If
perf and ptrace create two events with the same or overlapping
address ranges, it is ambiguous who should single-step the
instruction. Because of this, don't allow a ptrace and a perf event
to coexist when their address ranges overlap.

Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
---
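A standalone sketch of the range-overlap rule used by
bp_addr_range_overlap() below, assuming HW_BREAKPOINT_ALIGN is 0x7
(8-byte DAWR granules). The names and values here are illustrative
only:

#include <stdbool.h>
#include <stdint.h>

#define ALIGN_MASK 0x7ULL	/* stand-in for HW_BREAKPOINT_ALIGN */

/* Widen both ranges to ALIGN_MASK granule boundaries, then intersect. */
static bool ranges_overlap(uint64_t a_addr, uint64_t a_len,
			   uint64_t b_addr, uint64_t b_len)
{
	uint64_t a_s = a_addr & ~ALIGN_MASK;
	uint64_t a_e = (a_addr + a_len - 1) | ALIGN_MASK;
	uint64_t b_s = b_addr & ~ALIGN_MASK;
	uint64_t b_e = (b_addr + b_len - 1) | ALIGN_MASK;

	return a_s <= b_e && a_e >= b_s;
}

For example, a 1-byte watch at 0x1006 widens to [0x1000, 0x1007] and
so collides with a 4-byte watch at 0x1000, even though the raw byte
ranges never touch.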
 arch/powerpc/include/asm/hw_breakpoint.h |   2 +
 arch/powerpc/kernel/hw_breakpoint.c      | 220 +++++++++++++++++++++++
 kernel/events/hw_breakpoint.c            |  16 ++
 3 files changed, 238 insertions(+)
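
To exercise the new behaviour, something along these lines should see
the overlapping ptrace request rejected with -ENOSPC. This is an
untested sketch: the perf_event_open() usage and the powerpc
PPC_PTRACE_SETHWDEBUG constants are taken from the uapi headers, and
the expected failure is an assumption based on the description above.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <asm/ptrace.h>		/* struct ppc_hw_breakpoint */

static long watched;		/* location both events will watch */

int main(void)
{
	struct perf_event_attr attr = { 0 };
	struct ppc_hw_breakpoint bp = { 0 };
	pid_t child;
	int fd;

	child = fork();
	if (!child) {		/* child: stop and wait to be traced */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}
	waitpid(child, NULL, 0);

	/* Perf write-watchpoint covering &watched in the child. */
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = (__u64)&watched;
	attr.bp_len = HW_BREAKPOINT_LEN_8;
	fd = syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);

	/* Overlapping ptrace watchpoint on the same task; with this
	 * patch applied, the request is expected to fail with ENOSPC. */
	bp.version = PPC_DEBUG_CURRENT_VERSION;
	bp.trigger_type = PPC_BREAKPOINT_TRIGGER_WRITE;
	bp.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
	bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	bp.addr = (__u64)&watched;

	if (ptrace(PPC_PTRACE_SETHWDEBUG, child, NULL, &bp) < 0)
		printf("ptrace watchpoint rejected: %s\n", strerror(errno));

	close(fd);
	kill(child, SIGKILL);
	return 0;
}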

diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index ec61e2b7195c..6e1a19af5177 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -66,6 +66,8 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
 						unsigned long val, void *data);
 int arch_install_hw_breakpoint(struct perf_event *bp);
 void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+int arch_reserve_bp_slot(struct perf_event *bp);
+void arch_release_bp_slot(struct perf_event *bp);
 void arch_unregister_hw_breakpoint(struct perf_event *bp);
 void hw_breakpoint_pmu_read(struct perf_event *bp);
 extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 2ac89b92590f..d8529d9151e8 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -123,6 +123,226 @@ static bool is_ptrace_bp(struct perf_event *bp)
 	return (bp->overflow_handler == ptrace_triggered);
 }
 
+struct breakpoint {
+	struct list_head list;
+	struct perf_event *bp;
+	bool ptrace_bp;
+};
+
+static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static LIST_HEAD(task_bps);
+
+static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
+{
+	struct breakpoint *tmp;
+
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return ERR_PTR(-ENOMEM);
+	tmp->bp = bp;
+	tmp->ptrace_bp = is_ptrace_bp(bp);
+	return tmp;
+}
+
+static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
+{
+	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;
+
+	bp1_saddr = bp1->attr.bp_addr & ~HW_BREAKPOINT_ALIGN;
+	bp1_eaddr = (bp1->attr.bp_addr + bp1->attr.bp_len - 1) | HW_BREAKPOINT_ALIGN;
+	bp2_saddr = bp2->attr.bp_addr & ~HW_BREAKPOINT_ALIGN;
+	bp2_eaddr = (bp2->attr.bp_addr + bp2->attr.bp_len - 1) | HW_BREAKPOINT_ALIGN;
+
+	return (bp1_saddr <= bp2_eaddr && bp1_eaddr >= bp2_saddr);
+}
+
+static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
+{
+	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
+}
+
+static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
+{
+	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
+}
+
+static int task_bps_add(struct perf_event *bp)
+{
+	struct breakpoint *tmp;
+
+	tmp = alloc_breakpoint(bp);
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+
+	list_add(&tmp->list, &task_bps);
+	return 0;
+}
+
+static void task_bps_remove(struct perf_event *bp)
+{
+	struct list_head *pos, *q;
+	struct breakpoint *tmp;
+
+	list_for_each_safe(pos, q, &task_bps) {
+		tmp = list_entry(pos, struct breakpoint, list);
+
+		if (tmp->bp == bp) {
+			list_del(&tmp->list);
+			kfree(tmp);
+			break;
+		}
+	}
+}
+
+/*
+ * If any task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool all_task_bps_check(struct perf_event *bp)
+{
+	struct breakpoint *tmp;
+
+	list_for_each_entry(tmp, &task_bps, list) {
+		if (!can_co_exist(tmp, bp))
+			return true;
+	}
+	return false;
+}
+
+/*
+ * If the same task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool same_task_bps_check(struct perf_event *bp)
+{
+	struct breakpoint *tmp;
+
+	list_for_each_entry(tmp, &task_bps, list) {
+		if (tmp->bp->hw.target == bp->hw.target &&
+		    !can_co_exist(tmp, bp))
+			return true;
+	}
+	return false;
+}
+
+static int cpu_bps_add(struct perf_event *bp)
+{
+	struct breakpoint **cpu_bp;
+	struct breakpoint *tmp;
+	int i = 0;
+
+	tmp = alloc_breakpoint(bp);
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+
+	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+	for (i = 0; i < nr_wp_slots(); i++) {
+		if (!cpu_bp[i]) {
+			cpu_bp[i] = tmp;
+			break;
+		}
+	}
+	return 0;
+}
+
+static void cpu_bps_remove(struct perf_event *bp)
+{
+	struct breakpoint **cpu_bp;
+	int i = 0;
+
+	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+	for (i = 0; i < nr_wp_slots(); i++) {
+		if (!cpu_bp[i])
+			continue;
+
+		if (cpu_bp[i]->bp == bp) {
+			kfree(cpu_bp[i]);
+			cpu_bp[i] = NULL;
+			break;
+		}
+	}
+}
+
+static bool cpu_bps_check(int cpu, struct perf_event *bp)
+{
+	struct breakpoint **cpu_bp;
+	int i;
+
+	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
+	for (i = 0; i < nr_wp_slots(); i++) {
+		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
+			return true;
+	}
+	return false;
+}
+
+static bool all_cpu_bps_check(struct perf_event *bp)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu_bps_check(cpu, bp))
+			return true;
+	}
+	return false;
+}
+
+/*
+ * We don't use any locks to serialize accesses to cpu_bps or task_bps
+ * because we are already inside nr_bp_mutex.
+ */
+int arch_reserve_bp_slot(struct perf_event *bp)
+{
+	int ret;
+
+	if (is_ptrace_bp(bp)) {
+		if (all_cpu_bps_check(bp))
+			return -ENOSPC;
+
+		if (same_task_bps_check(bp))
+			return -ENOSPC;
+
+		return task_bps_add(bp);
+	} else {
+		if (is_kernel_addr(bp->attr.bp_addr))
+			return 0;
+
+		if (bp->hw.target && bp->cpu == -1) {
+			if (same_task_bps_check(bp))
+				return -ENOSPC;
+
+			return task_bps_add(bp);
+		} else if (!bp->hw.target && bp->cpu != -1) {
+			if (all_task_bps_check(bp))
+				return -ENOSPC;
+
+			return cpu_bps_add(bp);
+		} else {
+			if (same_task_bps_check(bp))
+				return -ENOSPC;
+
+			ret = cpu_bps_add(bp);
+			if (ret)
+				return ret;
+			ret = task_bps_add(bp);
+			if (ret)
+				cpu_bps_remove(bp);
+
+			return ret;
+		}
+	}
+}
+
+void arch_release_bp_slot(struct perf_event *bp)
+{
+	if (!is_kernel_addr(bp->attr.bp_addr)) {
+		if (bp->hw.target)
+			task_bps_remove(bp);
+		if (bp->cpu != -1)
+			cpu_bps_remove(bp);
+	}
+}
+
 /*
  * Perform cleanup of arch-specific counters during unregistration
  * of the perf-event
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 3cc8416ec844..b48d7039a015 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -213,6 +213,15 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 		list_del(&bp->hw.bp_list);
 }
 
+__weak int arch_reserve_bp_slot(struct perf_event *bp)
+{
+	return 0;
+}
+
+__weak void arch_release_bp_slot(struct perf_event *bp)
+{
+}
+
 /*
  * Function to perform processor-specific cleanup during unregistration
  */
@@ -270,6 +279,7 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
 	struct bp_busy_slots slots = {0};
 	enum bp_type_idx type;
 	int weight;
+	int ret;
 
 	/* We couldn't initialize breakpoint constraints on boot */
 	if (!constraints_initialized)
@@ -294,6 +304,10 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
 	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
 		return -ENOSPC;
 
+	ret = arch_reserve_bp_slot(bp);
+	if (ret)
+		return ret;
+
 	toggle_bp_slot(bp, true, type, weight);
 
 	return 0;
@@ -317,6 +331,8 @@ static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
 	enum bp_type_idx type;
 	int weight;
 
+	arch_release_bp_slot(bp);
+
 	type = find_slot_idx(bp_type);
 	weight = hw_breakpoint_weight(bp);
 	toggle_bp_slot(bp, false, type, weight);
-- 
2.21.1


