From: Wei Li <liwei391@huawei.com>
To: <acme@kernel.org>, <jolsa@redhat.com>, <namhyung@kernel.org>,
	<alexander.shishkin@linux.intel.com>, <peterz@infradead.org>,
	<mingo@redhat.com>
Cc: <linux-kernel@vger.kernel.org>, <xiezhipeng1@huawei.com>
Subject: [PATCH v2] fix use-after-free in perf_sched__lat
Date: Wed, 8 May 2019 22:36:48 +0800
Message-ID: <20190508143648.8153-1-liwei391@huawei.com>

After a thread is added to machine->threads[i].dead in
__machine__remove_thread(), the machine->threads[i].dead list is
freed together with the session when perf_session__delete() calls
free(session). Accessing it afterwards in thread__put() therefore
triggers a use-after-free and a segmentation fault.

Fix this by delaying perf_session__delete() until all threads have
been deleted.
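
Roughly, the change in ordering (a simplified sketch, not the exact
call chain):

	/* before: perf_sched__read_events() created and deleted the
	 * session internally, so the dead-thread lists embedded in it
	 * were already freed by the time the latency code dropped its
	 * thread references */
	perf_sched__read_events(sched);		/* session freed on return */
	...
	thread__put(thread);			/* use-after-free */

	/* after: perf_sched__lat() owns the session and keeps it
	 * alive until the latency output is printed */
	session = perf_session__new(&data, false, &sched->tool);
	__perf_sched__read_events(sched, session);
	...					/* merge, sort, print */
	perf_session__delete(session);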

This can be reproduced with the following steps:
	ulimit -c unlimited
	export MALLOC_MMAP_THRESHOLD_=0
	perf sched record sleep 10
	perf sched latency --sort max
	Segmentation fault (core dumped)

Signed-off-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
---
 tools/perf/builtin-sched.c | 63 ++++++++++++++++++++++++++------------
 1 file changed, 43 insertions(+), 20 deletions(-)

diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 275f2d92a7bf..8a4841fa124c 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1774,7 +1774,8 @@ static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
 	return 0;
 }
 
-static int perf_sched__read_events(struct perf_sched *sched)
+static int __perf_sched__read_events(struct perf_sched *sched,
+					struct perf_session *session)
 {
 	const struct perf_evsel_str_handler handlers[] = {
 		{ "sched:sched_switch",	      process_sched_switch_event, },
@@ -1783,30 +1784,17 @@ static int perf_sched__read_events(struct perf_sched *sched)
 		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
 		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
 	};
-	struct perf_session *session;
-	struct perf_data data = {
-		.path  = input_name,
-		.mode  = PERF_DATA_MODE_READ,
-		.force = sched->force,
-	};
-	int rc = -1;
-
-	session = perf_session__new(&data, false, &sched->tool);
-	if (session == NULL) {
-		pr_debug("No Memory for session\n");
-		return -1;
-	}
 
 	symbol__init(&session->header.env);
 
 	if (perf_session__set_tracepoints_handlers(session, handlers))
-		goto out_delete;
+		return -1;
 
 	if (perf_session__has_traces(session, "record -R")) {
 		int err = perf_session__process_events(session);
 		if (err) {
 			pr_err("Failed to process events, error %d", err);
-			goto out_delete;
+			return -1;
 		}
 
 		sched->nr_events      = session->evlist->stats.nr_events[0];
@@ -1814,9 +1802,28 @@ static int perf_sched__read_events(struct perf_sched *sched)
 		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
 	}
 
-	rc = 0;
-out_delete:
+	return 0;
+}
+
+static int perf_sched__read_events(struct perf_sched *sched)
+{
+	struct perf_session *session;
+	struct perf_data data = {
+		.path  = input_name,
+		.mode  = PERF_DATA_MODE_READ,
+		.force = sched->force,
+	};
+	int rc;
+
+	session = perf_session__new(&data, false, &sched->tool);
+	if (session == NULL) {
+		pr_debug("No Memory for session\n");
+		return -1;
+	}
+
+	rc = __perf_sched__read_events(sched, session);
 	perf_session__delete(session);
+
 	return rc;
 }
 
@@ -3130,12 +3137,25 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
 
 static int perf_sched__lat(struct perf_sched *sched)
 {
+	struct perf_session *session;
+	struct perf_data data = {
+		.path  = input_name,
+		.mode  = PERF_DATA_MODE_READ,
+		.force = sched->force,
+	};
 	struct rb_node *next;
+	int rc = -1;
 
 	setup_pager();
 
-	if (perf_sched__read_events(sched))
+	session = perf_session__new(&data, false, &sched->tool);
+	if (session == NULL) {
+		pr_debug("No Memory for session\n");
 		return -1;
+	}
+
+	if (__perf_sched__read_events(sched, session))
+		goto out_delete;
 
 	perf_sched__merge_lat(sched);
 	perf_sched__sort_lat(sched);
@@ -3164,7 +3184,10 @@ static int perf_sched__lat(struct perf_sched *sched)
 	print_bad_events(sched);
 	printf("\n");
 
-	return 0;
+	rc = 0;
+out_delete:
+	perf_session__delete(session);
+	return rc;
 }
 
 static int setup_map_cpus(struct perf_sched *sched)
-- 
2.17.1



Thread overview: 7+ messages
2019-05-08 14:36 Wei Li [this message]
2019-05-22  6:56 ` [PATCH v2] fix use-after-free in perf_sched__lat Namhyung Kim
2019-05-22 11:08   ` Arnaldo Carvalho de Melo
2019-05-23  2:50     ` Namhyung Kim
2019-07-04 11:21       ` liwei (GF)
2019-07-04 19:43         ` Arnaldo Carvalho de Melo
2019-07-09 11:29           ` [tip:perf/core] perf thread: Allow references to thread objects after machine__exit() tip-bot for Arnaldo Carvalho de Melo
