From: Steven Rostedt <rostedt@goodmis.org>
To: linux-kernel@vger.kernel.org
Cc: Ingo Molnar <mingo@kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Matt Helsley <mhelsley@vmware.com>
Subject: [for-next][PATCH 21/25] recordmcount: Clarify what cleanup() does
Date: Thu, 05 Sep 2019 11:43:19 -0400	[thread overview]
Message-ID: <20190905154343.292050287@goodmis.org> (raw)
In-Reply-To: <20190905154258.573706229@goodmis.org>

From: Matt Helsley <mhelsley@vmware.com>

cleanup() mostly frees/unmaps the malloc'd or privately-mapped
copy of the ELF file recordmcount is working on, which is
set up in mmap_file(). It also resets the state used for
positioning within that pseudo private mapping of the file
and for appending to the ELF file.

Split into two steps:
	mmap_cleanup() for the mapping itself
	file_append_cleanup() for allocations storing the
		appended ELF data.

Also, move the global variable initializations out of the main
per-object-file loop and into mmap_file(), next to the two cleanup
functions, so it is clearer how the allocation/initialization and
the cleanups are related.
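
For illustration only, here is a minimal standalone sketch of the
resulting pattern (simplified and hypothetical; only the function
names and the overall flow come from this patch): mmap_file() sets
up the per-file state, mmap_cleanup() releases the mapping or its
read() fallback buffer, and file_append_cleanup() frees the data
appended in memory.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

static struct stat sb;		/* stat of the currently mapped file */
static int mmap_failed;		/* fell back to malloc()+read()? */
static void *file_map;		/* private mapping (or heap copy) */
static void *file_append;	/* data appended in memory, if any */
static size_t file_append_size;

/* Free only the appended data; the mapping is handled separately. */
static void file_append_cleanup(void)
{
	free(file_append);
	file_append = NULL;
	file_append_size = 0;
}

/* Release the mapping itself, or the heap copy if mmap() had failed. */
static void mmap_cleanup(void)
{
	if (!mmap_failed)
		munmap(file_map, sb.st_size);
	else
		free(file_map);
	file_map = NULL;
}

/* Map the file privately, falling back to malloc()+read() if needed. */
static void *mmap_file(const char *fname)
{
	int fd;

	mmap_failed = 1;
	file_map = NULL;
	sb.st_size = 0;

	fd = open(fname, O_RDONLY);
	if (fd < 0 || fstat(fd, &sb) < 0)
		goto out;

	file_map = mmap(NULL, sb.st_size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE, fd, 0);
	if (file_map == MAP_FAILED) {
		file_map = malloc(sb.st_size);
		if (file_map && read(fd, file_map, sb.st_size) != sb.st_size) {
			free(file_map);
			file_map = NULL;
		}
	} else {
		mmap_failed = 0;
	}
out:
	if (fd >= 0)
		close(fd);
	return file_map;
}

int main(int argc, char *argv[])
{
	int i;

	for (i = 1; i < argc; i++) {
		if (!mmap_file(argv[i])) {
			fprintf(stderr, "%s: failed\n", argv[i]);
			continue;
		}
		/* ... per-file work that may grow file_append ... */
		file_append_cleanup();	/* drop appended data first */
		mmap_cleanup();		/* then drop the mapping/copy */
	}
	return 0;
}

Keeping the two cleanups separate makes it obvious which resources
each error path still has to release.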

Link: http://lkml.kernel.org/r/2a387ac86d133d22c68f57b9933c32bab1d09a2d.1564596289.git.mhelsley@vmware.com

Signed-off-by: Matt Helsley <mhelsley@vmware.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 scripts/recordmcount.c | 151 ++++++++++++++++++++++-------------------
 1 file changed, 81 insertions(+), 70 deletions(-)

diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 5677fcc88a72..612268eabef4 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -48,21 +48,26 @@ static void *file_map;	/* pointer of the mapped file */
 static void *file_end;	/* pointer to the end of the mapped file */
 static int file_updated; /* flag to state file was changed */
 static void *file_ptr;	/* current file pointer location */
+
 static void *file_append; /* added to the end of the file */
 static size_t file_append_size; /* how much is added to end of file */
 
 /* Per-file resource cleanup when multiple files. */
-static void cleanup(void)
+static void file_append_cleanup(void)
+{
+	free(file_append);
+	file_append = NULL;
+	file_append_size = 0;
+	file_updated = 0;
+}
+
+static void mmap_cleanup(void)
 {
 	if (!mmap_failed)
 		munmap(file_map, sb.st_size);
 	else
 		free(file_map);
 	file_map = NULL;
-	free(file_append);
-	file_append = NULL;
-	file_append_size = 0;
-	file_updated = 0;
 }
 
 /* ulseek, uwrite, ...:  Check return value for errors. */
@@ -103,7 +108,8 @@ static ssize_t uwrite(void const *const buf, size_t const count)
 		}
 		if (!file_append) {
 			perror("write");
-			cleanup();
+			file_append_cleanup();
+			mmap_cleanup();
 			return -1;
 		}
 		if (file_ptr < file_end) {
@@ -129,12 +135,76 @@ static void * umalloc(size_t size)
 	void *const addr = malloc(size);
 	if (addr == 0) {
 		fprintf(stderr, "malloc failed: %zu bytes\n", size);
-		cleanup();
+		file_append_cleanup();
+		mmap_cleanup();
 		return NULL;
 	}
 	return addr;
 }
 
+/*
+ * Get the whole file as a programming convenience in order to avoid
+ * malloc+lseek+read+free of many pieces.  If successful, then mmap
+ * avoids copying unused pieces; else just read the whole file.
+ * Open for both read and write; new info will be appended to the file.
+ * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
+ * do not propagate to the file until an explicit overwrite at the last.
+ * This preserves most aspects of consistency (all except .st_size)
+ * for simultaneous readers of the file while we are appending to it.
+ * However, multiple writers still are bad.  We choose not to use
+ * locking because it is expensive and the use case of kernel build
+ * makes multiple writers unlikely.
+ */
+static void *mmap_file(char const *fname)
+{
+	/* Avoid problems if early cleanup() */
+	fd_map = -1;
+	mmap_failed = 1;
+	file_map = NULL;
+	file_ptr = NULL;
+	file_updated = 0;
+	sb.st_size = 0;
+
+	fd_map = open(fname, O_RDONLY);
+	if (fd_map < 0) {
+		perror(fname);
+		return NULL;
+	}
+	if (fstat(fd_map, &sb) < 0) {
+		perror(fname);
+		goto out;
+	}
+	if (!S_ISREG(sb.st_mode)) {
+		fprintf(stderr, "not a regular file: %s\n", fname);
+		goto out;
+	}
+	file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
+			fd_map, 0);
+	if (file_map == MAP_FAILED) {
+		mmap_failed = 1;
+		file_map = umalloc(sb.st_size);
+		if (!file_map) {
+			perror(fname);
+			goto out;
+		}
+		if (read(fd_map, file_map, sb.st_size) != sb.st_size) {
+			perror(fname);
+			free(file_map);
+			file_map = NULL;
+			goto out;
+		}
+	} else
+		mmap_failed = 0;
+out:
+	close(fd_map);
+	fd_map = -1;
+
+	file_end = file_map + sb.st_size;
+
+	return file_map;
+}
+
+
 static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
 static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
 static unsigned char *ideal_nop;
@@ -238,61 +308,6 @@ static int make_nop_arm64(void *map, size_t const offset)
 	return 0;
 }
 
-/*
- * Get the whole file as a programming convenience in order to avoid
- * malloc+lseek+read+free of many pieces.  If successful, then mmap
- * avoids copying unused pieces; else just read the whole file.
- * Open for both read and write; new info will be appended to the file.
- * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
- * do not propagate to the file until an explicit overwrite at the last.
- * This preserves most aspects of consistency (all except .st_size)
- * for simultaneous readers of the file while we are appending to it.
- * However, multiple writers still are bad.  We choose not to use
- * locking because it is expensive and the use case of kernel build
- * makes multiple writers unlikely.
- */
-static void *mmap_file(char const *fname)
-{
-	file_map = NULL;
-	sb.st_size = 0;
-	fd_map = open(fname, O_RDONLY);
-	if (fd_map < 0) {
-		perror(fname);
-		return NULL;
-	}
-	if (fstat(fd_map, &sb) < 0) {
-		perror(fname);
-		goto out;
-	}
-	if (!S_ISREG(sb.st_mode)) {
-		fprintf(stderr, "not a regular file: %s\n", fname);
-		goto out;
-	}
-	file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
-			fd_map, 0);
-	mmap_failed = 0;
-	if (file_map == MAP_FAILED) {
-		mmap_failed = 1;
-		file_map = umalloc(sb.st_size);
-		if (!file_map) {
-			perror(fname);
-			goto out;
-		}
-		if (read(fd_map, file_map, sb.st_size) != sb.st_size) {
-			perror(fname);
-			free(file_map);
-			file_map = NULL;
-			goto out;
-		}
-	}
-out:
-	close(fd_map);
-
-	file_end = file_map + sb.st_size;
-
-	return file_map;
-}
-
 static int write_file(const char *fname)
 {
 	char tmp_file[strlen(fname) + 4];
@@ -438,10 +453,11 @@ static void MIPS64_r_info(Elf64_Rel *const rp, unsigned sym, unsigned type)
 
 static int do_file(char const *const fname)
 {
-	Elf32_Ehdr *const ehdr = mmap_file(fname);
 	unsigned int reltype = 0;
+	Elf32_Ehdr *ehdr;
 	int rc = -1;
 
+	ehdr = mmap_file(fname);
 	if (!ehdr)
 		goto out;
 
@@ -577,7 +593,8 @@ static int do_file(char const *const fname)
 
 	rc = write_file(fname);
 out:
-	cleanup();
+	file_append_cleanup();
+	mmap_cleanup();
 	return rc;
 }
 
@@ -620,12 +637,6 @@ int main(int argc, char *argv[])
 		    strcmp(file + (len - ftrace_size), ftrace) == 0)
 			continue;
 
-		/* Avoid problems if early cleanup() */
-		fd_map = -1;
-		mmap_failed = 1;
-		file_map = NULL;
-		file_ptr = NULL;
-		file_updated = 0;
 		if (do_file(file)) {
 			fprintf(stderr, "%s: failed\n", file);
 			++n_error;
-- 
2.20.1


