From: madvenka@linux.microsoft.com
To: broonie@kernel.org, mark.rutland@arm.com, jpoimboe@redhat.com,
	ardb@kernel.org, nobuta.keiya@fujitsu.com,
	sjitindarsingh@gmail.com, catalin.marinas@arm.com,
	will@kernel.org, jamorris@linux.microsoft.com,
	linux-arm-kernel@lists.infradead.org,
	live-patching@vger.kernel.org, linux-kernel@vger.kernel.org,
	madvenka@linux.microsoft.com
Subject: [RFC PATCH v15 5/6] arm64: Create a list of SYM_CODE functions, check return PC against list
Date: Fri, 17 Jun 2022 13:02:18 -0500
Message-ID: <20220617180219.20352-6-madvenka@linux.microsoft.com>
In-Reply-To: <20220617180219.20352-1-madvenka@linux.microsoft.com>

From: "Madhavan T. Venkataraman" <madvenka@linux.microsoft.com>

SYM_CODE functions don't follow the usual calling conventions. Check whether
the return PC in a stack frame falls within any of these functions. If it
does, consider the stack trace unreliable.

Define a special section for unreliable functions
=================================================

Define a SYM_CODE_END() macro for arm64 that adds the function address
range to a new section called "sym_code_functions".
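
For illustration, a SYM_CODE function is written as usual with
SYM_CODE_START()/SYM_CODE_END(); the new SYM_CODE_END() transparently
records the function's address range. A minimal sketch (the function name
below is hypothetical):

	SYM_CODE_START(example_sym_code_func)
		ret
	SYM_CODE_END(example_sym_code_func)

	// SYM_CODE_END() roughly expands to:
	//	SYM_END(example_sym_code_func, SYM_T_NONE)
	// 99:	.pushsection "sym_code_functions", "aw"
	//	.quad	example_sym_code_func	// range start
	//	.quad	99b			// range end
	//	.popsection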

Linker file
===========

Include the "sym_code_functions" section under read-only data in
vmlinux.lds.S.
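
As a quick sanity check (assuming a built vmlinux), the presence of the
new output section can be confirmed with, e.g.:

	readelf -S vmlinux | grep symcode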

Initialization
==============

Define an early_initcall() to create a sym_code_functions[] array from
the linker data.

Unwinder check
==============

Add a reliability check in unwind_check_reliability() that compares the
return PC against sym_code_functions[]. If there is a match, mark the
stack trace unreliable.

Signed-off-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/linkage.h  | 11 +++++++
 arch/arm64/include/asm/sections.h |  1 +
 arch/arm64/kernel/stacktrace.c    | 55 +++++++++++++++++++++++++++++++
 arch/arm64/kernel/vmlinux.lds.S   | 10 ++++++
 4 files changed, 77 insertions(+)

diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 43f8c25b3fda..d4058de4af78 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -39,4 +39,15 @@
 	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)		\
 	bti c ;
 
+/*
+ * Record the address range of each SYM_CODE function in a struct code_range
+ * in a special section.
+ */
+#define SYM_CODE_END(name)				\
+	SYM_END(name, SYM_T_NONE)			;\
+99:	.pushsection "sym_code_functions", "aw"		;\
+	.quad	name					;\
+	.quad	99b					;\
+	.popsection
+
 #endif
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 40971ac1303f..50cfd1083563 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -22,6 +22,7 @@ extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
 extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
+extern char __sym_code_functions_start[], __sym_code_functions_end[];
 
 static inline size_t entry_tramp_text_size(void)
 {
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 5ef2ce217324..eda8581f7dbe 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -62,6 +62,31 @@ struct unwind_state {
 	bool reliable;
 };
 
+struct code_range {
+	unsigned long	start;
+	unsigned long	end;
+};
+
+static struct code_range	*sym_code_functions;
+static int			num_sym_code_functions;
+
+int __init init_sym_code_functions(void)
+{
+	size_t size = (unsigned long)__sym_code_functions_end -
+		      (unsigned long)__sym_code_functions_start;
+
+	sym_code_functions = (struct code_range *)__sym_code_functions_start;
+	/*
+	 * Order the stores so that num_sym_code_functions does not become
+	 * visible before sym_code_functions.
+	 */
+	smp_mb();
+	num_sym_code_functions = size / sizeof(struct code_range);
+
+	return 0;
+}
+early_initcall(init_sym_code_functions);
+
 static void unwind_init_common(struct unwind_state *state,
 			       struct task_struct *task)
 {
@@ -251,6 +276,10 @@ NOKPROBE_SYMBOL(unwind_next);
  */
 static void unwind_check_reliability(struct unwind_state *state)
 {
+	const struct code_range *range;
+	unsigned long pc;
+	int i;
+
 	if (state->fp == state->final_fp) {
 		/* Final frame; no more unwind, no need to check reliability */
 		return;
@@ -263,6 +292,32 @@ static void unwind_check_reliability(struct unwind_state *state)
 	 */
 	if (!__kernel_text_address(state->pc))
 		state->reliable = false;
+
+	/*
+	 * Check the return PC against sym_code_functions[]. If there is a
+	 * match, then consider the stack frame unreliable.
+	 *
+	 * As SYM_CODE functions don't follow the usual calling conventions,
+	 * we assume by default that any SYM_CODE function cannot be unwound
+	 * reliably.
+	 *
+	 * Note that this includes:
+	 *
+	 * - Exception handlers and entry assembly
+	 * - Trampoline assembly (e.g., ftrace, kprobes)
+	 * - Hypervisor-related assembly
+	 * - Hibernation-related assembly
+	 * - CPU start-stop, suspend-resume assembly
+	 * - Kernel relocation assembly
+	 */
+	pc = state->pc;
+	for (i = 0; i < num_sym_code_functions; i++) {
+		range = &sym_code_functions[i];
+		if (pc >= range->start && pc < range->end) {
+			state->reliable = false;
+			return;
+		}
+	}
 }
 
 static bool notrace unwind(struct unwind_state *state,
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 2d4a8f995175..414dbc82d0a6 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -120,6 +120,14 @@ jiffies = jiffies_64;
 #define TRAMP_TEXT
 #endif
 
+#define SYM_CODE_FUNCTIONS				\
+	. = ALIGN(16);					\
+	.symcode : AT(ADDR(.symcode) - LOAD_OFFSET) {	\
+		__sym_code_functions_start = .;		\
+		KEEP(*(sym_code_functions))		\
+		__sym_code_functions_end = .;		\
+	}
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from _stext to _edata, must be a round multiple of the PE/COFF
@@ -212,6 +220,8 @@ SECTIONS
 	swapper_pg_dir = .;
 	. += PAGE_SIZE;
 
+	SYM_CODE_FUNCTIONS
+
 	. = ALIGN(SEGMENT_ALIGN);
 	__init_begin = .;
 	__inittext_begin = .;
-- 
2.25.1

