From: Peter Zijlstra <peterz@infradead.org>
To: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Frederic Weisbecker <frederic@kernel.org>,
	LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@suse.de>,
	Michal Hocko <mhocko@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Ingo Molnar <mingo@redhat.com>, Michal Hocko <mhocko@suse.com>,
	rostedt@goodmis.org, jbaron@akamai.com, ardb@kernel.org
Subject: Re: [RFC PATCH 6/8] preempt/dynamic: Provide preempt_schedule[_notrace]() static calls
Date: Fri, 5 Feb 2021 16:30:56 +0100	[thread overview]
Message-ID: <YB1ksCPhD40f0VQn@hirez.programming.kicks-ass.net> (raw)
In-Reply-To: <YBqtZwIbj6kQEiEd@hirez.programming.kicks-ass.net>

On Wed, Feb 03, 2021 at 03:04:23PM +0100, Peter Zijlstra wrote:
> Fair enough I suppose. I'll slap a changelog and your SoB on it and I
> suppose I'll go commit the whole lot. Then we can forget about it
> again.

FWIW, the whole thing looks like this..

---
Subject: static_call: Allow module use without exposing static_call_key
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Wed, 27 Jan 2021 17:18:37 -0600

From: Josh Poimboeuf <jpoimboe@redhat.com>

When a static_call_key is exported with EXPORT_STATIC_CALL*(), a module
can use static_call_update() to change the function called.  This is
not desirable in general.

Not exporting static_call_key however also disallows usage of
static_call(), since objtool needs the key to construct the
static_call_site.

Solve this by allowing objtool to create the static_call_site using
the trampoline address when it builds a module and cannot find the
static_call_key symbol. The module loader will then try to map the
trampoline back to a key before it constructs the normal sites list.

Doing this requires a trampoline -> key association, so add another
magic section that keeps those associations.
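
For illustration, a minimal usage sketch of the intended pattern (the
names below are hypothetical and not part of this patch):

  /* In a shared header: */
  #include <linux/static_call.h>

  void my_hook_default(int cpu);
  DECLARE_STATIC_CALL(my_hook, my_hook_default);

  static inline void run_my_hook(int cpu)
  {
  	/*
  	 * static_call_mod(): in a module this expands to the raw
  	 * trampoline with no reference to the (unexported) key, so
  	 * objtool records the trampoline address and the module
  	 * loader maps it back to the key via .static_call_tramp_key.
  	 */
  	static_call_mod(my_hook)(cpu);
  }

  /* In builtin code: */
  void my_hook_default(int cpu) { }

  DEFINE_STATIC_CALL(my_hook, my_hook_default);
  EXPORT_STATIC_CALL_TRAMP_GPL(my_hook);	/* trampoline only, no key */

  /* Only builtin code, which can see the key, may retarget the call: */
  void my_hook_set(void (*fn)(int))
  {
  	static_call_update(my_hook, fn);
  }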

Originally-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210127231837.ifddpn7rhwdaepiu@treble
---
 arch/x86/include/asm/static_call.h      |    7 ++++
 include/asm-generic/vmlinux.lds.h       |    5 ++
 include/linux/static_call.h             |   22 +++++++++++-
 include/linux/static_call_types.h       |   27 ++++++++++++++-
 kernel/static_call.c                    |   55 ++++++++++++++++++++++++++++++--
 tools/include/linux/static_call_types.h |   27 ++++++++++++++-
 tools/objtool/check.c                   |   17 ++++++++-
 7 files changed, 149 insertions(+), 11 deletions(-)

--- a/arch/x86/include/asm/static_call.h
+++ b/arch/x86/include/asm/static_call.h
@@ -37,4 +37,11 @@
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
 	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
 
+
+#define ARCH_ADD_TRAMP_KEY(name)					\
+	asm(".pushsection .static_call_tramp_key, \"a\"		\n"	\
+	    ".long " STATIC_CALL_TRAMP_STR(name) " - .		\n"	\
+	    ".long " STATIC_CALL_KEY_STR(name) " - .		\n"	\
+	    ".popsection					\n")
+
 #endif /* _ASM_STATIC_CALL_H */
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -393,7 +393,10 @@
 	. = ALIGN(8);							\
 	__start_static_call_sites = .;					\
 	KEEP(*(.static_call_sites))					\
-	__stop_static_call_sites = .;
+	__stop_static_call_sites = .;					\
+	__start_static_call_tramp_key = .;				\
+	KEEP(*(.static_call_tramp_key))					\
+	__stop_static_call_tramp_key = .;
 
 /*
  * Allow architectures to handle ro_after_init data on their
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -138,6 +138,12 @@ struct static_call_key {
 	};
 };
 
+/* For finding the key associated with a trampoline */
+struct static_call_tramp_key {
+	s32 tramp;
+	s32 key;
+};
+
 extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
 extern int static_call_mod_init(struct module *mod);
 extern int static_call_text_reserved(void *start, void *end);
@@ -165,11 +171,18 @@ extern long __static_call_return0(void);
 #define EXPORT_STATIC_CALL(name)					\
 	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
 	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
-
 #define EXPORT_STATIC_CALL_GPL(name)					\
 	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
 	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
 
+/* Leave the key unexported, so modules can't change static call targets: */
+#define EXPORT_STATIC_CALL_TRAMP(name)					\
+	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));				\
+	ARCH_ADD_TRAMP_KEY(name)
+#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
+	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));			\
+	ARCH_ADD_TRAMP_KEY(name)
+
 #elif defined(CONFIG_HAVE_STATIC_CALL)
 
 static inline int static_call_init(void) { return 0; }
@@ -216,11 +229,16 @@ static inline long __static_call_return0
 #define EXPORT_STATIC_CALL(name)					\
 	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
 	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
-
 #define EXPORT_STATIC_CALL_GPL(name)					\
 	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
 	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
 
+/* Leave the key unexported, so modules can't change static call targets: */
+#define EXPORT_STATIC_CALL_TRAMP(name)					\
+	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
+	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
 #else /* Generic implementation */
 
 static inline int static_call_init(void) { return 0; }
--- a/include/linux/static_call_types.h
+++ b/include/linux/static_call_types.h
@@ -10,6 +10,7 @@
 #define STATIC_CALL_KEY_PREFIX_STR	__stringify(STATIC_CALL_KEY_PREFIX)
 #define STATIC_CALL_KEY_PREFIX_LEN	(sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
 #define STATIC_CALL_KEY(name)		__PASTE(STATIC_CALL_KEY_PREFIX, name)
+#define STATIC_CALL_KEY_STR(name)	__stringify(STATIC_CALL_KEY(name))
 
 #define STATIC_CALL_TRAMP_PREFIX	__SCT__
 #define STATIC_CALL_TRAMP_PREFIX_STR	__stringify(STATIC_CALL_TRAMP_PREFIX)
@@ -39,17 +40,39 @@ struct static_call_site {
 
 #ifdef CONFIG_HAVE_STATIC_CALL
 
+#define __raw_static_call(name)	(&STATIC_CALL_TRAMP(name))
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
 /*
  * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
  * the symbol table so that objtool can reference it when it generates the
  * .static_call_sites section.
  */
+#define __STATIC_CALL_ADDRESSABLE(name) \
+	__ADDRESSABLE(STATIC_CALL_KEY(name))
+
 #define __static_call(name)						\
 ({									\
-	__ADDRESSABLE(STATIC_CALL_KEY(name));				\
-	&STATIC_CALL_TRAMP(name);					\
+	__STATIC_CALL_ADDRESSABLE(name);				\
+	__raw_static_call(name);					\
 })
 
+#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#define __STATIC_CALL_ADDRESSABLE(name)
+#define __static_call(name)	__raw_static_call(name)
+
+#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#ifdef MODULE
+#define __STATIC_CALL_MOD_ADDRESSABLE(name)
+#define static_call_mod(name)	__raw_static_call(name)
+#else
+#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
+#define static_call_mod(name)	__static_call(name)
+#endif
+
 #define static_call(name)	__static_call(name)
 
 #else
--- a/kernel/static_call.c
+++ b/kernel/static_call.c
@@ -12,6 +12,8 @@
 
 extern struct static_call_site __start_static_call_sites[],
 			       __stop_static_call_sites[];
+extern struct static_call_tramp_key __start_static_call_tramp_key[],
+				    __stop_static_call_tramp_key[];
 
 static bool static_call_initialized;
 
@@ -323,10 +325,59 @@ static int __static_call_mod_text_reserv
 	return ret;
 }
 
+static unsigned long tramp_key_lookup(unsigned long addr)
+{
+	struct static_call_tramp_key *start = __start_static_call_tramp_key;
+	struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
+	struct static_call_tramp_key *tramp_key;
+
+	for (tramp_key = start; tramp_key != stop; tramp_key++) {
+		unsigned long tramp;
+
+		tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
+		if (tramp == addr)
+			return (long)tramp_key->key + (long)&tramp_key->key;
+	}
+
+	return 0;
+}
+
 static int static_call_add_module(struct module *mod)
 {
-	return __static_call_init(mod, mod->static_call_sites,
-				  mod->static_call_sites + mod->num_static_call_sites);
+	struct static_call_site *start = mod->static_call_sites;
+	struct static_call_site *stop = start + mod->num_static_call_sites;
+	struct static_call_site *site;
+
+	for (site = start; site != stop; site++) {
+		unsigned long addr = (unsigned long)static_call_key(site);
+		unsigned long key;
+
+		/*
+		 * If the key is exported, 'addr' points to the key, which
+		 * means modules are allowed to call static_call_update() on
+		 * it.
+		 *
+		 * Otherwise, the key isn't exported, and 'addr' points to the
+		 * trampoline, so we need to look up the key.
+		 *
+		 * We go through this dance to prevent crazy modules from
+		 * abusing sensitive static calls.
+		 */
+		if (!kernel_text_address(addr))
+			continue;
+
+		key = tramp_key_lookup(addr);
+		if (!key) {
+			pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
+				static_call_addr(site));
+			return -EINVAL;
+		}
+
+		site->key = (key - (long)&site->key) |
+			    (site->key & STATIC_CALL_SITE_FLAGS);
+	}
+
+	return __static_call_init(mod, start, stop);
 }
 
 static void static_call_del_module(struct module *mod)
--- a/tools/include/linux/static_call_types.h
+++ b/tools/include/linux/static_call_types.h
@@ -10,6 +10,7 @@
 #define STATIC_CALL_KEY_PREFIX_STR	__stringify(STATIC_CALL_KEY_PREFIX)
 #define STATIC_CALL_KEY_PREFIX_LEN	(sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
 #define STATIC_CALL_KEY(name)		__PASTE(STATIC_CALL_KEY_PREFIX, name)
+#define STATIC_CALL_KEY_STR(name)	__stringify(STATIC_CALL_KEY(name))
 
 #define STATIC_CALL_TRAMP_PREFIX	__SCT__
 #define STATIC_CALL_TRAMP_PREFIX_STR	__stringify(STATIC_CALL_TRAMP_PREFIX)
@@ -39,17 +40,39 @@ struct static_call_site {
 
 #ifdef CONFIG_HAVE_STATIC_CALL
 
+#define __raw_static_call(name)	(&STATIC_CALL_TRAMP(name))
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
 /*
  * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
  * the symbol table so that objtool can reference it when it generates the
  * .static_call_sites section.
  */
+#define __STATIC_CALL_ADDRESSABLE(name) \
+	__ADDRESSABLE(STATIC_CALL_KEY(name))
+
 #define __static_call(name)						\
 ({									\
-	__ADDRESSABLE(STATIC_CALL_KEY(name));				\
-	&STATIC_CALL_TRAMP(name);					\
+	__STATIC_CALL_ADDRESSABLE(name);				\
+	__raw_static_call(name);					\
 })
 
+#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#define __STATIC_CALL_ADDRESSABLE(name)
+#define __static_call(name)	__raw_static_call(name)
+
+#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#ifdef MODULE
+#define __STATIC_CALL_MOD_ADDRESSABLE(name)
+#define static_call_mod(name)	__raw_static_call(name)
+#else
+#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
+#define static_call_mod(name)	__static_call(name)
+#endif
+
 #define static_call(name)	__static_call(name)
 
 #else
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -507,8 +507,21 @@ static int create_static_call_sections(s
 
 		key_sym = find_symbol_by_name(file->elf, tmp);
 		if (!key_sym) {
-			WARN("static_call: can't find static_call_key symbol: %s", tmp);
-			return -1;
+			if (!module) {
+				WARN("static_call: can't find static_call_key symbol: %s", tmp);
+				return -1;
+			}
+
+			/*
+			 * For modules, the key might not be exported, which
+			 * means the module can make static calls but isn't
+			 * allowed to change them.
+			 *
+			 * In that case we temporarily set the key to be the
+			 * trampoline address.  This is fixed up in
+			 * static_call_add_module().
+			 */
+			key_sym = insn->call_dest;
 		}
 		free(key_name);
 

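A note on the tramp_key_lookup() arithmetic above: like
.static_call_sites, the new .static_call_tramp_key section stores
32-bit self-relative offsets (".long sym - ."), decoded by adding the
stored offset to the address of the field it is stored in. The toy
userspace program below (hypothetical names, not kernel code) shows
the same encode/decode trick in isolation:

  #include <stdint.h>
  #include <stdio.h>

  /* 4-byte self-relative offsets instead of 8-byte pointers. */
  struct tramp_key {
  	int32_t tramp;	/* trampoline address, relative to &tramp */
  	int32_t key;	/* key address, relative to &key */
  };

  /*
   * Keep everything in the same (bss) segment so the deltas fit in s32,
   * just as all kernel symbols live within one image.
   */
  static char fake_tramp, fake_key;
  static struct tramp_key entry;

  static void encode(struct tramp_key *e, void *tramp, void *key)
  {
  	e->tramp = (int32_t)((intptr_t)tramp - (intptr_t)&e->tramp);
  	e->key   = (int32_t)((intptr_t)key   - (intptr_t)&e->key);
  }

  /* Mirrors tramp_key_lookup(): offset + address of the field itself. */
  static void *decode_tramp(struct tramp_key *e)
  {
  	return (void *)((intptr_t)e->tramp + (intptr_t)&e->tramp);
  }

  static void *decode_key(struct tramp_key *e)
  {
  	return (void *)((intptr_t)e->key + (intptr_t)&e->key);
  }

  int main(void)
  {
  	encode(&entry, &fake_tramp, &fake_key);
  	printf("tramp ok: %d, key ok: %d\n",
  	       decode_tramp(&entry) == (void *)&fake_tramp,
  	       decode_key(&entry)  == (void *)&fake_key);
  	return 0;
  }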

Thread overview: 61+ messages
2021-01-18 14:12 [RFC PATCH 0/7] preempt: Tune preemption flavour on boot v4 Frederic Weisbecker
2021-01-18 14:12 ` [RFC PATCH 1/8] static_call/x86: Add __static_call_return0() Frederic Weisbecker
2021-02-08 12:00   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra
2021-02-17 13:17   ` tip-bot2 for Peter Zijlstra
2021-01-18 14:12 ` [RFC PATCH 2/8] static_call: Provide DEFINE_STATIC_CALL_RET0() Frederic Weisbecker
2021-02-08 12:00   ` [tip: sched/core] " tip-bot2 for Frederic Weisbecker
2021-02-17 13:17   ` tip-bot2 for Frederic Weisbecker
2021-01-18 14:12 ` [RFC PATCH 3/8] static_call: Pull some static_call declarations to the type headers Frederic Weisbecker
2021-01-18 17:06   ` kernel test robot
2021-01-19 10:26   ` kernel test robot
2021-01-19 10:46   ` Jürgen Groß
2021-02-08 12:00   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra
2021-02-17 13:17   ` tip-bot2 for Peter Zijlstra
2021-01-18 14:12 ` [RFC PATCH 4/8] preempt: Introduce CONFIG_PREEMPT_DYNAMIC Frederic Weisbecker
2021-01-22 16:53   ` Peter Zijlstra
2021-01-28 12:17     ` Frederic Weisbecker
2021-02-08 12:00   ` [tip: sched/core] " tip-bot2 for Michal Hocko
2021-02-17 13:17   ` tip-bot2 for Michal Hocko
2021-01-18 14:12 ` [RFC PATCH 5/8] preempt/dynamic: Provide cond_resched() and might_resched() static calls Frederic Weisbecker
2021-02-08 12:00   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra (Intel)
2021-02-17 13:17   ` tip-bot2 for Peter Zijlstra (Intel)
2021-01-18 14:12 ` [RFC PATCH 6/8] preempt/dynamic: Provide preempt_schedule[_notrace]() " Frederic Weisbecker
2021-01-21 21:58   ` Peter Zijlstra
2021-01-21 22:25     ` Peter Zijlstra
2021-01-22 16:52   ` Peter Zijlstra
2021-01-22 16:57     ` Ard Biesheuvel
2021-01-22 17:08       ` Peter Zijlstra
2021-02-08 12:00         ` [tip: sched/core] sched: Add /debug/sched_preempt tip-bot2 for Peter Zijlstra
2021-02-09 15:45         ` tip-bot2 for Peter Zijlstra
2021-02-17 13:17         ` tip-bot2 for Peter Zijlstra
2021-01-25 23:40     ` [RFC PATCH 6/8] preempt/dynamic: Provide preempt_schedule[_notrace]() static calls Josh Poimboeuf
2021-01-26  9:24       ` Peter Zijlstra
2021-01-26 23:57     ` Josh Poimboeuf
2021-01-27  9:13       ` Peter Zijlstra
2021-01-27 11:27         ` Peter Zijlstra
2021-01-27 15:59           ` Josh Poimboeuf
2021-01-27 16:19             ` Peter Zijlstra
2021-01-27 16:33               ` Josh Poimboeuf
2021-01-27 18:44                 ` Peter Zijlstra
2021-01-27 19:00                   ` Josh Poimboeuf
2021-01-27 19:02                     ` Josh Poimboeuf
2021-01-27 23:18                       ` Josh Poimboeuf
2021-02-03 14:04                         ` Peter Zijlstra
2021-02-05 15:30                           ` Peter Zijlstra [this message]
2021-02-06  2:31                             ` Josh Poimboeuf
2021-02-06  9:03                               ` Peter Zijlstra
2021-02-05 15:22                         ` Peter Zijlstra
2021-02-08 12:00                         ` [tip: sched/core] static_call: Allow module use without exposing static_call_key tip-bot2 for Josh Poimboeuf
2021-02-09 15:45                         ` tip-bot2 for Josh Poimboeuf
2021-02-17 13:17                         ` tip-bot2 for Josh Poimboeuf
2021-02-08 12:00   ` [tip: sched/core] preempt/dynamic: Provide preempt_schedule[_notrace]() static calls tip-bot2 for Peter Zijlstra (Intel)
2021-02-17 13:17   ` tip-bot2 for Peter Zijlstra (Intel)
2021-01-18 14:12 ` [RFC PATCH 7/8] preempt/dynamic: Provide irqentry_exit_cond_resched() static call Frederic Weisbecker
2021-02-08 12:00   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra (Intel)
2021-02-17 13:17   ` tip-bot2 for Peter Zijlstra (Intel)
2021-01-18 14:12 ` [RFC PATCH 8/8] preempt/dynamic: Support dynamic preempt with preempt= boot option Frederic Weisbecker
2021-02-08 12:00   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra (Intel)
2021-02-09 15:45   ` tip-bot2 for Peter Zijlstra (Intel)
2021-02-17 13:17   ` tip-bot2 for Peter Zijlstra (Intel)
2021-01-21 21:22 ` [RFC PATCH 0/7] preempt: Tune preemption flavour on boot v4 Peter Zijlstra
2021-01-22 15:02 ` Paul E. McKenney
