All of lore.kernel.org
 help / color / mirror / Atom feed
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, Linus Torvalds <torvalds@linux-foundation.org>,
	Tim Chen <tim.c.chen@linux.intel.com>,
	Josh Poimboeuf <jpoimboe@kernel.org>,
	Andrew Cooper <Andrew.Cooper3@citrix.com>,
	Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
	Johannes Wikner <kwikner@ethz.ch>,
	Alyssa Milburn <alyssa.milburn@linux.intel.com>,
	Jann Horn <jannh@google.com>, "H.J. Lu" <hjl.tools@gmail.com>,
	Joao Moreira <joao.moreira@intel.com>,
	Joseph Nuzman <joseph.nuzman@intel.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>
Subject: [patch 37/38] x86/bpf: Emit call depth accounting if required
Date: Sun, 17 Jul 2022 01:18:09 +0200 (CEST)	[thread overview]
Message-ID: <20220716230954.898341815@linutronix.de> (raw)
In-Reply-To: 20220716230344.239749011@linutronix.de

Ensure that calls in BPF jitted programs emit call depth accounting
when enabled, so that calls and returns stay balanced. The return thunk
jump is already injected due to the earlier retbleed mitigations.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
---
 arch/x86/include/asm/alternative.h |    6 +++++
 arch/x86/kernel/callthunks.c       |   19 ++++++++++++++++
 arch/x86/net/bpf_jit_comp.c        |   43 ++++++++++++++++++++++++-------------
 3 files changed, 53 insertions(+), 15 deletions(-)

--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -95,6 +95,7 @@ extern void callthunks_patch_module_call
 extern void callthunks_module_free(struct module *mod);
 extern void *callthunks_translate_call_dest(void *dest);
 extern bool is_callthunk(void *addr);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
 #else
 static __always_inline void callthunks_patch_builtin_calls(void) {}
 static __always_inline void
@@ -109,6 +110,11 @@ static __always_inline bool is_callthunk
 {
 	return false;
 }
+static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
+							  void *func)
+{
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_SMP
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -706,6 +706,25 @@ int callthunk_get_kallsym(unsigned int s
 	return ret;
 }
 
+#ifdef CONFIG_BPF_JIT
+int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+{
+	unsigned int tmpl_size = callthunk_desc.template_size;
+	void *tmpl = callthunk_desc.template;
+
+	if (!thunks_initialized)
+		return 0;
+
+	/* Is function call target a thunk? */
+	if (is_callthunk(func))
+		return 0;
+
+	memcpy(*pprog, tmpl, tmpl_size);
+	*pprog += tmpl_size;
+	return tmpl_size;
+}
+#endif
+
 #ifdef CONFIG_MODULES
 void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
 					    struct module *mod)
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -340,6 +340,12 @@ static int emit_call(u8 **pprog, void *f
 	return emit_patch(pprog, func, ip, 0xE8);
 }
 
+static int emit_rsb_call(u8 **pprog, void *func, void *ip)
+{
+	x86_call_depth_emit_accounting(pprog, func);
+	return emit_patch(pprog, func, ip, 0xE8);
+}
+
 static int emit_jump(u8 **pprog, void *func, void *ip)
 {
 	return emit_patch(pprog, func, ip, 0xE9);
@@ -1431,19 +1437,26 @@ st:			if (is_imm8(insn->off))
 			break;
 
 			/* call */
-		case BPF_JMP | BPF_CALL:
+		case BPF_JMP | BPF_CALL: {
+			int offs;
+
 			func = (u8 *) __bpf_call_base + imm32;
 			if (tail_call_reachable) {
 				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
 				EMIT3_off32(0x48, 0x8B, 0x85,
 					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
-				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+				if (!imm32)
 					return -EINVAL;
+				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
 			} else {
-				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
+				if (!imm32)
 					return -EINVAL;
+				offs = x86_call_depth_emit_accounting(&prog, func);
 			}
+			if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+				return -EINVAL;
 			break;
+		}
 
 		case BPF_JMP | BPF_TAIL_CALL:
 			if (imm32)
@@ -1808,10 +1821,10 @@ static int invoke_bpf_prog(const struct
 	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
 	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
 
-	if (emit_call(&prog,
-		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
-		      __bpf_prog_enter, prog))
-			return -EINVAL;
+	if (emit_rsb_call(&prog,
+			  p->aux->sleepable ? __bpf_prog_enter_sleepable :
+			  __bpf_prog_enter, prog))
+		return -EINVAL;
 	/* remember prog start time returned by __bpf_prog_enter */
 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
 
@@ -1831,7 +1844,7 @@ static int invoke_bpf_prog(const struct
 			       (long) p->insnsi >> 32,
 			       (u32) (long) p->insnsi);
 	/* call JITed bpf program or interpreter */
-	if (emit_call(&prog, p->bpf_func, prog))
+	if (emit_rsb_call(&prog, p->bpf_func, prog))
 		return -EINVAL;
 
 	/*
@@ -1855,10 +1868,10 @@ static int invoke_bpf_prog(const struct
 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
 	/* arg3: lea rdx, [rbp - run_ctx_off] */
 	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-	if (emit_call(&prog,
-		      p->aux->sleepable ? __bpf_prog_exit_sleepable :
-		      __bpf_prog_exit, prog))
-			return -EINVAL;
+	if (emit_rsb_call(&prog,
+			  p->aux->sleepable ? __bpf_prog_exit_sleepable :
+			  __bpf_prog_exit, prog))
+		return -EINVAL;
 
 	*pprog = prog;
 	return 0;
@@ -2123,7 +2136,7 @@ int arch_prepare_bpf_trampoline(struct b
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		/* arg1: mov rdi, im */
 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
-		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+		if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2151,7 +2164,7 @@ int arch_prepare_bpf_trampoline(struct b
 		restore_regs(m, &prog, nr_args, regs_off);
 
 		/* call original function */
-		if (emit_call(&prog, orig_call, prog)) {
+		if (emit_rsb_call(&prog, orig_call, prog)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2194,7 +2207,7 @@ int arch_prepare_bpf_trampoline(struct b
 		im->ip_epilogue = prog;
 		/* arg1: mov rdi, im */
 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
-		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+		if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}


  parent reply	other threads:[~2022-07-16 23:20 UTC|newest]

Thread overview: 142+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-07-16 23:17 [patch 00/38] x86/retbleed: Call depth tracking mitigation Thomas Gleixner
2022-07-16 23:17 ` [patch 01/38] x86/paravirt: Ensure proper alignment Thomas Gleixner
2022-07-16 23:17 ` [patch 02/38] x86/cpu: Use native_wrmsrl() in load_percpu_segment() Thomas Gleixner
2022-07-17  0:22   ` Andrew Cooper
2022-07-17 15:20     ` Linus Torvalds
2022-07-17 19:08     ` Thomas Gleixner
2022-07-17 20:08       ` Thomas Gleixner
2022-07-17 20:13         ` Thomas Gleixner
2022-07-17 21:54           ` Thomas Gleixner
2022-07-18  5:11             ` Juergen Gross
2022-07-18  6:54               ` Thomas Gleixner
2022-07-18  8:55                 ` Thomas Gleixner
2022-07-18  9:31                   ` Peter Zijlstra
2022-07-18 10:33                     ` Thomas Gleixner
2022-07-18 11:42                       ` Thomas Gleixner
2022-07-18 17:52   ` [patch 0/3] x86/cpu: Sanitize switch_to_new_gdt() Thomas Gleixner
2022-07-18 17:52   ` [patch 1/3] x86/cpu: Remove segment load from switch_to_new_gdt() Thomas Gleixner
2022-07-18 18:43     ` Linus Torvalds
2022-07-18 18:55       ` Thomas Gleixner
2022-07-18 17:52   ` [patch 2/3] x86/cpu: Get rid of redundant switch_to_new_gdt() invocations Thomas Gleixner
2022-07-18 17:52   ` [patch 3/3] x86/cpu: Re-enable stackprotector Thomas Gleixner
2022-07-16 23:17 ` [patch 03/38] x86/modules: Set VM_FLUSH_RESET_PERMS in module_alloc() Thomas Gleixner
2022-07-16 23:17 ` [patch 04/38] x86/vdso: Ensure all kernel code is seen by objtool Thomas Gleixner
2022-07-16 23:17 ` [patch 05/38] btree: Initialize early when builtin Thomas Gleixner
2022-07-16 23:17 ` [patch 06/38] objtool: Allow GS relative relocs Thomas Gleixner
2022-07-16 23:17 ` [patch 07/38] objtool: Track init section Thomas Gleixner
2022-07-16 23:17 ` [patch 08/38] objtool: Add .call_sites section Thomas Gleixner
2022-07-16 23:17 ` [patch 09/38] objtool: Add .sym_sites section Thomas Gleixner
2022-07-16 23:17 ` [patch 10/38] objtool: Add --hacks=skylake Thomas Gleixner
2022-07-16 23:17 ` [patch 11/38] objtool: Allow STT_NOTYPE -> STT_FUNC+0 tail-calls Thomas Gleixner
2022-07-16 23:17 ` [patch 12/38] x86/entry: Make sync_regs() invocation a tail call Thomas Gleixner
2022-07-16 23:17 ` [patch 13/38] x86/modules: Make module_alloc() generally available Thomas Gleixner
2022-07-16 23:17 ` [patch 14/38] x86/Kconfig: Add CONFIG_CALL_THUNKS Thomas Gleixner
2022-07-16 23:17 ` [patch 15/38] x86/retbleed: Add X86_FEATURE_CALL_DEPTH Thomas Gleixner
2022-07-16 23:17 ` [patch 16/38] modules: Make struct module_layout unconditionally available Thomas Gleixner
2022-07-16 23:17 ` [patch 17/38] module: Add arch_data to module_layout Thomas Gleixner
2022-07-16 23:17 ` [patch 18/38] mm/vmalloc: Provide huge page mappings Thomas Gleixner
2022-07-16 23:17 ` [patch 19/38] x86/module: Provide __module_alloc() Thomas Gleixner
2022-07-16 23:17 ` [patch 20/38] x86/alternatives: Provide text_poke_[copy|set]_locked() Thomas Gleixner
2022-07-16 23:17 ` [patch 21/38] x86/entry: Make some entry symbols global Thomas Gleixner
2022-07-16 23:17 ` [patch 22/38] x86/paravirt: Make struct paravirt_call_site unconditionally available Thomas Gleixner
2022-07-16 23:17 ` [patch 23/38] x86/callthunks: Add call patching for call depth tracking Thomas Gleixner
2022-07-16 23:17 ` [patch 24/38] module: Add layout for callthunks tracking Thomas Gleixner
2022-07-16 23:17 ` [patch 25/38] x86/modules: Add call thunk patching Thomas Gleixner
2022-07-16 23:17 ` [patch 26/38] x86/returnthunk: Allow different return thunks Thomas Gleixner
2022-07-16 23:17 ` [patch 27/38] x86/asm: Provide ALTERNATIVE_3 Thomas Gleixner
2022-07-16 23:17 ` [patch 28/38] x86/retbleed: Add SKL return thunk Thomas Gleixner
2022-07-16 23:17 ` [patch 29/38] x86/retpoline: Add SKL retthunk retpolines Thomas Gleixner
2022-07-16 23:17 ` [patch 30/38] x86/retbleed: Add SKL call thunk Thomas Gleixner
2022-07-16 23:18 ` [patch 31/38] x86/calldepth: Add ret/call counting for debug Thomas Gleixner
2022-07-16 23:18 ` [patch 32/38] static_call: Add call depth tracking support Thomas Gleixner
2022-07-16 23:18 ` [patch 33/38] kallsyms: Take callthunks into account Thomas Gleixner
2022-07-16 23:18 ` [patch 34/38] x86/orc: Make it callthunk aware Thomas Gleixner
2022-07-16 23:18 ` [patch 35/38] kprobes: Add callthunk blacklisting Thomas Gleixner
2022-07-16 23:18 ` [patch 36/38] x86/ftrace: Make it call depth tracking aware Thomas Gleixner
2022-07-18 21:01   ` Steven Rostedt
2022-07-19  8:46     ` Peter Zijlstra
2022-07-19 13:06       ` Steven Rostedt
2022-07-16 23:18 ` Thomas Gleixner [this message]
2022-07-19  5:30   ` [patch 37/38] x86/bpf: Emit call depth accounting if required Alexei Starovoitov
2022-07-19  8:34     ` Peter Zijlstra
2022-07-16 23:18 ` [patch 38/38] x86/retbleed: Add call depth tracking mitigation Thomas Gleixner
2022-07-17  9:45 ` [patch 00/38] x86/retbleed: Call " David Laight
2022-07-17 15:07   ` Thomas Gleixner
2022-07-17 17:56     ` David Laight
2022-07-17 19:15       ` Thomas Gleixner
2022-07-18 19:29 ` Thomas Gleixner
2022-07-18 19:30   ` Thomas Gleixner
2022-07-18 19:51     ` Linus Torvalds
2022-07-18 20:44       ` Thomas Gleixner
2022-07-18 21:01         ` Linus Torvalds
2022-07-18 21:43           ` Peter Zijlstra
2022-07-18 22:34             ` Linus Torvalds
2022-07-18 23:52               ` Peter Zijlstra
2022-07-18 21:18         ` Peter Zijlstra
2022-07-18 22:22           ` Thomas Gleixner
2022-07-18 22:47             ` Joao Moreira
2022-07-18 22:55               ` Sami Tolvanen
2022-07-18 23:08                 ` Joao Moreira
2022-07-18 23:19                 ` Thomas Gleixner
2022-07-18 23:42                   ` Linus Torvalds
2022-07-18 23:52                     ` Linus Torvalds
2022-07-18 23:57                       ` Peter Zijlstra
2022-07-19  0:03                         ` Linus Torvalds
2022-07-19  0:11                           ` Linus Torvalds
2022-07-19  0:23                             ` Peter Zijlstra
2022-07-19  1:02                               ` Linus Torvalds
2022-07-19 17:19                             ` Sami Tolvanen
2022-07-20 21:13                               ` Peter Zijlstra
2022-07-21  8:21                                 ` David Laight
2022-07-21 10:56                                   ` David Laight
2022-07-21 15:54                                 ` Peter Zijlstra
2022-07-21 17:55                                   ` Peter Zijlstra
2022-07-21 18:06                                     ` Linus Torvalds
2022-07-21 18:27                                       ` Peter Zijlstra
2022-07-21 18:32                                         ` Linus Torvalds
2022-07-21 20:22                                           ` Joao Moreira
2022-07-22  0:16                                         ` Sami Tolvanen
2022-07-22 10:23                                           ` Peter Zijlstra
2022-07-22 15:38                                             ` Sami Tolvanen
2022-07-21 22:01                                       ` David Laight
2022-07-22 11:03                                         ` Peter Zijlstra
2022-07-22 13:27                                           ` David Laight
2022-07-23  9:50                                   ` Thomas Gleixner
2022-07-19  0:01                       ` Linus Torvalds
2022-07-19  0:19                         ` Joao Moreira
2022-07-19 17:21                           ` Sami Tolvanen
2022-07-19 17:58                             ` Joao Moreira
2022-07-19  8:26                         ` David Laight
2022-07-19 16:27                           ` Linus Torvalds
2022-07-19 17:23                             ` Sami Tolvanen
2022-07-19 17:27                               ` Linus Torvalds
2022-07-19 18:06                                 ` Sami Tolvanen
2022-07-19 20:10                                   ` Peter Zijlstra
2022-07-18 22:48           ` Sami Tolvanen
2022-07-18 22:59             ` Thomas Gleixner
2022-07-18 23:10               ` Sami Tolvanen
2022-07-18 23:39               ` Linus Torvalds
2022-07-18 23:51             ` Peter Zijlstra
2022-07-20  9:00               ` Thomas Gleixner
2022-07-20 16:55               ` Sami Tolvanen
2022-07-20 19:42               ` Sami Tolvanen
2022-07-22 20:11         ` Tim Chen
2022-07-22 22:18           ` Linus Torvalds
2022-07-18 19:55 ` Thomas Gleixner
2022-07-19 10:24 ` Virt " Andrew Cooper
2022-07-19 14:13   ` Thomas Gleixner
2022-07-19 16:23     ` Andrew Cooper
2022-07-19 21:17       ` Thomas Gleixner
2022-07-19 14:45   ` Michael Kelley (LINUX)
2022-07-19 20:16     ` Peter Zijlstra
2022-07-20 16:57 ` [patch 00/38] x86/retbleed: " Steven Rostedt
2022-07-20 17:09   ` Linus Torvalds
2022-07-20 17:24     ` Peter Zijlstra
2022-07-20 17:50       ` Steven Rostedt
2022-07-20 18:07         ` Linus Torvalds
2022-07-20 18:31           ` Steven Rostedt
2022-07-20 18:43             ` Linus Torvalds
2022-07-20 19:11               ` Steven Rostedt
2022-07-20 19:36           ` Kees Cook
2022-07-20 19:43             ` Steven Rostedt
2022-07-20 21:36             ` Peter Zijlstra

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220716230954.898341815@linutronix.de \
    --to=tglx@linutronix.de \
    --cc=Andrew.Cooper3@citrix.com \
    --cc=alyssa.milburn@linux.intel.com \
    --cc=ast@kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=hjl.tools@gmail.com \
    --cc=jannh@google.com \
    --cc=joao.moreira@intel.com \
    --cc=joseph.nuzman@intel.com \
    --cc=jpoimboe@kernel.org \
    --cc=kwikner@ethz.ch \
    --cc=linux-kernel@vger.kernel.org \
    --cc=pawan.kumar.gupta@linux.intel.com \
    --cc=rostedt@goodmis.org \
    --cc=tim.c.chen@linux.intel.com \
    --cc=torvalds@linux-foundation.org \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.