From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	stable@vger.kernel.org,
	"Peter Zijlstra (Intel)" <peterz@infradead.org>,
	Borislav Petkov <bp@suse.de>, Alexei Starovoitov <ast@kernel.org>,
	Josh Poimboeuf <jpoimboe@redhat.com>,
	Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Subject: [PATCH 5.15 19/89] bpf,x86: Simplify computing label offsets
Date: Fri, 22 Jul 2022 11:10:53 +0200	[thread overview]
Message-ID: <20220722091134.446936858@linuxfoundation.org> (raw)
In-Reply-To: <20220722091133.320803732@linuxfoundation.org>

From: Peter Zijlstra <peterz@infradead.org>

commit dceba0817ca329868a15e2e1dd46eb6340b69206 upstream.

Take an idea from the 32bit JIT, which uses the multi-pass nature of
the JIT to compute the instruction offsets on a prior pass in order to
compute the relative jump offsets on a later pass.

Application to the x86_64 JIT is slightly more involved because the
offsets depend on program variables (such as callee_regs_used and
stack_depth) and hence the computed offsets need to be kept in the
context of the JIT.

This removes what is, IMO, quite fragile code that hard-codes the
offsets and tries to compute the length of its variable parts.

Convert both emit_bpf_tail_call_*() functions, which have an out: label
at the end. Additionally, emit_bpf_tail_call_direct() also has a poke
table entry for which it computes the offset from the end (and thus
already relies on the previous pass to have computed addrs[i]); convert
this to a forward-based offset as well.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Tested-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20211026120310.552304864@infradead.org
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/net/bpf_jit_comp.c |  123 +++++++++++++++-----------------------------
 1 file changed, 42 insertions(+), 81 deletions(-)

--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -224,6 +224,14 @@ static void jit_fill_hole(void *area, un
 
 struct jit_context {
 	int cleanup_addr; /* Epilogue code offset */
+
+	/*
+	 * Program specific offsets of labels in the code; these rely on the
+	 * JIT doing at least 2 passes, recording the position on the first
+	 * pass, only to generate the correct offset on the second pass.
+	 */
+	int tail_call_direct_label;
+	int tail_call_indirect_label;
 };
 
 /* Maximum number of bytes emitted while JITing one eBPF insn */
@@ -379,22 +387,6 @@ int bpf_arch_text_poke(void *ip, enum bp
 	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
 }
 
-static int get_pop_bytes(bool *callee_regs_used)
-{
-	int bytes = 0;
-
-	if (callee_regs_used[3])
-		bytes += 2;
-	if (callee_regs_used[2])
-		bytes += 2;
-	if (callee_regs_used[1])
-		bytes += 2;
-	if (callee_regs_used[0])
-		bytes += 1;
-
-	return bytes;
-}
-
 /*
  * Generate the following code:
  *
@@ -410,29 +402,12 @@ static int get_pop_bytes(bool *callee_re
  * out:
  */
 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
-					u32 stack_depth)
+					u32 stack_depth, u8 *ip,
+					struct jit_context *ctx)
 {
 	int tcc_off = -4 - round_up(stack_depth, 8);
-	u8 *prog = *pprog;
-	int pop_bytes = 0;
-	int off1 = 42;
-	int off2 = 31;
-	int off3 = 9;
-
-	/* count the additional bytes used for popping callee regs from stack
-	 * that need to be taken into account for each of the offsets that
-	 * are used for bailing out of the tail call
-	 */
-	pop_bytes = get_pop_bytes(callee_regs_used);
-	off1 += pop_bytes;
-	off2 += pop_bytes;
-	off3 += pop_bytes;
-
-	if (stack_depth) {
-		off1 += 7;
-		off2 += 7;
-		off3 += 7;
-	}
+	u8 *prog = *pprog, *start = *pprog;
+	int offset;
 
 	/*
 	 * rdi - pointer to ctx
@@ -447,8 +422,9 @@ static void emit_bpf_tail_call_indirect(
 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
 	      offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
-	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
+
+	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
+	EMIT2(X86_JBE, offset);                   /* jbe out */
 
 	/*
 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
@@ -456,8 +432,9 @@ static void emit_bpf_tail_call_indirect(
 	 */
 	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
-	EMIT2(X86_JA, OFFSET2);                   /* ja out */
+
+	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
+	EMIT2(X86_JA, offset);                    /* ja out */
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
 	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
 
@@ -470,12 +447,11 @@ static void emit_bpf_tail_call_indirect(
 	 *	goto out;
 	 */
 	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
-#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
-	EMIT2(X86_JE, OFFSET3);                   /* je out */
 
-	*pprog = prog;
-	pop_callee_regs(pprog, callee_regs_used);
-	prog = *pprog;
+	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
+	EMIT2(X86_JE, offset);                    /* je out */
+
+	pop_callee_regs(&prog, callee_regs_used);
 
 	EMIT1(0x58);                              /* pop rax */
 	if (stack_depth)
@@ -495,38 +471,18 @@ static void emit_bpf_tail_call_indirect(
 	RETPOLINE_RCX_BPF_JIT();
 
 	/* out: */
+	ctx->tail_call_indirect_label = prog - start;
 	*pprog = prog;
 }
 
 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
-				      u8 **pprog, int addr, u8 *image,
-				      bool *callee_regs_used, u32 stack_depth)
+				      u8 **pprog, u8 *ip,
+				      bool *callee_regs_used, u32 stack_depth,
+				      struct jit_context *ctx)
 {
 	int tcc_off = -4 - round_up(stack_depth, 8);
-	u8 *prog = *pprog;
-	int pop_bytes = 0;
-	int off1 = 20;
-	int poke_off;
-
-	/* count the additional bytes used for popping callee regs to stack
-	 * that need to be taken into account for jump offset that is used for
-	 * bailing out from of the tail call when limit is reached
-	 */
-	pop_bytes = get_pop_bytes(callee_regs_used);
-	off1 += pop_bytes;
-
-	/*
-	 * total bytes for:
-	 * - nop5/ jmpq $off
-	 * - pop callee regs
-	 * - sub rsp, $val if depth > 0
-	 * - pop rax
-	 */
-	poke_off = X86_PATCH_SIZE + pop_bytes + 1;
-	if (stack_depth) {
-		poke_off += 7;
-		off1 += 7;
-	}
+	u8 *prog = *pprog, *start = *pprog;
+	int offset;
 
 	/*
 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
@@ -534,28 +490,30 @@ static void emit_bpf_tail_call_direct(st
 	 */
 	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
-	EMIT2(X86_JA, off1);                          /* ja out */
+
+	offset = ctx->tail_call_direct_label - (prog + 2 - start);
+	EMIT2(X86_JA, offset);                        /* ja out */
 	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
 	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */
 
-	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
+	poke->tailcall_bypass = ip + (prog - start);
 	poke->adj_off = X86_TAIL_CALL_OFFSET;
-	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
+	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
 	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
 
 	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
 		  poke->tailcall_bypass);
 
-	*pprog = prog;
-	pop_callee_regs(pprog, callee_regs_used);
-	prog = *pprog;
+	pop_callee_regs(&prog, callee_regs_used);
 	EMIT1(0x58);                                  /* pop rax */
 	if (stack_depth)
 		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
 
 	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
 	prog += X86_PATCH_SIZE;
+
 	/* out: */
+	ctx->tail_call_direct_label = prog - start;
 
 	*pprog = prog;
 }
@@ -1453,13 +1411,16 @@ st:			if (is_imm8(insn->off))
 		case BPF_JMP | BPF_TAIL_CALL:
 			if (imm32)
 				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
-							  &prog, addrs[i], image,
+							  &prog, image + addrs[i - 1],
 							  callee_regs_used,
-							  bpf_prog->aux->stack_depth);
+							  bpf_prog->aux->stack_depth,
+							  ctx);
 			else
 				emit_bpf_tail_call_indirect(&prog,
 							    callee_regs_used,
-							    bpf_prog->aux->stack_depth);
+							    bpf_prog->aux->stack_depth,
+							    image + addrs[i - 1],
+							    ctx);
 			break;
 
 			/* cond jump */
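
For reference, a worked example of the displacement arithmetic used in
the hunks above. The byte offsets below are made up for illustration;
the rel8 of a two-byte Jcc is measured from the byte that follows the
instruction, which is where the "+ 2" comes from:

#include <assert.h>

int main(void)
{
	/* Hypothetical numbers, not taken from a real program. */
	int tail_call_indirect_label = 40;	/* "out:" offset recorded on the previous pass */
	int jbe_pos = 10;			/* offset where the 2-byte JBE is emitted */

	/* mirrors: offset = ctx->tail_call_indirect_label - (prog + 2 - start) */
	int offset = tail_call_indirect_label - (jbe_pos + 2);

	assert(offset == 28);			/* execution resumes 28 bytes past the JBE, at "out:" */
	return 0;
}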


