From: "tip-bot2 for Thomas Gleixner" <tip-bot2@linutronix.de>
To: linux-tip-commits@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
	"Peter Zijlstra (Intel)" <peterz@infradead.org>,
	x86@kernel.org, linux-kernel@vger.kernel.org
Subject: [tip: x86/core] x86/percpu: Move irq_stack variables next to current_task
Date: Mon, 17 Oct 2022 14:54:05 -0000
Message-ID: <166601844549.401.13580790256557728644.tip-bot2@tip-bot2>
In-Reply-To: <20220915111145.599170752@infradead.org>

The following commit has been merged into the x86/core branch of tip:

Commit-ID:     d7b6d709a76a4f4ef3108ac41e1b39eb80f5c084
Gitweb:        https://git.kernel.org/tip/d7b6d709a76a4f4ef3108ac41e1b39eb80f5c084
Author:        Thomas Gleixner <tglx@linutronix.de>
AuthorDate:    Thu, 15 Sep 2022 13:11:05 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Mon, 17 Oct 2022 16:41:05 +02:00

x86/percpu: Move irq_stack variables next to current_task

Further extend struct pcpu_hot with the hard and soft irq stack
pointers.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111145.599170752@infradead.org
---
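For illustration, a minimal sketch of the resulting struct pcpu_hot layout and
of the accessor pattern used throughout the hunks below; members added by other
patches in this series are elided and the helper function is purely hypothetical:

/*
 * Illustrative sketch only, not part of the patch: struct pcpu_hot after
 * this change, in the member order shown in the asm/current.h hunk below.
 */
struct pcpu_hot {
	union {
		struct {
			struct task_struct	*current_task;
			int			preempt_count;
			int			cpu_number;
			unsigned long		top_of_stack;
			void			*hardirq_stack_ptr;
#ifdef CONFIG_X86_64
			bool			hardirq_stack_inuse;
#else
			void			*softirq_stack_ptr;
#endif
		};
		u8	pad[64];	/* pad the hot data to one full cacheline */
	};
};

/*
 * Call sites change from reading the standalone per-CPU variable, e.g.
 *	irqstk = __this_cpu_read(hardirq_stack_ptr);
 * to reading the member of the aggregated hot structure:
 */
static __always_inline void *example_hardirq_stack(void)
{
	return __this_cpu_read(pcpu_hot.hardirq_stack_ptr);
}
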
 arch/x86/include/asm/current.h   |  6 ++++++
 arch/x86/include/asm/irq_stack.h | 12 ++++++------
 arch/x86/include/asm/processor.h |  4 ----
 arch/x86/kernel/cpu/common.c     |  3 ---
 arch/x86/kernel/dumpstack_32.c   |  4 ++--
 arch/x86/kernel/dumpstack_64.c   |  2 +-
 arch/x86/kernel/irq_32.c         | 13 +++++--------
 arch/x86/kernel/irq_64.c         |  6 +++---
 arch/x86/kernel/process_64.c     |  2 +-
 9 files changed, 24 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 2dd0131..ac3090d 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -18,6 +18,12 @@ struct pcpu_hot {
 			int			preempt_count;
 			int			cpu_number;
 			unsigned long		top_of_stack;
+			void			*hardirq_stack_ptr;
+#ifdef CONFIG_X86_64
+			bool			hardirq_stack_inuse;
+#else
+			void			*softirq_stack_ptr;
+#endif
 		};
 		u8	pad[64];
 	};
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 147cb8f..7981838 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -116,7 +116,7 @@
 	ASM_CALL_ARG2
 
 #define call_on_irqstack(func, asm_call, argconstr...)			\
-	call_on_stack(__this_cpu_read(hardirq_stack_ptr),		\
+	call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr),	\
 		      func, asm_call, argconstr)
 
 /* Macros to assert type correctness for run_*_on_irqstack macros */
@@ -135,7 +135,7 @@
 	 * User mode entry and interrupt on the irq stack do not	\
 	 * switch stacks. If from user mode the task stack is empty.	\
 	 */								\
-	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
+	if (user_mode(regs) || __this_cpu_read(pcpu_hot.hardirq_stack_inuse)) { \
 		irq_enter_rcu();					\
 		func(c_args);						\
 		irq_exit_rcu();						\
@@ -146,9 +146,9 @@
 		 * places. Invoke the stack switch macro with the call	\
 		 * sequence which matches the above direct invocation.	\
 		 */							\
-		__this_cpu_write(hardirq_stack_inuse, true);		\
+		__this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);	\
 		call_on_irqstack(func, asm_call, constr);		\
-		__this_cpu_write(hardirq_stack_inuse, false);		\
+		__this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);	\
 	}								\
 }
 
@@ -212,9 +212,9 @@
  */
 #define do_softirq_own_stack()						\
 {									\
-	__this_cpu_write(hardirq_stack_inuse, true);			\
+	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);		\
 	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
-	__this_cpu_write(hardirq_stack_inuse, false);			\
+	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);		\
 }
 
 #endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c345f30..bdde687 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -448,8 +448,6 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
 }
 
-DECLARE_PER_CPU(void *, hardirq_stack_ptr);
-DECLARE_PER_CPU(bool, hardirq_stack_inuse);
 extern asmlinkage void ignore_sysret(void);
 
 /* Save actual FS/GS selectors and bases to current->thread */
@@ -458,8 +456,6 @@ void current_save_fsgs(void);
 #ifdef CONFIG_STACKPROTECTOR
 DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
 #endif
-DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
 #endif	/* !X86_64 */
 
 struct perf_event;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 408245c..2bec4b4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2024,9 +2024,6 @@ DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
 		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
 EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
 
-DEFINE_PER_CPU(void *, hardirq_stack_ptr);
-DEFINE_PER_CPU(bool, hardirq_stack_inuse);
-
 static void wrmsrl_cstar(unsigned long val)
 {
 	/*
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 722fd71..b4905d5 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -37,7 +37,7 @@ const char *stack_type_name(enum stack_type type)
 
 static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 {
-	unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
+	unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
 	unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));
 
 	/*
@@ -62,7 +62,7 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 
 static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
 {
-	unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr);
+	unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.softirq_stack_ptr);
 	unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));
 
 	/*
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6c5defd..f05339f 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -134,7 +134,7 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac
 
 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
-	unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
+	unsigned long *end = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
 	unsigned long *begin;
 
 	/*
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 01833eb..dc1049c 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -52,9 +52,6 @@ static inline int check_stack_overflow(void) { return 0; }
 static inline void print_stack_overflow(void) { }
 #endif
 
-DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
-
 static void call_on_stack(void *func, void *stack)
 {
 	asm volatile("xchgl	%%ebx,%%esp	\n"
@@ -77,7 +74,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
 	u32 *isp, *prev_esp, arg1;
 
 	curstk = (struct irq_stack *) current_stack();
-	irqstk = __this_cpu_read(hardirq_stack_ptr);
+	irqstk = __this_cpu_read(pcpu_hot.hardirq_stack_ptr);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -115,7 +112,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 	int node = cpu_to_node(cpu);
 	struct page *ph, *ps;
 
-	if (per_cpu(hardirq_stack_ptr, cpu))
+	if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
 		return 0;
 
 	ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
@@ -127,8 +124,8 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 		return -ENOMEM;
 	}
 
-	per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
-	per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
+	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph);
+	per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps);
 	return 0;
 }
 
@@ -138,7 +135,7 @@ void do_softirq_own_stack(void)
 	struct irq_stack *irqstk;
 	u32 *isp, *prev_esp;
 
-	irqstk = __this_cpu_read(softirq_stack_ptr);
+	irqstk = __this_cpu_read(pcpu_hot.softirq_stack_ptr);
 
 	/* build the stack frame on the softirq stack */
 	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1c0fb96..fe0c859 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -50,7 +50,7 @@ static int map_irq_stack(unsigned int cpu)
 		return -ENOMEM;
 
 	/* Store actual TOS to avoid adjustment in the hotpath */
-	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
 	return 0;
 }
 #else
@@ -63,14 +63,14 @@ static int map_irq_stack(unsigned int cpu)
 	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
 
 	/* Store actual TOS to avoid adjustment in the hotpath */
-	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
 	return 0;
 }
 #endif
 
 int irq_init_percpu_irqstack(unsigned int cpu)
 {
-	if (per_cpu(hardirq_stack_ptr, cpu))
+	if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
 		return 0;
 	return map_irq_stack(cpu);
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 7f807e8..1312de5 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -563,7 +563,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-		     this_cpu_read(hardirq_stack_inuse));
+		     this_cpu_read(pcpu_hot.hardirq_stack_inuse));
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 		switch_fpu_prepare(prev_fpu, cpu);


Thread overview: 138+ messages
2022-09-15 11:10 [PATCH v3 00/59] x86/retbleed: Call depth tracking mitigation Peter Zijlstra
2022-09-15 11:10 ` [PATCH v3 01/59] x86/paravirt: Ensure proper alignment Peter Zijlstra
2022-09-21 11:08   ` [tip: x86/paravirt] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 02/59] x86/cpu: Remove segment load from switch_to_new_gdt() Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 03/59] x86/cpu: Get rid of redundant switch_to_new_gdt() invocations Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 04/59] x86/cpu: Re-enable stackprotector Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 05/59] x86/modules: Set VM_FLUSH_RESET_PERMS in module_alloc() Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 06/59] x86/vdso: Ensure all kernel code is seen by objtool Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 07/59] x86: Sanitize linker script Peter Zijlstra
2022-10-07 16:03   ` Borislav Petkov
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 08/59] arch: Introduce CONFIG_FUNCTION_ALIGNMENT Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:10 ` [PATCH v3 09/59] x86/asm: Differentiate between code and function alignment Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 10/59] x86/error_inject: Align function properly Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:10 ` [PATCH v3 11/59] x86/paravirt: Properly align PV functions Peter Zijlstra
2022-09-15 14:34   ` Juergen Gross
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 12/59] x86/entry: Align SYM_CODE_START() variants Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 13/59] crypto: x86/camellia: Remove redundant alignments Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 14/59] crypto: x86/cast5: " Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 15/59] crypto: x86/crct10dif-pcl: " Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 16/59] crypto: x86/serpent: " Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 17/59] crypto: x86/sha1: Remove custom alignments Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 18/59] crypto: x86/sha256: " Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 19/59] crypto: x86/sm[34]: Remove redundant alignments Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:10 ` [PATCH v3 20/59] crypto: twofish: " Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 21/59] crypto: x86/poly1305: Remove custom function alignment Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 22/59] x86: Put hot per CPU variables into a struct Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 23/59] x86/percpu: Move preempt_count next to current_task Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 24/59] x86/percpu: Move cpu_number " Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 25/59] x86/percpu: Move current_top_of_stack " Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 26/59] x86/percpu: Move irq_stack variables " Peter Zijlstra
2022-10-17 14:54   ` tip-bot2 for Thomas Gleixner [this message]
2022-09-15 11:11 ` [PATCH v3 27/59] x86/softirq: Move softirq pending next to current task Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 28/59] objtool: Allow !PC relative relocations Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 29/59] objtool: Track init section Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 30/59] objtool: Add .call_sites section Peter Zijlstra
2022-10-17 14:54   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 31/59] objtool: Add --hacks=skylake Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 32/59] objtool: Allow STT_NOTYPE -> STT_FUNC+0 tail-calls Peter Zijlstra
2022-09-22  5:27   ` Pawan Gupta
2022-09-22 10:29     ` Peter Zijlstra
2022-09-22 10:47       ` Peter Zijlstra
2022-09-22 13:15         ` Peter Zijlstra
2022-09-23 14:35           ` Peter Zijlstra
2022-09-23 17:36             ` Pawan Gupta
2022-09-15 11:11 ` [PATCH v3 33/59] objtool: Fix find_{symbol,func}_containing() Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 34/59] objtool: Allow symbol range comparisons for IBT/ENDBR Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 35/59] x86/entry: Make sync_regs() invocation a tail call Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 36/59] ftrace: Add HAVE_DYNAMIC_FTRACE_NO_PATCHABLE Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 37/59] x86/putuser: Provide room for padding Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 38/59] x86/Kconfig: Add CONFIG_CALL_THUNKS Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 39/59] x86/Kconfig: Introduce function padding Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 40/59] x86/retbleed: Add X86_FEATURE_CALL_DEPTH Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 41/59] x86/alternatives: Provide text_poke_copy_locked() Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 42/59] x86/entry: Make some entry symbols global Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 43/59] x86/paravirt: Make struct paravirt_call_site unconditionally available Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 44/59] x86/callthunks: Add call patching for call depth tracking Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 45/59] x86/modules: Add call patching Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 46/59] x86/returnthunk: Allow different return thunks Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 47/59] x86/asm: Provide ALTERNATIVE_3 Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 48/59] x86/retbleed: Add SKL return thunk Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-10-20 23:10   ` [PATCH v3 48/59] " Nathan Chancellor
2022-10-21  9:53     ` Peter Zijlstra
2022-10-21 15:21       ` Nathan Chancellor
2022-11-03 22:53         ` KVM vs AMD: " Andrew Cooper
2022-11-04 12:44           ` Peter Zijlstra
2022-11-04 15:29             ` Andrew Cooper
2022-11-04 15:32             ` Nathan Chancellor
2022-11-07  9:37             ` Paolo Bonzini
2022-09-15 11:11 ` [PATCH v3 49/59] x86/retpoline: Add SKL retthunk retpolines Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 50/59] x86/retbleed: Add SKL call thunk Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 51/59] x86/calldepth: Add ret/call counting for debug Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2022-09-15 11:11 ` [PATCH v3 52/59] static_call: Add call depth tracking support Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 53/59] kallsyms: Take callthunks into account Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 54/59] x86/orc: Make it callthunk aware Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 55/59] x86/bpf: Emit call depth accounting if required Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
2023-01-05 21:49   ` [PATCH v3 55/59] " Joan Bruguera
2022-09-15 11:11 ` [PATCH v3 56/59] x86/ftrace: Remove ftrace_epilogue() Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-10-20 15:17   ` [tip: x86/urgent] " tip-bot2 for Peter Zijlstra
2022-12-09 15:41   ` [PATCH v3 56/59] " Steven Rostedt
2022-09-15 11:11 ` [PATCH v3 57/59] x86/ftrace: Rebalance RSB Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 58/59] x86/ftrace: Make it call depth tracking aware Peter Zijlstra
2022-09-21 10:19   ` [PATCH v3.1 " Peter Zijlstra
2022-09-21 18:45     ` Pawan Gupta
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Peter Zijlstra
2022-09-15 11:11 ` [PATCH v3 59/59] x86/retbleed: Add call depth tracking mitigation Peter Zijlstra
2022-10-17 14:53   ` [tip: x86/core] " tip-bot2 for Thomas Gleixner
