From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, David Woodhouse <dwmw2@infradead.org>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Brian Gerst <brgerst@gmail.com>,
	Arjan van de Ven <arjan@linux.intel.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Paul McKenney <paulmck@kernel.org>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	Sean Christopherson <seanjc@google.com>,
	Oleksandr Natalenko <oleksandr@natalenko.name>,
	Paul Menzel <pmenzel@molgen.mpg.de>,
	"Guilherme G. Piccoli" <gpiccoli@igalia.com>,
	Piotr Gorski <lucjan.lucjanov@gmail.com>,
	Usama Arif <usama.arif@bytedance.com>,
	Juergen Gross <jgross@suse.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	xen-devel@lists.xenproject.org,
	Russell King <linux@armlinux.org.uk>,
	Arnd Bergmann <arnd@arndb.de>,
	linux-arm-kernel@lists.infradead.org,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>, Guo Ren <guoren@kernel.org>,
	linux-csky@vger.kernel.org,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	linux-mips@vger.kernel.org,
	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
	Helge Deller <deller@gmx.de>,
	linux-parisc@vger.kernel.org,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	linux-riscv@lists.infradead.org,
	Mark Rutland <mark.rutland@arm.com>,
	Sabin Rapan <sabrapan@amazon.com>,
	"Michael Kelley (LINUX)" <mikelley@microsoft.com>,
	Ross Philipson <ross.philipson@oracle.com>
Subject: [patch V4 13/37] x86/smpboot: Make TSC synchronization function call based
Date: Fri, 12 May 2023 23:07:17 +0200 (CEST)
Message-ID: <20230512205256.148255496@linutronix.de>
In-Reply-To: <20230512203426.452963764@linutronix.de>

From: Thomas Gleixner <tglx@linutronix.de>

Spin-waiting on the control CPU until the AP reaches the TSC
synchronization code is wasteful, especially when no synchronization is
required.

As the synchronization has to run with interrupts disabled, the control
CPU part can be done from an SMP function call. The upcoming AP issues
that call asynchronously, and only when synchronization is actually
required.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>
---
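
Note: the patch adopts the standard asynchronous SMP function call
pattern: smp_call_function_single() runs the handler on the target CPU
in IPI context with interrupts disabled, and wait == 0 lets the
initiating CPU continue without spinning. A minimal sketch of that
pattern, with hypothetical names (not part of this patch):

  #include <linux/printk.h>
  #include <linux/smp.h>

  /* Runs on the target CPU, in IPI context with interrupts disabled. */
  static void remote_handler(void *info)
  {
  	unsigned int cpu = (unsigned long)info;	/* unpack the CPU cookie */

  	pr_debug("handler kicked by CPU#%u\n", cpu);
  }

  /* Kick @target and return immediately; wait == 0 means asynchronous. */
  static void kick_cpu(int target, unsigned int self)
  {
  	smp_call_function_single(target, remote_handler,
  				 (void *)(unsigned long)self, 0);
  }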
 arch/x86/include/asm/tsc.h |    2 --
 arch/x86/kernel/smpboot.c  |   20 +++-----------------
 arch/x86/kernel/tsc_sync.c |   36 +++++++++++-------------------------
 3 files changed, 14 insertions(+), 44 deletions(-)

--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -55,12 +55,10 @@ extern bool tsc_async_resets;
 #ifdef CONFIG_X86_TSC
 extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
 extern void tsc_verify_tsc_adjust(bool resume);
-extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 #else
 static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
 static inline void tsc_verify_tsc_adjust(bool resume) { }
-static inline void check_tsc_sync_source(int cpu) { }
 static inline void check_tsc_sync_target(void) { }
 #endif
 
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -275,11 +275,7 @@ static void notrace start_secondary(void
 	 */
 	smp_callin();
 
-	/*
-	 * Check TSC synchronization with the control CPU, which will do
-	 * its part of this from wait_cpu_online(), making it an implicit
-	 * synchronization point.
-	 */
+	/* Check TSC synchronization with the control CPU. */
 	check_tsc_sync_target();
 
 	/*
@@ -1141,21 +1137,11 @@ static void wait_cpu_callin(unsigned int
 }
 
 /*
- * Bringup step four: Synchronize the TSC and wait for the target AP
- * to reach set_cpu_online() in start_secondary().
+ * Bringup step four: Wait for the target AP to reach set_cpu_online() in
+ * start_secondary().
  */
 static void wait_cpu_online(unsigned int cpu)
 {
-	unsigned long flags;
-
-	/*
-	 * Check TSC synchronization with the AP (keep irqs disabled
-	 * while doing so):
-	 */
-	local_irq_save(flags);
-	check_tsc_sync_source(cpu);
-	local_irq_restore(flags);
-
 	/*
 	 * Wait for the AP to mark itself online, so the core caller
 	 * can drop sparse_irq_lock.
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -245,7 +245,6 @@ bool tsc_store_and_check_tsc_adjust(bool
  */
 static atomic_t start_count;
 static atomic_t stop_count;
-static atomic_t skip_test;
 static atomic_t test_runs;
 
 /*
@@ -344,21 +343,14 @@ static inline unsigned int loop_timeout(
 }
 
 /*
- * Source CPU calls into this - it waits for the freshly booted
- * target CPU to arrive and then starts the measurement:
+ * The freshly booted CPU initiates this via an async SMP function call.
  */
-void check_tsc_sync_source(int cpu)
+static void check_tsc_sync_source(void *__cpu)
 {
+	unsigned int cpu = (unsigned long)__cpu;
 	int cpus = 2;
 
 	/*
-	 * No need to check if we already know that the TSC is not
-	 * synchronized or if we have no TSC.
-	 */
-	if (unsynchronized_tsc())
-		return;
-
-	/*
 	 * Set the maximum number of test runs to
 	 *  1 if the CPU does not provide the TSC_ADJUST MSR
 	 *  3 if the MSR is available, so the target can try to adjust
@@ -368,16 +360,9 @@ void check_tsc_sync_source(int cpu)
 	else
 		atomic_set(&test_runs, 3);
 retry:
-	/*
-	 * Wait for the target to start or to skip the test:
-	 */
-	while (atomic_read(&start_count) != cpus - 1) {
-		if (atomic_read(&skip_test) > 0) {
-			atomic_set(&skip_test, 0);
-			return;
-		}
+	/* Wait for the target to start. */
+	while (atomic_read(&start_count) != cpus - 1)
 		cpu_relax();
-	}
 
 	/*
 	 * Trigger the target to continue into the measurement too:
@@ -397,14 +382,14 @@ void check_tsc_sync_source(int cpu)
 	if (!nr_warps) {
 		atomic_set(&test_runs, 0);
 
-		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
+		pr_debug("TSC synchronization [CPU#%d -> CPU#%u]: passed\n",
 			smp_processor_id(), cpu);
 
 	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
 		/* Force it to 0 if random warps brought us here */
 		atomic_set(&test_runs, 0);
 
-		pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+		pr_warn("TSC synchronization [CPU#%d -> CPU#%u]:\n",
 			smp_processor_id(), cpu);
 		pr_warn("Measured %Ld cycles TSC warp between CPUs, "
 			"turning off TSC clock.\n", max_warp);
@@ -457,11 +442,12 @@ void check_tsc_sync_target(void)
 	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
 	 * register might have been wreckaged by the BIOS..
 	 */
-	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
-		atomic_inc(&skip_test);
+	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable)
 		return;
-	}
 
+	/* Kick the control CPU into the TSC synchronization function */
+	smp_call_function_single(cpumask_first(cpu_online_mask), check_tsc_sync_source,
+				 (unsigned long *)(unsigned long)cpu, 0);
 retry:
 	/*
 	 * Register this CPU's participation and wait for the

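The start_count rendezvous that check_tsc_sync_source() and
check_tsc_sync_target() build on has the following shape. This is a
simplified sketch with hypothetical function names; the real code also
handles retries and the stop_count side:

  #include <linux/atomic.h>
  #include <asm/processor.h>	/* cpu_relax() */

  static atomic_t start_count;

  /* Control (source) CPU: wait for the AP to arrive, then release both. */
  static void source_rendezvous(int cpus)
  {
  	while (atomic_read(&start_count) != cpus - 1)
  		cpu_relax();
  	atomic_inc(&start_count);	/* count == cpus: measurement starts */
  }

  /* Booting (target) CPU: announce arrival and wait for the release. */
  static void target_rendezvous(int cpus)
  {
  	atomic_inc(&start_count);
  	while (atomic_read(&start_count) != cpus)
  		cpu_relax();
  }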

