From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, David Woodhouse <dwmw2@infradead.org>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Brian Gerst <brgerst@gmail.com>,
	Arjan van de Veen <arjan@linux.intel.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Paul McKenney <paulmck@kernel.org>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	Sean Christopherson <seanjc@google.com>,
	Oleksandr Natalenko <oleksandr@natalenko.name>,
	Paul Menzel <pmenzel@molgen.mpg.de>,
	"Guilherme G. Piccoli" <gpiccoli@igalia.com>,
	Piotr Gorski <lucjan.lucjanov@gmail.com>,
	Usama Arif <usama.arif@bytedance.com>,
	Juergen Gross <jgross@suse.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	xen-devel@lists.xenproject.org,
	Russell King <linux@armlinux.org.uk>,
	Arnd Bergmann <arnd@arndb.de>,
	linux-arm-kernel@lists.infradead.org,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>, Guo Ren <guoren@kernel.org>,
	linux-csky@vger.kernel.org,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	linux-mips@vger.kernel.org,
	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
	Helge Deller <deller@gmx.de>,
	linux-parisc@vger.kernel.org,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	linux-riscv@lists.infradead.org,
	Mark Rutland <mark.rutland@arm.com>,
	Sabin Rapan <sabrapan@amazon.com>,
	"Michael Kelley (LINUX)" <mikelley@microsoft.com>
Subject: [patch v3 19/36] x86/smpboot: Switch to hotplug core state synchronization
Date: Mon,  8 May 2023 21:43:57 +0200 (CEST)
Message-ID: <20230508185218.297596481@linutronix.de>
In-Reply-To: 20230508181633.089804905@linutronix.de

From: Thomas Gleixner <tglx@linutronix.de>

The new AP state tracking and synchronization mechanism in the CPU hotplug
core code makes it possible to remove quite a bit of x86-specific code (a
minimal model of the new handshake is sketched below the list):

  1) The AP alive synchronization based on cpumasks

  2) The decision whether an AP can be brought up again
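
What follows is a minimal, illustrative userspace model of the alive
handshake that the hotplug core now provides. It is not kernel code and is
not part of this patch; the state names are simplified stand-ins for the
synchronization states introduced earlier in this series. The "AP" thread
reports ALIVE and spins until the control thread releases it, which is
roughly what cpuhp_ap_sync_alive() does for a real AP:

  /* build with: cc -pthread sync_model.c -o sync_model */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  /* Simplified stand-ins for the hotplug core synchronization states */
  enum { STATE_DEAD, STATE_KICKED, STATE_ALIVE, STATE_SHOULD_ONLINE, STATE_ONLINE };

  static _Atomic int sync_state = STATE_DEAD;

  /* AP side: the moral equivalent of cpuhp_ap_sync_alive() */
  static void *ap_thread(void *arg)
  {
          (void)arg;
          atomic_store(&sync_state, STATE_ALIVE);        /* report in */
          while (atomic_load(&sync_state) != STATE_SHOULD_ONLINE)
                  ;                                      /* wait for release */
          atomic_store(&sync_state, STATE_ONLINE);       /* complete bringup */
          return NULL;
  }

  int main(void)
  {
          pthread_t ap;

          atomic_store(&sync_state, STATE_KICKED);       /* "INIT/SIPI sent" */
          pthread_create(&ap, NULL, ap_thread, NULL);

          /* Control side: wait for the AP to report in, then release it */
          while (atomic_load(&sync_state) != STATE_ALIVE)
                  ;
          atomic_store(&sync_state, STATE_SHOULD_ONLINE);

          pthread_join(ap, NULL);
          printf("AP reached state %d (ONLINE)\n", (int)atomic_load(&sync_state));
          return 0;
  }

In the series proper the control CPU polls with a timeout and may call
arch_cpuhp_sync_state_poll() while waiting (Xen PV uses that hook to yield
the vCPU), and teardown is handled symmetrically via cpuhp_ap_report_dead()
and arch_cpuhp_cleanup_dead_cpu().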

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>

---
V2: Use for_each_online_cpu() - Brian
---
 arch/x86/Kconfig           |    1 
 arch/x86/include/asm/smp.h |    7 +
 arch/x86/kernel/smp.c      |    1 
 arch/x86/kernel/smpboot.c  |  161 ++++++++++-----------------------------------
 arch/x86/xen/smp_hvm.c     |   16 +---
 arch/x86/xen/smp_pv.c      |   39 ++++++----
 6 files changed, 73 insertions(+), 152 deletions(-)
---

--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -274,6 +274,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_USER_RETURN_NOTIFIER
 	select HAVE_GENERIC_VDSO
+	select HOTPLUG_CORE_SYNC_FULL		if SMP
 	select HOTPLUG_SMT			if SMP
 	select IRQ_FORCED_THREADING
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -38,6 +38,8 @@ struct smp_ops {
 	void (*crash_stop_other_cpus)(void);
 	void (*smp_send_reschedule)(int cpu);
 
+	void (*cleanup_dead_cpu)(unsigned cpu);
+	void (*poll_sync_state)(void);
 	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
 	int (*cpu_disable)(void);
 	void (*cpu_die)(unsigned int cpu);
@@ -90,7 +92,8 @@ static inline int __cpu_disable(void)
 
 static inline void __cpu_die(unsigned int cpu)
 {
-	smp_ops.cpu_die(cpu);
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
 }
 
 static inline void __noreturn play_dead(void)
@@ -123,8 +126,6 @@ void native_smp_cpus_done(unsigned int m
 int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
-int common_cpu_die(unsigned int cpu);
-void native_cpu_die(unsigned int cpu);
 void __noreturn hlt_play_dead(void);
 void native_play_dead(void);
 void play_dead_common(void);
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -269,7 +269,6 @@ struct smp_ops smp_ops = {
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,
-	.cpu_die		= native_cpu_die,
 	.cpu_disable		= native_cpu_disable,
 	.play_dead		= native_play_dead,
 
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -57,6 +57,7 @@
 #include <linux/pgtable.h>
 #include <linux/overflow.h>
 #include <linux/stackprotector.h>
+#include <linux/cpuhotplug.h>
 
 #include <asm/acpi.h>
 #include <asm/cacheinfo.h>
@@ -101,9 +102,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_die_map);
 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-/* All of these masks are initialized in setup_cpu_local_masks() */
-static cpumask_var_t cpu_initialized_mask;
-static cpumask_var_t cpu_callout_mask;
 /* Representing CPUs for which sibling maps can be computed */
 static cpumask_var_t cpu_sibling_setup_mask;
 
@@ -169,8 +167,8 @@ static void smp_callin(void)
 	int cpuid = smp_processor_id();
 
 	/*
-	 * If waken up by an INIT in an 82489DX configuration
-	 * cpu_callout_mask guarantees we don't get here before an
+	 * If woken up by an INIT in an 82489DX configuration the alive
+	 * synchronization guarantees we don't get here before an
 	 * INIT_deassert IPI reaches our local APIC, so it is now safe to
 	 * touch our local APIC.
 	 *
@@ -216,17 +214,6 @@ static void ap_calibrate_delay(void)
 	cpu_data(smp_processor_id()).loops_per_jiffy = loops_per_jiffy;
 }
 
-static void wait_for_master_cpu(int cpu)
-{
-	/*
-	 * Wait for release by control CPU before continuing with AP
-	 * initialization.
-	 */
-	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
-	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
-		cpu_relax();
-}
-
 /*
  * Activate a secondary processor.
  */
@@ -247,11 +234,11 @@ static void notrace start_secondary(void
 	cpu_init_exception_handling();
 
 	/*
-	 * Sync point with wait_cpu_initialized(). Sets AP in
-	 * cpu_initialized_mask and then waits for the control CPU
-	 * to release it.
+	 * Synchronization point with the hotplug core. Sets the
+	 * synchronization state to ALIVE and waits for the control CPU to
+	 * release this CPU for further bringup.
 	 */
-	wait_for_master_cpu(raw_smp_processor_id());
+	cpuhp_ap_sync_alive();
 
 	cpu_init();
 	rcu_cpu_starting(raw_smp_processor_id());
@@ -284,7 +271,6 @@ static void notrace start_secondary(void
 	set_cpu_online(smp_processor_id(), true);
 	lapic_online();
 	unlock_vector_lock();
-	cpu_set_state_online(smp_processor_id());
 	x86_platform.nmi_init();
 
 	/* enable local interrupts */
@@ -735,9 +721,9 @@ static void impress_friends(void)
 	 * Allow the user to impress friends.
 	 */
 	pr_debug("Before bogomips\n");
-	for_each_possible_cpu(cpu)
-		if (cpumask_test_cpu(cpu, cpu_callout_mask))
-			bogosum += cpu_data(cpu).loops_per_jiffy;
+	for_each_online_cpu(cpu)
+		bogosum += cpu_data(cpu).loops_per_jiffy;
+
 	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
 		num_online_cpus(),
 		bogosum/(500000/HZ),
@@ -1009,6 +995,7 @@ int common_cpu_up(unsigned int cpu, stru
 static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
 	unsigned long start_ip = real_mode_header->trampoline_start;
+	int ret;
 
 #ifdef CONFIG_X86_64
 	/* If 64-bit wakeup method exists, use the 64-bit mode trampoline IP */
@@ -1049,13 +1036,6 @@ static int do_boot_cpu(int apicid, int c
 		}
 	}
 
-	/*
-	 * AP might wait on cpu_callout_mask in cpu_init() with
-	 * cpu_initialized_mask set if previous attempt to online
-	 * it timed-out. Clear cpu_initialized_mask so that after
-	 * INIT/SIPI it could start with a clean state.
-	 */
-	cpumask_clear_cpu(cpu, cpu_initialized_mask);
 	smp_mb();
 
 	/*
@@ -1066,47 +1046,16 @@ static int do_boot_cpu(int apicid, int c
 	 * - Use an INIT boot APIC message
 	 */
 	if (apic->wakeup_secondary_cpu_64)
-		return apic->wakeup_secondary_cpu_64(apicid, start_ip);
+		ret = apic->wakeup_secondary_cpu_64(apicid, start_ip);
 	else if (apic->wakeup_secondary_cpu)
-		return apic->wakeup_secondary_cpu(apicid, start_ip);
-
-	return wakeup_secondary_cpu_via_init(apicid, start_ip);
-}
-
-static int wait_cpu_cpumask(unsigned int cpu, const struct cpumask *mask)
-{
-	unsigned long timeout;
-
-	/*
-	 * Wait up to 10s for the CPU to report in.
-	 */
-	timeout = jiffies + 10*HZ;
-	while (time_before(jiffies, timeout)) {
-		if (cpumask_test_cpu(cpu, mask))
-			return 0;
-
-		schedule();
-	}
-	return -1;
-}
-
-/*
- * Bringup step two: Wait for the target AP to reach cpu_init_secondary()
- * and thus wait_for_master_cpu(), then set cpu_callout_mask to allow it
- * to proceed.  The AP will then proceed past setting its 'callin' bit
- * and end up waiting in check_tsc_sync_target() until we reach
- * wait_cpu_online() to tend to it.
- */
-static int wait_cpu_initialized(unsigned int cpu)
-{
-	/*
-	 * Wait for first sign of life from AP.
-	 */
-	if (wait_cpu_cpumask(cpu, cpu_initialized_mask))
-		return -1;
+		ret = apic->wakeup_secondary_cpu(apicid, start_ip);
+	else
+		ret = wakeup_secondary_cpu_via_init(apicid, start_ip);
 
-	cpumask_set_cpu(cpu, cpu_callout_mask);
-	return 0;
+	/* If the wakeup mechanism failed, cleanup the warm reset vector */
+	if (ret)
+		arch_cpuhp_cleanup_kick_cpu(cpu);
+	return ret;
 }
 
 static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
@@ -1131,11 +1080,6 @@ static int native_kick_ap(unsigned int c
 	 */
 	mtrr_save_state();
 
-	/* x86 CPUs take themselves offline, so delayed offline is OK. */
-	err = cpu_check_up_prepare(cpu);
-	if (err && err != -EBUSY)
-		return err;
-
 	/* the FPU context is blank, nobody can own it */
 	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 
@@ -1152,17 +1096,29 @@ static int native_kick_ap(unsigned int c
 
 int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	int ret;
-
-	ret = native_kick_ap(cpu, tidle);
-	if (!ret)
-		ret = wait_cpu_initialized(cpu);
+	return native_kick_ap(cpu, tidle);
+}
 
+void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
+{
 	/* Cleanup possible dangling ends... */
-	if (x86_platform.legacy.warm_reset)
+	if (smp_ops.cpu_up == native_cpu_up && x86_platform.legacy.warm_reset)
 		smpboot_restore_warm_reset_vector();
+}
 
-	return ret;
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
+{
+	if (smp_ops.cleanup_dead_cpu)
+		smp_ops.cleanup_dead_cpu(cpu);
+
+	if (system_state == SYSTEM_RUNNING)
+		pr_info("CPU %u is now offline\n", cpu);
+}
+
+void arch_cpuhp_sync_state_poll(void)
+{
+	if (smp_ops.poll_sync_state)
+		smp_ops.poll_sync_state();
 }
 
 /**
@@ -1354,9 +1310,6 @@ void __init native_smp_prepare_boot_cpu(
 	if (!IS_ENABLED(CONFIG_SMP))
 		switch_gdt_and_percpu_base(me);
 
-	/* already set me in cpu_online_mask in boot_cpu_init() */
-	cpumask_set_cpu(me, cpu_callout_mask);
-	cpu_set_state_online(me);
 	native_pv_lock_init();
 }
 
@@ -1483,8 +1436,6 @@ early_param("possible_cpus", _setup_poss
 /* correctly size the local cpu masks */
 void __init setup_cpu_local_masks(void)
 {
-	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
-	alloc_bootmem_cpumask_var(&cpu_callout_mask);
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
@@ -1546,9 +1497,6 @@ static void remove_siblinginfo(int cpu)
 static void remove_cpu_from_maps(int cpu)
 {
 	set_cpu_online(cpu, false);
-	cpumask_clear_cpu(cpu, cpu_callout_mask);
-	/* was set by cpu_init() */
-	cpumask_clear_cpu(cpu, cpu_initialized_mask);
 	numa_remove_cpu(cpu);
 }
 
@@ -1599,36 +1547,11 @@ int native_cpu_disable(void)
 	return 0;
 }
 
-int common_cpu_die(unsigned int cpu)
-{
-	int ret = 0;
-
-	/* We don't do anything here: idle task is faking death itself. */
-
-	/* They ack this in play_dead() by setting CPU_DEAD */
-	if (cpu_wait_death(cpu, 5)) {
-		if (system_state == SYSTEM_RUNNING)
-			pr_info("CPU %u is now offline\n", cpu);
-	} else {
-		pr_err("CPU %u didn't die...\n", cpu);
-		ret = -1;
-	}
-
-	return ret;
-}
-
-void native_cpu_die(unsigned int cpu)
-{
-	common_cpu_die(cpu);
-}
-
 void play_dead_common(void)
 {
 	idle_task_exit();
 
-	/* Ack it */
-	(void)cpu_report_death();
-
+	cpuhp_ap_report_dead();
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
 	 */
@@ -1730,12 +1653,6 @@ int native_cpu_disable(void)
 	return -ENOSYS;
 }
 
-void native_cpu_die(unsigned int cpu)
-{
-	/* We said "no" in __cpu_disable */
-	BUG();
-}
-
 void native_play_dead(void)
 {
 	BUG();
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -55,18 +55,16 @@ static void __init xen_hvm_smp_prepare_c
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void xen_hvm_cpu_die(unsigned int cpu)
+static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
 {
-	if (common_cpu_die(cpu) == 0) {
-		if (xen_have_vector_callback) {
-			xen_smp_intr_free(cpu);
-			xen_uninit_lock_cpu(cpu);
-			xen_teardown_timer(cpu);
-		}
+	if (xen_have_vector_callback) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+		xen_teardown_timer(cpu);
 	}
 }
 #else
-static void xen_hvm_cpu_die(unsigned int cpu)
+static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
 {
 	BUG();
 }
@@ -77,7 +75,7 @@ void __init xen_hvm_smp_init(void)
 	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_cpus_done = xen_smp_cpus_done;
-	smp_ops.cpu_die = xen_hvm_cpu_die;
+	smp_ops.cleanup_dead_cpu = xen_hvm_cleanup_dead_cpu;
 
 	if (!xen_have_vector_callback) {
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -62,6 +62,7 @@ static void cpu_bringup(void)
 	int cpu;
 
 	cr4_init();
+	cpuhp_ap_sync_alive();
 	cpu_init();
 	touch_softlockup_watchdog();
 
@@ -83,7 +84,7 @@ static void cpu_bringup(void)
 
 	set_cpu_online(cpu, true);
 
-	cpu_set_state_online(cpu);  /* Implies full memory barrier. */
+	smp_mb();
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
@@ -323,14 +324,6 @@ static int xen_pv_cpu_up(unsigned int cp
 
 	xen_setup_runstate_info(cpu);
 
-	/*
-	 * PV VCPUs are always successfully taken down (see 'while' loop
-	 * in xen_cpu_die()), so -EBUSY is an error.
-	 */
-	rc = cpu_check_up_prepare(cpu);
-	if (rc)
-		return rc;
-
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
 
@@ -349,6 +342,11 @@ static int xen_pv_cpu_up(unsigned int cp
 	return 0;
 }
 
+static void xen_pv_poll_sync_state(void)
+{
+	HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static int xen_pv_cpu_disable(void)
 {
@@ -364,18 +362,18 @@ static int xen_pv_cpu_disable(void)
 
 static void xen_pv_cpu_die(unsigned int cpu)
 {
-	while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
-				  xen_vcpu_nr(cpu), NULL)) {
+	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu), NULL)) {
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout(HZ/10);
 	}
+}
 
-	if (common_cpu_die(cpu) == 0) {
-		xen_smp_intr_free(cpu);
-		xen_uninit_lock_cpu(cpu);
-		xen_teardown_timer(cpu);
-		xen_pmu_finish(cpu);
-	}
+static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
+{
+	xen_smp_intr_free(cpu);
+	xen_uninit_lock_cpu(cpu);
+	xen_teardown_timer(cpu);
+	xen_pmu_finish(cpu);
 }
 
 static void __noreturn xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
@@ -397,6 +395,11 @@ static void xen_pv_cpu_die(unsigned int
 	BUG();
 }
 
+static void xen_pv_cleanup_dead_cpu(unsigned int cpu)
+{
+	BUG();
+}
+
 static void __noreturn xen_pv_play_dead(void)
 {
 	BUG();
@@ -437,6 +440,8 @@ static const struct smp_ops xen_smp_ops
 
 	.cpu_up = xen_pv_cpu_up,
 	.cpu_die = xen_pv_cpu_die,
+	.cleanup_dead_cpu = xen_pv_cleanup_dead_cpu,
+	.poll_sync_state = xen_pv_poll_sync_state,
 	.cpu_disable = xen_pv_cpu_disable,
 	.play_dead = xen_pv_play_dead,
 




Thread overview: 89+ messages
2023-05-08 19:43 [patch v3 00/36] cpu/hotplug, x86: Reworked parallel CPU bringup Thomas Gleixner
2023-05-08 19:43 ` [patch v3 01/36] [patch V2 01/38] x86/smpboot: Cleanup topology_phys_to_logical_pkg()/die() Thomas Gleixner
2023-05-08 19:43 ` [patch v3 02/36] cpu/hotplug: Mark arch_disable_smp_support() and bringup_nonboot_cpus() __init Thomas Gleixner
2023-05-08 19:43 ` [patch v3 03/36] x86/smpboot: Avoid pointless delay calibration if TSC is synchronized Thomas Gleixner
2023-05-08 19:43 ` [patch v3 04/36] x86/smpboot: Rename start_cpu0() to soft_restart_cpu() Thomas Gleixner
2023-05-08 19:43 ` [patch v3 05/36] x86/topology: Remove CPU0 hotplug option Thomas Gleixner
2023-05-08 19:43 ` [patch v3 06/36] x86/smpboot: Remove the CPU0 hotplug kludge Thomas Gleixner
2023-05-08 19:43 ` [patch v3 07/36] x86/smpboot: Restrict soft_restart_cpu() to SEV Thomas Gleixner
2023-05-08 19:43 ` [patch v3 08/36] x86/smpboot: Split up native_cpu_up() into separate phases and document them Thomas Gleixner
2023-05-09 10:04   ` Peter Zijlstra
2023-05-09 12:07     ` Thomas Gleixner
2023-05-09 17:59       ` Thomas Gleixner
2023-05-09 20:11     ` Thomas Gleixner
2023-05-10  8:39       ` Peter Zijlstra
2023-05-09 10:19   ` Peter Zijlstra
2023-05-09 12:08     ` Thomas Gleixner
2023-05-09 18:03     ` Thomas Gleixner
2023-05-09 10:31   ` Peter Zijlstra
2023-05-09 12:09     ` Thomas Gleixner
2023-05-08 19:43 ` [patch v3 09/36] x86/smpboot: Get rid of cpu_init_secondary() Thomas Gleixner
2023-05-08 19:43 ` [patch v3 10/36] [patch V2 10/38] x86/cpu/cacheinfo: Remove cpu_callout_mask dependency Thomas Gleixner
2023-05-08 19:43 ` [patch v3 11/36] [patch V2 11/38] x86/smpboot: Move synchronization masks to SMP boot code Thomas Gleixner
2023-05-08 19:43 ` [patch v3 12/36] [patch V2 12/38] x86/smpboot: Make TSC synchronization function call based Thomas Gleixner
2023-05-08 19:43 ` [patch v3 13/36] x86/smpboot: Remove cpu_callin_mask Thomas Gleixner
2023-05-09 10:49   ` Peter Zijlstra
2023-05-09 12:09     ` Thomas Gleixner
2023-05-08 19:43 ` [patch v3 14/36] [patch V2 14/38] cpu/hotplug: Rework sparse_irq locking in bringup_cpu() Thomas Gleixner
2023-05-09 11:02   ` Peter Zijlstra
2023-05-09 12:10     ` Thomas Gleixner
2023-05-08 19:43 ` [patch v3 15/36] x86/smpboot: Remove wait for cpu_online() Thomas Gleixner
2023-05-08 19:43 ` [patch v3 16/36] x86/xen/smp_pv: Remove wait for CPU online Thomas Gleixner
2023-05-08 19:43 ` [patch v3 17/36] x86/xen/hvm: Get rid of DEAD_FROZEN handling Thomas Gleixner
2023-05-08 19:43 ` [patch v3 18/36] [patch V2 18/38] cpu/hotplug: Add CPU state tracking and synchronization Thomas Gleixner
2023-05-09 11:07   ` Peter Zijlstra
2023-05-09 11:35     ` Peter Zijlstra
2023-05-09 12:12     ` Thomas Gleixner
2023-05-08 19:43 ` Thomas Gleixner [this message]
2023-05-08 19:43 ` [patch v3 20/36] cpu/hotplug: Remove cpu_report_state() and related unused cruft Thomas Gleixner
2023-05-08 19:44 ` [patch v3 21/36] [patch V2 21/38] ARM: smp: Switch to hotplug core state synchronization Thomas Gleixner
2023-05-08 19:44 ` [patch v3 22/36] arm64: " Thomas Gleixner
2023-05-08 19:44 ` [patch v3 23/36] [patch V2 23/38] csky/smp: " Thomas Gleixner
2023-05-08 19:44 ` [patch v3 24/36] [patch V2 24/38] MIPS: SMP_CPS: " Thomas Gleixner
2023-05-08 19:44 ` [patch v3 25/36] parisc: " Thomas Gleixner
2023-05-08 19:44 ` [patch v3 26/36] riscv: " Thomas Gleixner
2023-05-08 19:44 ` [patch v3 27/36] cpu/hotplug: Remove unused state functions Thomas Gleixner
2023-05-08 19:44 ` [patch v3 28/36] cpu/hotplug: Reset task stack state in _cpu_up() Thomas Gleixner
2023-05-08 19:44 ` [patch v3 29/36] [patch V2 29/38] cpu/hotplug: Provide a split up CPUHP_BRINGUP mechanism Thomas Gleixner
2023-05-08 19:44 ` [patch v3 30/36] x86/smpboot: Enable split CPU startup Thomas Gleixner
2023-05-08 19:44 ` [patch v3 31/36] x86/apic: Provide cpu_primary_thread mask Thomas Gleixner
2023-05-24 20:48   ` Kirill A. Shutemov
2023-05-26 10:14     ` Thomas Gleixner
2023-05-27 13:40       ` Thomas Gleixner
2023-05-29  2:39         ` Kirill A. Shutemov
2023-05-29 19:27           ` Thomas Gleixner
2023-05-29 20:31             ` Kirill A. Shutemov
2023-05-30  0:54               ` Kirill A. Shutemov
2023-05-30  9:26                 ` Thomas Gleixner
2023-05-30 10:34                   ` Thomas Gleixner
2023-05-30 11:37                     ` Kirill A. Shutemov
2023-05-30 12:09                     ` [patch] x86/smpboot: Disable parallel bootup if cc_vendor != NONE Thomas Gleixner
2023-05-30 12:29                       ` Kirill A. Shutemov
2023-05-30 16:00                         ` Thomas Gleixner
2023-05-30 16:56                           ` Sean Christopherson
2023-05-30 19:51                             ` Thomas Gleixner
2023-05-30 20:03                               ` Tom Lendacky
2023-05-30 20:39                                 ` Thomas Gleixner
2023-05-30 21:13                                   ` Tom Lendacky
2023-05-31  7:44                                     ` [patch] x86/smpboot: Fix the parallel bringup decision Thomas Gleixner
2023-05-31 11:07                                       ` Kirill A. Shutemov
2023-05-31 13:58                                       ` Tom Lendacky
2023-05-30 17:02                           ` [patch] x86/smpboot: Disable parallel bootup if cc_vendor != NONE Kirill A. Shutemov
2023-05-30 17:31                             ` Sean Christopherson
2023-05-30  9:26               ` [patch v3 31/36] x86/apic: Provide cpu_primary_thread mask Thomas Gleixner
2023-05-30 10:46               ` [patch] x86/realmode: Make stack lock work in trampoline_compat() Thomas Gleixner
2023-05-30 11:12                 ` Kirill A. Shutemov
2023-06-08 23:34                 ` Yunhong Jiang
2023-06-08 23:57                   ` Andrew Cooper
2023-06-09  0:22                     ` Yunhong Jiang
2023-06-10 19:50                     ` David Laight
2023-06-10 22:51                       ` 'Andrew Cooper'
2023-05-08 19:44 ` [patch v3 32/36] cpu/hotplug: Allow "parallel" bringup up to CPUHP_BP_KICK_AP_STATE Thomas Gleixner
2023-05-08 19:44 ` [patch v3 33/36] x86/apic: Save the APIC virtual base address Thomas Gleixner
2023-05-09  9:20   ` Sergey Shtylyov
2023-05-08 19:44 ` [patch v3 34/36] x86/smpboot: Implement a bit spinlock to protect the realmode stack Thomas Gleixner
2023-05-09 13:13   ` Peter Zijlstra
2023-05-09 13:47     ` Thomas Gleixner
2023-05-08 19:44 ` [patch v3 35/36] x86/smpboot: Support parallel startup of secondary CPUs Thomas Gleixner
2023-05-09 13:57   ` Peter Zijlstra
2023-05-08 19:44 ` [patch v3 36/36] x86/smpboot/64: Implement arch_cpuhp_init_parallel_bringup() and enable it Thomas Gleixner
