From: Thomas Gleixner <tglx@linutronix.de> To: LKML <linux-kernel@vger.kernel.org> Cc: x86@kernel.org, David Woodhouse <dwmw2@infradead.org>, Andrew Cooper <andrew.cooper3@citrix.com>, Brian Gerst <brgerst@gmail.com>, Arjan van de Veen <arjan@linux.intel.com>, Paolo Bonzini <pbonzini@redhat.com>, Paul McKenney <paulmck@kernel.org>, Tom Lendacky <thomas.lendacky@amd.com>, Sean Christopherson <seanjc@google.com>, Oleksandr Natalenko <oleksandr@natalenko.name>, Paul Menzel <pmenzel@molgen.mpg.de>, "Guilherme G. Piccoli" <gpiccoli@igalia.com>, Piotr Gorski <lucjan.lucjanov@gmail.com>, Usama Arif <usama.arif@bytedance.com>, Juergen Gross <jgross@suse.com>, Boris Ostrovsky <boris.ostrovsky@oracle.com>, xen-devel@lists.xenproject.org, Russell King <linux@armlinux.org.uk>, Arnd Bergmann <arnd@arndb.de>, linux-arm-kernel@lists.infradead.org, Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Guo Ren <guoren@kernel.org>, linux-csky@vger.kernel.org, Thomas Bogendoerfer <tsbogend@alpha.franken.de>, linux-mips@vger.kernel.org, "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>, Helge Deller <deller@gmx.de>, linux-parisc@vger.kernel.org, Paul Walmsley <paul.walmsley@sifive.com>, Palmer Dabbelt <palmer@dabbelt.com>, linux-riscv@lists.infradead.org, Mark Rutland <mark.rutland@arm.com>, Sabin Rapan <sabrapan@amazon.com>, "Michael Kelley (LINUX)" <mikelley@microsoft.com>, Ross Philipson <ross.philipson@oracle.com>, David Woodhouse <dwmw@amazon.co.uk> Subject: [patch V4 35/37] x86/smpboot: Implement a bit spinlock to protect the realmode stack Date: Fri, 12 May 2023 23:07:53 +0200 (CEST) [thread overview] Message-ID: <20230512205257.355425551@linutronix.de> (raw) In-Reply-To: 20230512203426.452963764@linutronix.de From: Thomas Gleixner <tglx@linutronix.de> Parallel AP bringup requires that the APs can run fully parallel through the early startup code including the real mode trampoline. 
To prepare for this implement a bit-spinlock to serialize access to the real mode stack so that parallel upcoming APs are not going to corrupt each others stack while going through the real mode startup code. Co-developed-by: David Woodhouse <dwmw@amazon.co.uk> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Michael Kelley <mikelley@microsoft.com> --- V4: Simplify the lock implementation - Peter Z. --- arch/x86/include/asm/realmode.h | 3 +++ arch/x86/kernel/head_64.S | 12 ++++++++++++ arch/x86/realmode/init.c | 3 +++ arch/x86/realmode/rm/trampoline_64.S | 23 ++++++++++++++++++----- 4 files changed, 36 insertions(+), 5 deletions(-) --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -52,6 +52,7 @@ struct trampoline_header { u64 efer; u32 cr4; u32 flags; + u32 lock; #endif }; @@ -64,6 +65,8 @@ extern unsigned long initial_stack; extern unsigned long initial_vc_handler; #endif +extern u32 *trampoline_lock; + extern unsigned char real_mode_blob[]; extern unsigned char real_mode_relocs[]; --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -252,6 +252,16 @@ SYM_INNER_LABEL(secondary_startup_64_no_ movq TASK_threadsp(%rax), %rsp /* + * Now that this CPU is running on its own stack, drop the realmode + * protection. For the boot CPU the pointer is NULL! + */ + movq trampoline_lock(%rip), %rax + testq %rax, %rax + jz .Lsetup_gdt + movl $0, (%rax) + +.Lsetup_gdt: + /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace * addresses where we're currently running on. 
We have to do that here @@ -433,6 +443,8 @@ SYM_DATA(initial_code, .quad x86_64_star #ifdef CONFIG_AMD_MEM_ENCRYPT SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb) #endif + +SYM_DATA(trampoline_lock, .quad 0); __FINITDATA __INIT --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -154,6 +154,9 @@ static void __init setup_real_mode(void) trampoline_header->flags = 0; + trampoline_lock = &trampoline_header->lock; + *trampoline_lock = 0; + trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); /* Map the real mode stub as virtual == physical */ --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -37,6 +37,20 @@ .text .code16 +.macro LOAD_REALMODE_ESP + /* + * Make sure only one CPU fiddles with the realmode stack + */ +.Llock_rm\@: + lock btsl $0, tr_lock + jnc 2f + pause + jmp .Llock_rm\@ +2: + # Setup stack + movl $rm_stack_end, %esp +.endm + .balign PAGE_SIZE SYM_CODE_START(trampoline_start) cli # We should be safe anyway @@ -49,8 +63,7 @@ SYM_CODE_START(trampoline_start) mov %ax, %es mov %ax, %ss - # Setup stack - movl $rm_stack_end, %esp + LOAD_REALMODE_ESP call verify_cpu # Verify the cpu supports long mode testl %eax, %eax # Check for return code @@ -93,8 +106,7 @@ SYM_CODE_START(sev_es_trampoline_start) mov %ax, %es mov %ax, %ss - # Setup stack - movl $rm_stack_end, %esp + LOAD_REALMODE_ESP jmp .Lswitch_to_protected SYM_CODE_END(sev_es_trampoline_start) @@ -177,7 +189,7 @@ SYM_CODE_START(pa_trampoline_compat) * In compatibility mode. Prep ESP and DX for startup_32, then disable * paging and complete the switch to legacy 32-bit mode. */ - movl $rm_stack_end, %esp + LOAD_REALMODE_ESP movw $__KERNEL_DS, %dx movl $(CR0_STATE & ~X86_CR0_PG), %eax @@ -241,6 +253,7 @@ SYM_DATA_START(trampoline_header) SYM_DATA(tr_efer, .space 8) SYM_DATA(tr_cr4, .space 4) SYM_DATA(tr_flags, .space 4) + SYM_DATA(tr_lock, .space 4) SYM_DATA_END(trampoline_header) #include "trampoline_common.S"
WARNING: multiple messages have this Message-ID (diff)
From: Thomas Gleixner <tglx@linutronix.de> To: LKML <linux-kernel@vger.kernel.org> Cc: x86@kernel.org, David Woodhouse <dwmw2@infradead.org>, Andrew Cooper <andrew.cooper3@citrix.com>, Brian Gerst <brgerst@gmail.com>, Arjan van de Veen <arjan@linux.intel.com>, Paolo Bonzini <pbonzini@redhat.com>, Paul McKenney <paulmck@kernel.org>, Tom Lendacky <thomas.lendacky@amd.com>, Sean Christopherson <seanjc@google.com>, Oleksandr Natalenko <oleksandr@natalenko.name>, Paul Menzel <pmenzel@molgen.mpg.de>, "Guilherme G. Piccoli" <gpiccoli@igalia.com>, Piotr Gorski <lucjan.lucjanov@gmail.com>, Usama Arif <usama.arif@bytedance.com>, Juergen Gross <jgross@suse.com>, Boris Ostrovsky <boris.ostrovsky@oracle.com>, xen-devel@lists.xenproject.org, Russell King <linux@armlinux.org.uk>, Arnd Bergmann <arnd@arndb.de>, linux-arm-kernel@lists.infradead.org, Catalin Marinas <catalin.marinas@arm.com>, Will Deacon <will@kernel.org>, Guo Ren <guoren@kernel.org>, linux-csky@vger.kernel.org, Thomas Bogendoerfer <tsbogend@alpha.franken.de>, linux-mips@vger.kernel.org, "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>, Helge Deller <deller@gmx.de>, linux-parisc@vger.kernel.org, Paul Walmsley <paul.walmsley@sifive.com>, Palmer Dabbelt <palmer@dabbelt.com>, linux-riscv@lists.infradead.org, Mark Rutland <mark.rutland@arm.com>, Sabin Rapan <sabrapan@amazon.com>, "Michael Kelley (LINUX)" <mikelley@microsoft.com>, Ross Philipson <ross.philipson@oracle.com>, David Woodhouse <dwmw@amazon.co.uk> Subject: [patch V4 35/37] x86/smpboot: Implement a bit spinlock to protect the realmode stack Date: Fri, 12 May 2023 23:07:53 +0200 (CEST) [thread overview] Message-ID: <20230512205257.355425551@linutronix.de> (raw) In-Reply-To: 20230512203426.452963764@linutronix.de From: Thomas Gleixner <tglx@linutronix.de> Parallel AP bringup requires that the APs can run fully parallel through the early startup code including the real mode trampoline. 
To prepare for this implement a bit-spinlock to serialize access to the real mode stack so that parallel upcoming APs are not going to corrupt each others stack while going through the real mode startup code. Co-developed-by: David Woodhouse <dwmw@amazon.co.uk> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Michael Kelley <mikelley@microsoft.com> --- V4: Simplify the lock implementation - Peter Z. --- arch/x86/include/asm/realmode.h | 3 +++ arch/x86/kernel/head_64.S | 12 ++++++++++++ arch/x86/realmode/init.c | 3 +++ arch/x86/realmode/rm/trampoline_64.S | 23 ++++++++++++++++++----- 4 files changed, 36 insertions(+), 5 deletions(-) --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -52,6 +52,7 @@ struct trampoline_header { u64 efer; u32 cr4; u32 flags; + u32 lock; #endif }; @@ -64,6 +65,8 @@ extern unsigned long initial_stack; extern unsigned long initial_vc_handler; #endif +extern u32 *trampoline_lock; + extern unsigned char real_mode_blob[]; extern unsigned char real_mode_relocs[]; --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -252,6 +252,16 @@ SYM_INNER_LABEL(secondary_startup_64_no_ movq TASK_threadsp(%rax), %rsp /* + * Now that this CPU is running on its own stack, drop the realmode + * protection. For the boot CPU the pointer is NULL! + */ + movq trampoline_lock(%rip), %rax + testq %rax, %rax + jz .Lsetup_gdt + movl $0, (%rax) + +.Lsetup_gdt: + /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace * addresses where we're currently running on. 
We have to do that here @@ -433,6 +443,8 @@ SYM_DATA(initial_code, .quad x86_64_star #ifdef CONFIG_AMD_MEM_ENCRYPT SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb) #endif + +SYM_DATA(trampoline_lock, .quad 0); __FINITDATA __INIT --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -154,6 +154,9 @@ static void __init setup_real_mode(void) trampoline_header->flags = 0; + trampoline_lock = &trampoline_header->lock; + *trampoline_lock = 0; + trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); /* Map the real mode stub as virtual == physical */ --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -37,6 +37,20 @@ .text .code16 +.macro LOAD_REALMODE_ESP + /* + * Make sure only one CPU fiddles with the realmode stack + */ +.Llock_rm\@: + lock btsl $0, tr_lock + jnc 2f + pause + jmp .Llock_rm\@ +2: + # Setup stack + movl $rm_stack_end, %esp +.endm + .balign PAGE_SIZE SYM_CODE_START(trampoline_start) cli # We should be safe anyway @@ -49,8 +63,7 @@ SYM_CODE_START(trampoline_start) mov %ax, %es mov %ax, %ss - # Setup stack - movl $rm_stack_end, %esp + LOAD_REALMODE_ESP call verify_cpu # Verify the cpu supports long mode testl %eax, %eax # Check for return code @@ -93,8 +106,7 @@ SYM_CODE_START(sev_es_trampoline_start) mov %ax, %es mov %ax, %ss - # Setup stack - movl $rm_stack_end, %esp + LOAD_REALMODE_ESP jmp .Lswitch_to_protected SYM_CODE_END(sev_es_trampoline_start) @@ -177,7 +189,7 @@ SYM_CODE_START(pa_trampoline_compat) * In compatibility mode. Prep ESP and DX for startup_32, then disable * paging and complete the switch to legacy 32-bit mode. 
*/ - movl $rm_stack_end, %esp + LOAD_REALMODE_ESP movw $__KERNEL_DS, %dx movl $(CR0_STATE & ~X86_CR0_PG), %eax @@ -241,6 +253,7 @@ SYM_DATA_START(trampoline_header) SYM_DATA(tr_efer, .space 8) SYM_DATA(tr_cr4, .space 4) SYM_DATA(tr_flags, .space 4) + SYM_DATA(tr_lock, .space 4) SYM_DATA_END(trampoline_header) #include "trampoline_common.S" _______________________________________________ linux-riscv mailing list linux-riscv@lists.infradead.org http://lists.infradead.org/mailman/listinfo/linux-riscv
next prev parent reply other threads:[~2023-05-12 21:10 UTC|newest] Thread overview: 163+ messages / expand[flat|nested] mbox.gz Atom feed top 2023-05-12 21:06 [patch V4 00/37] cpu/hotplug, x86: Reworked parallel CPU bringup Thomas Gleixner 2023-05-12 21:06 ` Thomas Gleixner 2023-05-12 21:06 ` [patch V4 01/37] x86/smpboot: Cleanup topology_phys_to_logical_pkg()/die() Thomas Gleixner 2023-05-12 21:06 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 02/37] cpu/hotplug: Mark arch_disable_smp_support() and bringup_nonboot_cpus() __init Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 03/37] x86/smpboot: Avoid pointless delay calibration if TSC is synchronized Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 04/37] x86/smpboot: Rename start_cpu0() to soft_restart_cpu() Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-06-12 23:45 ` [patch V4 04/37] " Philippe Mathieu-Daudé 2023-06-12 23:45 ` Philippe Mathieu-Daudé 2023-06-12 23:45 ` Philippe Mathieu-Daudé 2023-05-12 21:07 ` [patch V4 05/37] x86/topology: Remove CPU0 hotplug option Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 06/37] x86/smpboot: Remove the CPU0 hotplug kludge Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 07/37] x86/smpboot: Restrict soft_restart_cpu() to SEV Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-06-12 23:46 ` [patch V4 07/37] " Philippe Mathieu-Daudé 2023-06-12 23:46 ` Philippe Mathieu-Daudé 2023-06-12 23:46 
` Philippe Mathieu-Daudé 2023-05-12 21:07 ` [patch V4 08/37] x86/smpboot: Remove unnecessary barrier() Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 09/37] x86/smpboot: Split up native_cpu_up() into separate phases and document them Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for David Woodhouse 2023-05-12 21:07 ` [patch V4 10/37] x86/smpboot: Get rid of cpu_init_secondary() Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-06-12 23:49 ` [patch V4 10/37] " Philippe Mathieu-Daudé 2023-06-12 23:49 ` Philippe Mathieu-Daudé 2023-06-12 23:49 ` Philippe Mathieu-Daudé 2023-05-12 21:07 ` [patch V4 11/37] x86/cpu/cacheinfo: Remove cpu_callout_mask dependency Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 12/37] x86/smpboot: Move synchronization masks to SMP boot code Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 13/37] x86/smpboot: Make TSC synchronization function call based Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:10 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 14/37] x86/smpboot: Remove cpu_callin_mask Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 15/37] cpu/hotplug: Rework sparse_irq locking in bringup_cpu() Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 16/37] x86/smpboot: Remove wait for cpu_online() Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas 
Gleixner 2023-05-12 21:07 ` [patch V4 17/37] x86/xen/smp_pv: Remove wait for CPU online Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 18/37] x86/xen/hvm: Get rid of DEAD_FROZEN handling Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 19/37] cpu/hotplug: Add CPU state tracking and synchronization Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 20/37] x86/smpboot: Switch to hotplug core state synchronization Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 21/37] cpu/hotplug: Remove cpu_report_state() and related unused cruft Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 22/37] ARM: smp: Switch to hotplug core state synchronization Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 23/37] arm64: " Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 24/37] csky/smp: " Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 25/37] MIPS: SMP_CPS: " Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 26/37] parisc: " Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 27/37] riscv: " Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 
2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 28/37] cpu/hotplug: Remove unused state functions Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 29/37] cpu/hotplug: Reset task stack state in _cpu_up() Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for David Woodhouse 2023-05-12 21:07 ` [patch V4 30/37] cpu/hotplug: Provide a split up CPUHP_BRINGUP mechanism Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 31/37] x86/smpboot: Enable split CPU startup Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 32/37] x86/apic: Provide cpu_primary_thread mask Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 33/37] cpu/hotplug: Allow "parallel" bringup up to CPUHP_BP_KICK_AP_STATE Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-22 19:45 ` [patch V4 33/37] " Mark Brown 2023-05-22 19:45 ` Mark Brown 2023-05-22 19:45 ` Mark Brown 2023-05-22 21:04 ` Thomas Gleixner 2023-05-22 21:04 ` Thomas Gleixner 2023-05-22 21:04 ` Thomas Gleixner 2023-05-22 22:27 ` Mark Brown 2023-05-22 22:27 ` Mark Brown 2023-05-22 22:27 ` Mark Brown 2023-05-22 23:12 ` Thomas Gleixner 2023-05-22 23:12 ` Thomas Gleixner 2023-05-22 23:12 ` Thomas Gleixner 2023-05-23 10:19 ` Mark Brown 2023-05-23 10:19 ` Mark Brown 2023-05-23 10:19 ` Mark Brown 2023-05-23 16:13 ` [tip: smp/core] cpu/hotplug: Fix off by one in cpuhp_bringup_mask() tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 34/37] x86/apic: Save the APIC virtual base address Thomas Gleixner 2023-05-12 
21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner [this message] 2023-05-12 21:07 ` [patch V4 35/37] x86/smpboot: Implement a bit spinlock to protect the realmode stack Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-12 21:07 ` [patch V4 36/37] x86/smpboot: Support parallel startup of secondary CPUs Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for David Woodhouse 2023-05-19 16:28 ` [patch V4 36/37] " Jeffrey Hugo 2023-05-19 16:28 ` Jeffrey Hugo 2023-05-19 16:28 ` Jeffrey Hugo 2023-05-19 16:57 ` Andrew Cooper 2023-05-19 16:57 ` Andrew Cooper 2023-05-19 16:57 ` Andrew Cooper 2023-05-19 17:44 ` Jeffrey Hugo 2023-05-19 17:44 ` Jeffrey Hugo 2023-05-19 17:44 ` Jeffrey Hugo 2023-05-12 21:07 ` [patch V4 37/37] x86/smpboot/64: Implement arch_cpuhp_init_parallel_bringup() and enable it Thomas Gleixner 2023-05-12 21:07 ` Thomas Gleixner 2023-05-15 12:00 ` Peter Zijlstra 2023-05-15 12:00 ` Peter Zijlstra 2023-05-15 12:00 ` Peter Zijlstra 2023-05-16 9:09 ` [tip: smp/core] " tip-bot2 for Thomas Gleixner 2023-05-13 18:32 ` [patch V4 00/37] cpu/hotplug, x86: Reworked parallel CPU bringup Oleksandr Natalenko 2023-05-13 18:32 ` Oleksandr Natalenko 2023-05-13 21:00 ` Helge Deller 2023-05-13 21:00 ` Helge Deller 2023-05-14 21:48 ` Guilherme G. Piccoli 2023-05-14 21:48 ` Guilherme G. Piccoli 2023-05-22 10:57 ` [PATCH] x86/apic: Fix use of X{,2}APIC_ENABLE in asm with older binutils Andrew Cooper 2023-05-22 10:57 ` Andrew Cooper 2023-05-22 10:57 ` Andrew Cooper 2023-05-22 11:17 ` Russell King (Oracle) 2023-05-22 11:17 ` Russell King (Oracle) 2023-05-22 11:17 ` Russell King (Oracle) 2023-05-22 12:12 ` [tip: smp/core] " tip-bot2 for Andrew Cooper
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20230512205257.355425551@linutronix.de \ --to=tglx@linutronix.de \ --cc=James.Bottomley@HansenPartnership.com \ --cc=andrew.cooper3@citrix.com \ --cc=arjan@linux.intel.com \ --cc=arnd@arndb.de \ --cc=boris.ostrovsky@oracle.com \ --cc=brgerst@gmail.com \ --cc=catalin.marinas@arm.com \ --cc=deller@gmx.de \ --cc=dwmw2@infradead.org \ --cc=dwmw@amazon.co.uk \ --cc=gpiccoli@igalia.com \ --cc=guoren@kernel.org \ --cc=jgross@suse.com \ --cc=linux-arm-kernel@lists.infradead.org \ --cc=linux-csky@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mips@vger.kernel.org \ --cc=linux-parisc@vger.kernel.org \ --cc=linux-riscv@lists.infradead.org \ --cc=linux@armlinux.org.uk \ --cc=lucjan.lucjanov@gmail.com \ --cc=mark.rutland@arm.com \ --cc=mikelley@microsoft.com \ --cc=oleksandr@natalenko.name \ --cc=palmer@dabbelt.com \ --cc=paul.walmsley@sifive.com \ --cc=paulmck@kernel.org \ --cc=pbonzini@redhat.com \ --cc=pmenzel@molgen.mpg.de \ --cc=ross.philipson@oracle.com \ --cc=sabrapan@amazon.com \ --cc=seanjc@google.com \ --cc=thomas.lendacky@amd.com \ --cc=tsbogend@alpha.franken.de \ --cc=usama.arif@bytedance.com \ --cc=will@kernel.org \ --cc=x86@kernel.org \ --cc=xen-devel@lists.xenproject.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.