From: Joerg Roedel <joro@8bytes.org>
To: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@kernel.org>, "H . Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Andy Lutomirski <luto@kernel.org>,
	Dave Hansen <dave.hansen@intel.com>,
	Josh Poimboeuf <jpoimboe@redhat.com>,
	Juergen Gross <jgross@suse.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Borislav Petkov <bp@alien8.de>, Jiri Kosina <jkosina@suse.cz>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Brian Gerst <brgerst@gmail.com>,
	David Laight <David.Laight@aculab.com>,
	Denys Vlasenko <dvlasenk@redhat.com>,
	Eduardo Valentin <eduval@amazon.com>,
	Greg KH <gregkh@linuxfoundation.org>,
	Will Deacon <will.deacon@arm.com>,
	aliguori@amazon.com, daniel.gruss@iaik.tugraz.at,
	hughd@google.com, keescook@google.com,
	Andrea Arcangeli <aarcange@redhat.com>,
	Waiman Long <llong@redhat.com>, Pavel Machek <pavel@ucw.cz>,
	"David H . Gutteridge" <dhgutteridge@sympatico.ca>,
	jroedel@suse.de, joro@8bytes.org
Subject: [PATCH 07/39] x86/entry/32: Enter the kernel via trampoline stack
Date: Wed, 11 Jul 2018 13:29:14 +0200
Message-ID: <1531308586-29340-8-git-send-email-joro@8bytes.org>
In-Reply-To: <1531308586-29340-1-git-send-email-joro@8bytes.org>

From: Joerg Roedel <jroedel@suse.de>

Use the entry-stack as a trampoline to enter the kernel. The
entry-stack is already in the cpu_entry_area and will be
mapped to userspace when PTI is enabled.
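
To illustrate what the new SWITCH_TO_KERNEL_STACK macro below does, here
is a rough C-level sketch of the stack switch. The names used here
(switch_to_kernel_stack(), saved_frame) are invented for the sketch and
are not kernel APIs; the VM86 frame-size adjustment and the
XENPV/entry-stack checks of the real macro are left out:

#include <string.h>	/* memcpy() stands in for the "rep movsl" copy */

/* Rough stand-in for the register frame that SAVE_ALL pushes. */
struct saved_frame {
	unsigned long regs[17];		/* roughly PTREGS_SIZE bytes */
};

/*
 * Reserve room for the frame at the top of the task-stack, point the
 * stack pointer at it, and only then copy the frame over from the
 * entry-stack.  Copying first and switching later would let an NMI,
 * which can arrive at any point, find itself on the entry-stack,
 * start using the task-stack itself and corrupt the frame copied there.
 */
static void switch_to_kernel_stack(const struct saved_frame *src_on_entry_stack,
				   unsigned long task_stack_top,
				   unsigned long *esp)
{
	struct saved_frame *dst =
		(struct saved_frame *)(task_stack_top - sizeof(*dst));

	*esp = (unsigned long)dst;			/* movl %edi, %esp */
	memcpy(dst, src_on_entry_stack, sizeof(*dst));	/* rep movsl      */
}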

Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 arch/x86/entry/entry_32.S        | 136 +++++++++++++++++++++++++++++++--------
 arch/x86/include/asm/switch_to.h |   6 +-
 arch/x86/kernel/asm-offsets.c    |   1 +
 arch/x86/kernel/cpu/common.c     |   5 +-
 arch/x86/kernel/process.c        |   2 -
 arch/x86/kernel/process_32.c     |  10 +--
 6 files changed, 121 insertions(+), 39 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 61303fa..528db7d 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -154,25 +154,36 @@
 
 #endif /* CONFIG_X86_32_LAZY_GS */
 
-.macro SAVE_ALL pt_regs_ax=%eax
+.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
 	cld
+	/* Push segment registers and %eax */
 	PUSH_GS
 	pushl	%fs
 	pushl	%es
 	pushl	%ds
 	pushl	\pt_regs_ax
+
+	/* Load kernel segments */
+	movl	$(__USER_DS), %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	$(__KERNEL_PERCPU), %eax
+	movl	%eax, %fs
+	SET_KERNEL_GS %eax
+
+	/* Push integer registers and complete PT_REGS */
 	pushl	%ebp
 	pushl	%edi
 	pushl	%esi
 	pushl	%edx
 	pushl	%ecx
 	pushl	%ebx
-	movl	$(__USER_DS), %edx
-	movl	%edx, %ds
-	movl	%edx, %es
-	movl	$(__KERNEL_PERCPU), %edx
-	movl	%edx, %fs
-	SET_KERNEL_GS %edx
+
+	/* Switch to kernel stack if necessary */
+.if \switch_stacks > 0
+	SWITCH_TO_KERNEL_STACK
+.endif
+
 .endm
 
 /*
@@ -269,6 +280,72 @@
 .Lend_\@:
 #endif /* CONFIG_X86_ESPFIX32 */
 .endm
+
+
+/*
+ * Called with pt_regs fully populated and kernel segments loaded,
+ * so we can access PER_CPU and use the integer registers.
+ *
+ * We need to be very careful here with the %esp switch, because an NMI
+ * can happen anywhere. If the NMI handler finds itself on the
+ * entry-stack, it will overwrite the task-stack and everything we
+ * copied there. So allocate the stack-frame on the task-stack and
+ * switch to it before we do any copying.
+ */
+.macro SWITCH_TO_KERNEL_STACK
+
+	ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+	/* Are we on the entry stack? Bail out if not! */
+	movl	PER_CPU_VAR(cpu_entry_area), %edi
+	addl	$CPU_ENTRY_AREA_entry_stack, %edi
+	cmpl	%esp, %edi
+	jae	.Lend_\@
+
+	/* Load stack pointer into %esi and %edi */
+	movl	%esp, %esi
+	movl	%esi, %edi
+
+	/* Move %edi to the top of the entry stack */
+	andl	$(MASK_entry_stack), %edi
+	addl	$(SIZEOF_entry_stack), %edi
+
+	/* Load top of task-stack into %edi */
+	movl	TSS_entry_stack(%edi), %edi
+
+	/* Bytes to copy */
+	movl	$PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
+	jz	.Lcopy_pt_regs_\@
+
+	/*
+	 * Stack-frame contains 4 additional segment registers when
+	 * coming from VM86 mode
+	 */
+	addl	$(4 * 4), %ecx
+
+.Lcopy_pt_regs_\@:
+#endif
+
+	/* Allocate frame on task-stack */
+	subl	%ecx, %edi
+
+	/* Switch to task-stack */
+	movl	%edi, %esp
+
+	/*
+	 * We are now on the task-stack and can safely copy over the
+	 * stack-frame
+	 */
+	shrl	$2, %ecx
+	cld
+	rep movsl
+
+.Lend_\@:
+.endm
+
 /*
  * %eax: prev task
  * %edx: next task
@@ -461,6 +538,7 @@ ENTRY(xen_sysenter_target)
  */
 ENTRY(entry_SYSENTER_32)
 	movl	TSS_entry_stack(%esp), %esp
+
 .Lsysenter_past_esp:
 	pushl	$__USER_DS		/* pt_regs->ss */
 	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
@@ -469,7 +547,7 @@ ENTRY(entry_SYSENTER_32)
 	pushl	$__USER_CS		/* pt_regs->cs */
 	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
 	pushl	%eax			/* pt_regs->orig_ax */
-	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
+	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */
 
 	/*
 	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
@@ -580,7 +658,8 @@ ENDPROC(entry_SYSENTER_32)
 ENTRY(entry_INT80_32)
 	ASM_CLAC
 	pushl	%eax			/* pt_regs->orig_ax */
-	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
+
+	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */
 
 	/*
 	 * User mode is traced as though IRQs are on, and the interrupt gate
@@ -677,7 +756,8 @@ END(irq_entries_start)
 common_interrupt:
 	ASM_CLAC
 	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
-	SAVE_ALL
+
+	SAVE_ALL switch_stacks=1
 	ENCODE_FRAME_POINTER
 	TRACE_IRQS_OFF
 	movl	%esp, %eax
@@ -685,16 +765,16 @@ common_interrupt:
 	jmp	ret_from_intr
 ENDPROC(common_interrupt)
 
-#define BUILD_INTERRUPT3(name, nr, fn)	\
-ENTRY(name)				\
-	ASM_CLAC;			\
-	pushl	$~(nr);			\
-	SAVE_ALL;			\
-	ENCODE_FRAME_POINTER;		\
-	TRACE_IRQS_OFF			\
-	movl	%esp, %eax;		\
-	call	fn;			\
-	jmp	ret_from_intr;		\
+#define BUILD_INTERRUPT3(name, nr, fn)			\
+ENTRY(name)						\
+	ASM_CLAC;					\
+	pushl	$~(nr);					\
+	SAVE_ALL switch_stacks=1;			\
+	ENCODE_FRAME_POINTER;				\
+	TRACE_IRQS_OFF					\
+	movl	%esp, %eax;				\
+	call	fn;					\
+	jmp	ret_from_intr;				\
 ENDPROC(name)
 
 #define BUILD_INTERRUPT(name, nr)		\
@@ -926,16 +1006,20 @@ common_exception:
 	pushl	%es
 	pushl	%ds
 	pushl	%eax
+	movl	$(__USER_DS), %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	$(__KERNEL_PERCPU), %eax
+	movl	%eax, %fs
 	pushl	%ebp
 	pushl	%edi
 	pushl	%esi
 	pushl	%edx
 	pushl	%ecx
 	pushl	%ebx
+	SWITCH_TO_KERNEL_STACK
 	ENCODE_FRAME_POINTER
 	cld
-	movl	$(__KERNEL_PERCPU), %ecx
-	movl	%ecx, %fs
 	UNWIND_ESPFIX_STACK
 	GS_TO_REG %ecx
 	movl	PT_GS(%esp), %edi		# get the function address
@@ -943,9 +1027,6 @@ common_exception:
 	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
 	REG_TO_PTGS %ecx
 	SET_KERNEL_GS %ecx
-	movl	$(__USER_DS), %ecx
-	movl	%ecx, %ds
-	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax			# pt_regs pointer
 	CALL_NOSPEC %edi
@@ -964,6 +1045,7 @@ ENTRY(debug)
 	 */
 	ASM_CLAC
 	pushl	$-1				# mark this as an int
+
 	SAVE_ALL
 	ENCODE_FRAME_POINTER
 	xorl	%edx, %edx			# error code 0
@@ -999,6 +1081,7 @@ END(debug)
  */
 ENTRY(nmi)
 	ASM_CLAC
+
 #ifdef CONFIG_X86_ESPFIX32
 	pushl	%eax
 	movl	%ss, %eax
@@ -1066,7 +1149,8 @@ END(nmi)
 ENTRY(int3)
 	ASM_CLAC
 	pushl	$-1				# mark this as an int
-	SAVE_ALL
+
+	SAVE_ALL switch_stacks=1
 	ENCODE_FRAME_POINTER
 	TRACE_IRQS_OFF
 	xorl	%edx, %edx			# zero error code
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index eb5f799..20e5f7ab 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -89,13 +89,9 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
-	/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
-#ifdef CONFIG_X86_32
-	load_sp0(task->thread.sp0);
-#else
+	/* sp0 always points to the entry trampoline stack, which is constant: */
 	if (static_cpu_has(X86_FEATURE_XENPV))
 		load_sp0(task_top_of_stack(task));
-#endif
 }
 
 #endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index a1e1628..01de31d 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -103,6 +103,7 @@ void common(void) {
 	OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
 	OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
 	DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
+	DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));
 
 	/* Offset for sp0 and sp1 into the tss_struct */
 	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index eb4cb3e..43a927e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1804,11 +1804,12 @@ void cpu_init(void)
 	enter_lazy_tlb(&init_mm, curr);
 
 	/*
-	 * Initialize the TSS.  Don't bother initializing sp0, as the initial
-	 * task never enters user mode.
+	 * Initialize the TSS.  sp0 points to the entry trampoline stack
+	 * regardless of what task is running.
 	 */
 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 	load_TR_desc();
+	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
 
 	load_mm_ldt(&init_mm);
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 30ca2d1..c93fcfd 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,14 +57,12 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
 		 */
 		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
 
-#ifdef CONFIG_X86_64
 		/*
 		 * .sp1 is cpu_current_top_of_stack.  The init task never
 		 * runs user code, but cpu_current_top_of_stack should still
 		 * be well defined before the first context switch.
 		 */
 		.sp1 = TOP_OF_INIT_STACK,
-#endif
 
 #ifdef CONFIG_X86_32
 		.ss0 = __KERNEL_DS,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index ec62cc7..04bbf93 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -287,10 +287,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	update_sp0(next_p);
 	refresh_sysenter_cs(next);
-	this_cpu_write(cpu_current_top_of_stack,
-		       (unsigned long)task_stack_page(next_p) +
-		       THREAD_SIZE);
-	/* SYSENTER reads the task-stack from tss.sp1 */
+	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
+	/*
+	 * TODO: Find a way to let cpu_current_top_of_stack point to
+	 * cpu_tss_rw.x86_tss.sp1. Doing so now results in stack corruption with
+	 * iret exceptions.
+	 */
 	this_cpu_write(cpu_tss_rw.x86_tss.sp1, next_p->thread.sp0);
 
 	/*
-- 
2.7.4


Thread overview: 87+ messages
2018-07-11 11:29 [PATCH 00/39 v7] PTI support for x86-32 Joerg Roedel
2018-07-11 11:29 ` [PATCH 01/39] x86/asm-offsets: Move TSS_sp0 and TSS_sp1 to asm-offsets.c Joerg Roedel
2018-07-12 20:44   ` Andy Lutomirski
2018-07-11 11:29 ` [PATCH 02/39] x86/entry/32: Rename TSS_sysenter_sp0 to TSS_entry_stack Joerg Roedel
2018-07-12 20:44   ` Andy Lutomirski
2018-07-11 11:29 ` [PATCH 03/39] x86/entry/32: Load task stack from x86_tss.sp1 in SYSENTER handler Joerg Roedel
2018-07-12 20:49   ` Andy Lutomirski
2018-07-13  9:48     ` Joerg Roedel
2018-07-13  9:48       ` Joerg Roedel
2018-07-13 17:19       ` Andy Lutomirski
2018-07-13 23:17         ` Andy Lutomirski
2018-07-17  7:05           ` Joerg Roedel
2018-07-17 20:04             ` Andy Lutomirski
2018-07-11 11:29 ` [PATCH 04/39] x86/entry/32: Put ESPFIX code into a macro Joerg Roedel
2018-07-11 11:29 ` [PATCH 05/39] x86/entry/32: Unshare NMI return path Joerg Roedel
2018-07-12 20:53   ` Andy Lutomirski
2018-07-13 10:05     ` Joerg Roedel
2018-07-13 17:26       ` Andy Lutomirski
2018-07-11 11:29 ` [PATCH 06/39] x86/entry/32: Split off return-to-kernel path Joerg Roedel
2018-07-11 11:29 ` Joerg Roedel [this message]
2018-07-12 21:09   ` [PATCH 07/39] x86/entry/32: Enter the kernel via trampoline stack Andy Lutomirski
2018-07-13 10:56     ` Joerg Roedel
2018-07-13 10:56       ` Joerg Roedel
2018-07-13 17:21       ` Andy Lutomirski
2018-07-17  7:07         ` Joerg Roedel
2018-07-11 11:29 ` [PATCH 08/39] x86/entry/32: Leave " Joerg Roedel
2018-07-11 11:29 ` [PATCH 09/39] x86/entry/32: Introduce SAVE_ALL_NMI and RESTORE_ALL_NMI Joerg Roedel
2018-07-11 11:29 ` [PATCH 10/39] x86/entry/32: Handle Entry from Kernel-Mode on Entry-Stack Joerg Roedel
2018-07-13 23:31   ` Andy Lutomirski
2018-07-14  5:21     ` Joerg Roedel
2018-07-14  6:26       ` Andy Lutomirski
2018-07-14  8:01         ` Joerg Roedel
2018-07-14  8:01           ` Joerg Roedel
2018-07-14 14:36           ` Andy Lutomirski
2018-07-17  7:15             ` Joerg Roedel
2018-07-17  7:15               ` Joerg Roedel
2018-07-17 20:06               ` Andy Lutomirski
2018-07-18 11:59                 ` Joerg Roedel
2018-07-11 11:29 ` [PATCH 11/39] x86/entry/32: Simplify debug entry point Joerg Roedel
2018-07-11 11:29 ` [PATCH 12/39] x86/32: Use tss.sp1 as cpu_current_top_of_stack Joerg Roedel
2018-07-11 11:29 ` [PATCH 13/39] x86/entry/32: Add PTI cr3 switch to non-NMI entry/exit points Joerg Roedel
2018-07-11 11:29 ` [PATCH 14/39] x86/entry/32: Add PTI cr3 switches to NMI handler code Joerg Roedel
2018-07-11 11:29 ` [PATCH 15/39] x86/pgtable: Rename pti_set_user_pgd to pti_set_user_pgtbl Joerg Roedel
2018-07-11 11:29 ` [PATCH 16/39] x86/pgtable/pae: Unshare kernel PMDs when PTI is enabled Joerg Roedel
2018-07-11 11:29 ` [PATCH 17/39] x86/pgtable/32: Allocate 8k page-tables " Joerg Roedel
2018-07-11 11:29 ` [PATCH 18/39] x86/pgtable: Move pgdp kernel/user conversion functions to pgtable.h Joerg Roedel
2018-07-11 11:29 ` [PATCH 19/39] x86/pgtable: Move pti_set_user_pgtbl() " Joerg Roedel
2018-07-11 11:29 ` [PATCH 20/39] x86/pgtable: Move two more functions from pgtable_64.h " Joerg Roedel
2018-07-11 11:29 ` [PATCH 21/39] x86/mm/pae: Populate valid user PGD entries Joerg Roedel
2018-07-11 11:29 ` [PATCH 22/39] x86/mm/pae: Populate the user page-table with user pgd's Joerg Roedel
2018-07-11 11:29 ` [PATCH 23/39] x86/mm/legacy: " Joerg Roedel
2018-07-11 11:29 ` [PATCH 24/39] x86/mm/pti: Add an overflow check to pti_clone_pmds() Joerg Roedel
2018-07-11 11:29 ` [PATCH 25/39] x86/mm/pti: Define X86_CR3_PTI_PCID_USER_BIT on x86_32 Joerg Roedel
2018-07-11 11:29 ` [PATCH 26/39] x86/mm/pti: Clone CPU_ENTRY_AREA on PMD level " Joerg Roedel
2018-07-11 11:29 ` [PATCH 27/39] x86/mm/pti: Make pti_clone_kernel_text() compile on 32 bit Joerg Roedel
2018-07-11 11:29 ` [PATCH 28/39] x86/mm/pti: Keep permissions when cloning kernel text in pti_clone_kernel_text() Joerg Roedel
2018-07-13 23:25   ` Andy Lutomirski
2018-07-11 11:29 ` [PATCH 29/39] x86/mm/pti: Introduce pti_finalize() Joerg Roedel
2018-07-11 11:29 ` [PATCH 30/39] x86/mm/pti: Clone entry-text again in pti_finalize() Joerg Roedel
2018-07-13 23:21   ` Andy Lutomirski
2018-07-14  5:04     ` Joerg Roedel
2018-07-11 11:29 ` [PATCH 31/39] x86/mm/dump_pagetables: Define INIT_PGD Joerg Roedel
2018-07-11 11:29 ` [PATCH 32/39] x86/pgtable/pae: Use separate kernel PMDs for user page-table Joerg Roedel
2018-07-11 11:29 ` [PATCH 33/39] x86/ldt: Reserve address-space range on 32 bit for the LDT Joerg Roedel
2018-07-11 11:29 ` [PATCH 34/39] x86/ldt: Define LDT_END_ADDR Joerg Roedel
2018-07-13 17:29   ` Andy Lutomirski
2018-07-11 11:29 ` [PATCH 35/39] x86/ldt: Split out sanity check in map_ldt_struct() Joerg Roedel
2018-07-13 23:18   ` Andy Lutomirski
2018-07-11 11:29 ` [PATCH 36/39] x86/ldt: Enable LDT user-mapping for PAE Joerg Roedel
2018-07-11 11:29 ` [PATCH 37/39] x86/pti: Allow CONFIG_PAGE_TABLE_ISOLATION for x86_32 Joerg Roedel
2018-07-11 11:29 ` [PATCH 38/39] x86/mm/pti: Add Warning when booting on a PCID capable CPU Joerg Roedel
2018-07-13 18:59   ` Andy Lutomirski
2018-07-14  5:08     ` Joerg Roedel
2018-07-11 11:29 ` [PATCH 39/39] x86/entry/32: Add debug code to check entry/exit cr3 Joerg Roedel
2018-07-13 17:28   ` Andy Lutomirski
2018-07-14  5:09     ` Joerg Roedel
2018-07-11 16:28 ` [PATCH 00/39 v7] PTI support for x86-32 Linus Torvalds
2018-07-11 17:28   ` Jiri Kosina
2018-07-11 19:57     ` Thomas Backlund
2018-07-12 13:59       ` Boris Ostrovsky
2018-07-11 21:07   ` Pavel Machek
2018-07-16  7:51 ` Pavel Machek
2018-07-17  2:07 ` David H. Gutteridge
2018-07-17  6:16   ` Joerg Roedel
2018-07-18  9:40 [PATCH 00/39 v8] " Joerg Roedel
2018-07-18  9:40 ` [PATCH 07/39] x86/entry/32: Enter the kernel via trampoline stack Joerg Roedel
2018-07-18 18:09   ` Brian Gerst
2018-07-19 20:52     ` Thomas Gleixner
