From: Tom Lendacky <thomas.lendacky@amd.com>
To: <linux-arch@vger.kernel.org>, <linux-efi@vger.kernel.org>,
	<kvm@vger.kernel.org>, <linux-doc@vger.kernel.org>,
	<x86@kernel.org>, <linux-kernel@vger.kernel.org>,
	<kasan-dev@googlegroups.com>, <linux-mm@kvack.org>,
	<iommu@lists.linux-foundation.org>
Cc: "Rik van Riel" <riel@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Arnd Bergmann" <arnd@arndb.de>,
	"Jonathan Corbet" <corbet@lwn.net>,
	"Matt Fleming" <matt@codeblueprint.co.uk>,
	"Joerg Roedel" <joro@8bytes.org>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Larry Woodman" <lwoodman@redhat.com>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Borislav Petkov" <bp@alien8.de>,
	"Andy Lutomirski" <luto@kernel.org>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Andrey Ryabinin" <aryabinin@virtuozzo.com>,
	"Alexander Potapenko" <glider@google.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Dmitry Vyukov" <dvyukov@google.com>
Subject: [RFC PATCH v3 19/20] x86: Add support to make use of Secure Memory Encryption
Date: Wed, 9 Nov 2016 18:38:26 -0600
Message-ID: <20161110003826.3280.5546.stgit@tlendack-t1.amdoffice.net>
In-Reply-To: <20161110003426.3280.2999.stgit@tlendack-t1.amdoffice.net>

Add support to check whether SME has been enabled and whether the
mem_encrypt=on command-line option is set. If both conditions are
true, the encryption mask is set and the kernel is encrypted "in
place."

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/kernel/Makefile           |    1 +
 arch/x86/kernel/mem_encrypt_boot.S |  156 +++++++++++++++++++++++++++++
 arch/x86/kernel/mem_encrypt_init.c |  196 ++++++++++++++++++++++++++++++++++++
 3 files changed, 353 insertions(+)
 create mode 100644 arch/x86/kernel/mem_encrypt_boot.S

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 27e22f4..020759f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -143,4 +143,5 @@ ifeq ($(CONFIG_X86_64),y)
 	obj-y				+= vsmp_64.o
 
 	obj-y				+= mem_encrypt_init.o
+	obj-y				+= mem_encrypt_boot.o
 endif
diff --git a/arch/x86/kernel/mem_encrypt_boot.S b/arch/x86/kernel/mem_encrypt_boot.S
new file mode 100644
index 0000000..d4917ba
--- /dev/null
+++ b/arch/x86/kernel/mem_encrypt_boot.S
@@ -0,0 +1,156 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/processor-flags.h>
+#include <asm/msr-index.h>
+
+	.text
+	.code64
+ENTRY(sme_encrypt_execute)
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/*
+	 * Entry parameters:
+	 *   RDI - virtual address for the encrypted kernel mapping
+	 *   RSI - virtual address for the un-encrypted kernel mapping
+	 *   RDX - length of kernel
+	 *   RCX - address of the encryption workarea
+	 *     - stack page (PAGE_SIZE)
+	 *     - encryption routine page (PAGE_SIZE)
+	 *     - intermediate copy buffer (PMD_PAGE_SIZE)
+	 *    R8 - address of the pagetables to use for encryption
+	 */
+
+	/* Set up a one-page stack in the non-encrypted memory area */
+	movq	%rcx, %rax
+	addq	$PAGE_SIZE, %rax
+	movq	%rsp, %rbp
+	movq	%rax, %rsp
+	push	%rbp
+
+	push	%r12
+	push	%r13
+
+	movq	%rdi, %r10
+	movq	%rsi, %r11
+	movq	%rdx, %r12
+	movq	%rcx, %r13
+
+	/* Copy encryption routine into the workarea */
+	movq	%rax, %rdi
+	leaq	.Lencrypt_start(%rip), %rsi
+	movq	$(.Lencrypt_stop - .Lencrypt_start), %rcx
+	rep	movsb
+
+	/* Set up registers for the call */
+	movq	%r10, %rdi
+	movq	%r11, %rsi
+	movq	%r8, %rdx
+	movq	%r12, %rcx
+	movq	%rax, %r8
+	addq	$PAGE_SIZE, %r8
+
+	/* Call the encryption routine */
+	call	*%rax
+
+	pop	%r13
+	pop	%r12
+
+	pop	%rsp			/* Restore original stack pointer */
+.Lencrypt_exit:
+#endif	/* CONFIG_AMD_MEM_ENCRYPT */
+
+	ret
+ENDPROC(sme_encrypt_execute)
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/*
+ * Routine used to encrypt the kernel.
+ *   This routine must run outside of the kernel proper since
+ *   the kernel will be encrypted while it executes. So the
+ *   routine is defined here and then copied to an area outside
+ *   of the kernel where it will remain and run un-encrypted
+ *   during execution.
+ *
+ *   On entry the registers must be:
+ *     RDI - virtual address for the encrypted kernel mapping
+ *     RSI - virtual address for the un-encrypted kernel mapping
+ *     RDX - address of the pagetables to use for encryption
+ *     RCX - length of kernel
+ *      R8 - intermediate copy buffer
+ *
+ *     RAX - points to this routine
+ *
+ * The kernel will be encrypted by copying from the non-encrypted
+ * kernel space to an intermediate buffer and then copying from the
+ * intermediate buffer back to the encrypted kernel space. The physical
+ * addresses of the two kernel space mappings are the same which
+ * results in the kernel being encrypted "in place".
+ */
+.Lencrypt_start:
+	/* Enable the new page tables */
+	mov	%rdx, %cr3
+
+	/* Flush any global TLBs */
+	mov	%cr4, %rdx
+	andq	$~X86_CR4_PGE, %rdx
+	mov	%rdx, %cr4
+	orq	$X86_CR4_PGE, %rdx
+	mov	%rdx, %cr4
+
+	/* Set the PAT register PA5 entry to write-protect */
+	push	%rcx
+	movl	$MSR_IA32_CR_PAT, %ecx
+	rdmsr
+	push	%rdx			/* Save original PAT value */
+	andl	$0xffff00ff, %edx	/* Clear PA5 */
+	orl	$0x00000500, %edx	/* Set PA5 to WP */
+	wrmsr
+	pop	%rdx			/* RDX contains original PAT value */
+	pop	%rcx
+
+	movq	%rcx, %r9		/* Save length */
+	movq	%rdi, %r10		/* Save destination address */
+	movq	%rsi, %r11		/* Save source address */
+
+	wbinvd				/* Write back and invalidate caches */
+
+	/* Copy/encrypt 2MB at a time */
+1:
+	movq	%r11, %rsi
+	movq	%r8, %rdi
+	movq	$PMD_PAGE_SIZE, %rcx
+	rep	movsb
+
+	movq	%r8, %rsi
+	movq	%r10, %rdi
+	movq	$PMD_PAGE_SIZE, %rcx
+	rep	movsb
+
+	addq	$PMD_PAGE_SIZE, %r11
+	addq	$PMD_PAGE_SIZE, %r10
+	subq	$PMD_PAGE_SIZE, %r9
+	jnz	1b
+
+	/* Restore PAT register */
+	push	%rdx
+	movl	$MSR_IA32_CR_PAT, %ecx
+	rdmsr
+	pop	%rdx
+	wrmsr
+
+	ret
+.Lencrypt_stop:
+#endif	/* CONFIG_AMD_MEM_ENCRYPT */
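
As a review aid, here is a C model of the copied routine. It is
illustrative only: it omits the CR3 switch, the TLB flush and the PAT
manipulation, and it assumes, per the comment block above, that "enc"
and "dec" alias the same physical pages and that "len" is a multiple
of PMD_PAGE_SIZE.

	/* Illustrative model of .Lencrypt_start -- not part of this patch. */
	static void encrypt_in_place(u8 *enc, u8 *dec, u8 *buf,
				     unsigned long len)
	{
		while (len) {
			/* Read a 2MB chunk through the un-encrypted mapping... */
			memcpy(buf, dec, PMD_PAGE_SIZE);
			/* ...and write it back through the encrypted mapping. */
			memcpy(enc, buf, PMD_PAGE_SIZE);

			enc += PMD_PAGE_SIZE;
			dec += PMD_PAGE_SIZE;
			len -= PMD_PAGE_SIZE;
		}
	}

The round trip through "buf" is the key: source and destination are
the same physical memory, so each chunk has to leave it (read
un-encrypted) before it can be written back (stored encrypted).
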
diff --git a/arch/x86/kernel/mem_encrypt_init.c b/arch/x86/kernel/mem_encrypt_init.c
index 388d6fb..7bdd159 100644
--- a/arch/x86/kernel/mem_encrypt_init.c
+++ b/arch/x86/kernel/mem_encrypt_init.c
@@ -13,9 +13,205 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <linux/mem_encrypt.h>
+#include <linux/mm.h>
+
+#include <asm/sections.h>
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+
+extern void sme_encrypt_execute(unsigned long, unsigned long, unsigned long,
+				void *, pgd_t *);
+
+#define PGD_FLAGS	_KERNPG_TABLE_NO_ENC
+#define PUD_FLAGS	_KERNPG_TABLE_NO_ENC
+#define PMD_FLAGS	__PAGE_KERNEL_LARGE_EXEC
+
+static void __init *sme_pgtable_entry(pgd_t *pgd, void *next_page,
+				      void *vaddr, pmdval_t pmd_val)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd += pgd_index((unsigned long)vaddr);
+	if (pgd_none(*pgd)) {
+		pud = next_page;
+		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
+		native_set_pgd(pgd,
+			       native_make_pgd((unsigned long)pud + PGD_FLAGS));
+		next_page += sizeof(*pud) * PTRS_PER_PUD;
+	} else {
+		pud = (pud_t *)(native_pgd_val(*pgd) & ~PTE_FLAGS_MASK);
+	}
+
+	pud += pud_index((unsigned long)vaddr);
+	if (pud_none(*pud)) {
+		pmd = next_page;
+		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
+		native_set_pud(pud,
+			       native_make_pud((unsigned long)pmd + PUD_FLAGS));
+		next_page += sizeof(*pmd) * PTRS_PER_PMD;
+	} else {
+		pmd = (pmd_t *)(native_pud_val(*pud) & ~PTE_FLAGS_MASK);
+	}
+
+	pmd += pmd_index((unsigned long)vaddr);
+	if (pmd_none(*pmd) || !pmd_large(*pmd))
+		native_set_pmd(pmd, native_make_pmd(pmd_val));
+
+	return next_page;
+}
+
+static unsigned long __init sme_pgtable_calc(unsigned long start,
+					     unsigned long end)
+{
+	unsigned long addr, total;
+
+	total = 0;
+	addr = start;
+	while (addr < end) {
+		unsigned long pgd_end;
+
+		pgd_end = (addr & PGDIR_MASK) + PGDIR_SIZE;
+		if (pgd_end > end)
+			pgd_end = end;
+
+		total += sizeof(pud_t) * PTRS_PER_PUD * 2;
+
+		while (addr < pgd_end) {
+			unsigned long pud_end;
+
+			pud_end = (addr & PUD_MASK) + PUD_SIZE;
+			if (pud_end > end)
+				pud_end = end;
+
+			total += sizeof(pmd_t) * PTRS_PER_PMD * 2;
+
+			addr = pud_end;
+		}
+
+		addr = pgd_end;
+	}
+	total += sizeof(pgd_t) * PTRS_PER_PGD;
+
+	return total;
+}
+#endif	/* CONFIG_AMD_MEM_ENCRYPT */
 
 void __init sme_encrypt_kernel(void)
 {
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	pgd_t *pgd;
+	void *workarea, *next_page, *vaddr;
+	unsigned long kern_start, kern_end, kern_len;
+	unsigned long index, paddr, pmd_flags;
+	unsigned long exec_size, full_size;
+
+	/* If SME is not active then no need to prepare */
+	if (!sme_me_mask)
+		return;
+
+	/* Set the workarea to be after the kernel */
+	workarea = (void *)ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
+
+	/*
+	 * Prepare for encrypting the kernel by building new pagetables with
+	 * the necessary attributes needed to encrypt the kernel in place.
+	 *
+	 *   One range of virtual addresses will map the memory occupied
+	 *   by the kernel as encrypted.
+	 *
+	 *   Another range of virtual addresses will map the memory occupied
+	 *   by the kernel as un-encrypted and write-protected.
+	 *
+	 *     The use of the write-protect attribute will prevent any of
+	 *     this memory from being cached.
+	 */
+
+	/* The physical address gives us the identity-mapped virtual address */
+	kern_start = __pa_symbol(_text);
+	kern_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE) - 1;
+	kern_len = kern_end - kern_start + 1;
+
+	/*
+	 * Calculate the number of workarea bytes needed:
+	 *   executable encryption area size:
+	 *     stack page (PAGE_SIZE)
+	 *     encryption routine page (PAGE_SIZE)
+	 *     intermediate copy buffer (PMD_PAGE_SIZE)
+	 *   pagetable structures for workarea (in case not currently mapped)
+	 *   pagetable structures for the encryption of the kernel
+	 */
+	exec_size = (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
+
+	full_size = exec_size;
+	full_size += ALIGN(exec_size, PMD_PAGE_SIZE) / PMD_PAGE_SIZE *
+		     sizeof(pmd_t) * PTRS_PER_PMD;
+	full_size += sme_pgtable_calc(kern_start, kern_end + exec_size);
+
+	next_page = workarea + exec_size;
+
+	/* Make sure the current pagetables have entries for the workarea */
+	pgd = (pgd_t *)native_read_cr3();
+	paddr = (unsigned long)workarea;
+	while (paddr < (unsigned long)workarea + full_size) {
+		vaddr = (void *)paddr;
+		next_page = sme_pgtable_entry(pgd, next_page, vaddr,
+					      paddr + PMD_FLAGS);
+
+		paddr += PMD_PAGE_SIZE;
+	}
+	native_write_cr3(native_read_cr3());
+
+	/* Calculate a PGD index to be used for the un-encrypted mapping */
+	index = (pgd_index(kern_end + full_size) + 1) & (PTRS_PER_PGD - 1);
+	index <<= PGDIR_SHIFT;
+
+	/* Set and clear the PGD */
+	pgd = next_page;
+	memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
+	next_page += sizeof(*pgd) * PTRS_PER_PGD;
+
+	/* Add encrypted (identity) mappings for the kernel */
+	pmd_flags = PMD_FLAGS | _PAGE_ENC;
+	paddr = kern_start;
+	while (paddr < kern_end) {
+		vaddr = (void *)paddr;
+		next_page = sme_pgtable_entry(pgd, next_page, vaddr,
+					      paddr + pmd_flags);
+
+		paddr += PMD_PAGE_SIZE;
+	}
+
+	/* Add un-encrypted (non-identity) mappings for the kernel */
+	pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
+	paddr = kern_start;
+	while (paddr < kern_end) {
+		vaddr = (void *)(paddr + index);
+		next_page = sme_pgtable_entry(pgd, next_page, vaddr,
+					      paddr + pmd_flags);
+
+		paddr += PMD_PAGE_SIZE;
+	}
+
+	/* Add the workarea to both mappings */
+	paddr = kern_end + 1;
+	while (paddr < (kern_end + exec_size)) {
+		vaddr = (void *)paddr;
+		next_page = sme_pgtable_entry(pgd, next_page, vaddr,
+					      paddr + PMD_FLAGS);
+
+		vaddr = (void *)(paddr + index);
+		next_page = sme_pgtable_entry(pgd, next_page, vaddr,
+					      paddr + PMD_FLAGS);
+
+		paddr += PMD_PAGE_SIZE;
+	}
+
+	/* Perform the encryption */
+	sme_encrypt_execute(kern_start, kern_start + index, kern_len,
+			    workarea, pgd);
+
+#endif	/* CONFIG_AMD_MEM_ENCRYPT */
 }
 
 unsigned long __init sme_get_me_mask(void)

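As a final review aid, the sizing and aliasing arithmetic in
sme_encrypt_kernel() works out as below. This is a worked example
under the assumption of 4-level paging with 2MB (PMD_PAGE_SIZE)
mappings; it is not part of the patch.

	/*
	 * Workarea layout, carved out of the PMD-aligned region after _end:
	 *
	 *   exec_size = 2 * PAGE_SIZE + PMD_PAGE_SIZE
	 *             = 8KB + 2MB        (stack + copied routine + buffer)
	 *
	 *   full_size = exec_size
	 *             + one PMD page per 2MB (rounded up) of exec_size,
	 *               in case the workarea is not currently mapped
	 *             + sme_pgtable_calc(kern_start, kern_end + exec_size),
	 *               i.e. a PGD page plus, per PGDIR/PUD range covered,
	 *               a PUD page and a PMD page -- doubled, because the
	 *               kernel is mapped twice (encrypted and un-encrypted).
	 *
	 * The un-encrypted alias is placed at a whole-PGD-entry offset:
	 *
	 *   index = (pgd_index(kern_end + full_size) + 1) & (PTRS_PER_PGD - 1);
	 *   index <<= PGDIR_SHIFT;	/* a 512GB multiple with 4-level paging */
	 *
	 * so that vaddr = paddr + index falls in a PGD slot just past the
	 * identity-mapped ranges set up above and cannot collide with them.
	 */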