From: Dmitry Safonov <dima@arista.com>
To: linux-kernel@vger.kernel.org
Cc: Dmitry Safonov <0x7f454c46@gmail.com>,
	Dmitry Safonov <dima@arista.com>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	Andrew Morton <akpm@linux-foundation.org>,
	Andy Lutomirski <luto@kernel.org>, Arnd Bergmann <arnd@arndb.de>,
	Borislav Petkov <bp@alien8.de>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Christophe Leroy <christophe.leroy@csgroup.eu>,
	Guo Ren <guoren@kernel.org>, "H. Peter Anvin" <hpa@zytor.com>,
	Ingo Molnar <mingo@redhat.com>, Oleg Nesterov <oleg@redhat.com>,
	Russell King <linux@armlinux.org.uk>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Thomas Gleixner <tglx@linutronix.de>,
	Vincenzo Frascino <vincenzo.frascino@arm.com>,
	Will Deacon <will@kernel.org>,
	x86@kernel.org, Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Paul Mackerras <paulus@samba.org>
Subject: [PATCH v3 22/23] powerpc/vdso: Migrate native signals to generic vdso_base
Date: Fri, 11 Jun 2021 19:02:41 +0100
Message-ID: <20210611180242.711399-23-dima@arista.com>
In-Reply-To: <20210611180242.711399-1-dima@arista.com>

Switch to the generic way of tracking the vDSO landing vma (the area
userspace returns to from a signal handler). With mm->vdso_base maintained
by the generic mm code, powerpc no longer needs its private
mm->context.vdso pointer nor the arch_unmap() hook that kept it in sync.
The diffstat speaks for itself.
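
The conversion itself is mechanical; for illustration, a representative
before/after excerpt from the 32-bit signal code (mm->vdso_base and
UNMAPPED_VDSO_BASE come from the earlier "mm: Add vdso_base in mm_struct"
patch in this series):

	/* Before: per-arch pointer, kept in sync by a powerpc arch_unmap() hook */
	if (tsk->mm->context.vdso)
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);

	/* After: generic mm->vdso_base, maintained by the core mm code */
	if (tsk->mm->vdso_base != (void __user *)UNMAPPED_VDSO_BASE)
		tramp = VDSO32_SYMBOL(tsk->mm->vdso_base, sigtramp_rt32);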

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Dmitry Safonov <dima@arista.com>
---
 arch/powerpc/Kconfig                          |  1 +
 arch/powerpc/include/asm/book3s/32/mmu-hash.h |  1 -
 arch/powerpc/include/asm/book3s/64/mmu.h      |  1 -
 arch/powerpc/include/asm/mmu_context.h        |  9 ------
 arch/powerpc/include/asm/nohash/32/mmu-40x.h  |  1 -
 arch/powerpc/include/asm/nohash/32/mmu-44x.h  |  1 -
 arch/powerpc/include/asm/nohash/32/mmu-8xx.h  |  1 -
 arch/powerpc/include/asm/nohash/mmu-book3e.h  |  1 -
 arch/powerpc/kernel/signal_32.c               |  8 ++---
 arch/powerpc/kernel/signal_64.c               |  4 +--
 arch/powerpc/kernel/vdso.c                    | 31 +------------------
 arch/powerpc/perf/callchain_32.c              |  8 ++---
 arch/powerpc/perf/callchain_64.c              |  4 +--
 arch/x86/include/asm/mmu_context.h            |  5 ---
 include/asm-generic/mm_hooks.h                |  9 ++----
 mm/mmap.c                                     |  7 -----
 16 files changed, 16 insertions(+), 76 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a9f842230ee4..21e58d145c82 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -145,6 +145,7 @@ config PPC
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_HAS_VDSO_BASE
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_KEEP_MEMBLOCK
 	select ARCH_MIGHT_HAVE_PC_PARPORT
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index b85f8e114a9c..d5ee68f394d9 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -90,7 +90,6 @@ struct hash_pte {
 
 typedef struct {
 	unsigned long id;
-	void __user *vdso;
 } mm_context_t;
 
 void update_bats(void);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index eace8c3f7b0a..66bcc3ee3add 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -111,7 +111,6 @@ typedef struct {
 
 	struct hash_mm_context *hash_context;
 
-	void __user *vdso;
 	/*
 	 * pagetable fragment support
 	 */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 4bc45d3ed8b0..71dedeac7fdb 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -260,15 +260,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 
 extern void arch_exit_mmap(struct mm_struct *mm);
 
-static inline void arch_unmap(struct mm_struct *mm,
-			      unsigned long start, unsigned long end)
-{
-	unsigned long vdso_base = (unsigned long)mm->context.vdso;
-
-	if (start <= vdso_base && vdso_base < end)
-		mm->context.vdso = NULL;
-}
-
 #ifdef CONFIG_PPC_MEM_KEYS
 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
 			       bool execute, bool foreign);
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
index 8a8f13a22cf4..366088bb1c3f 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
@@ -57,7 +57,6 @@
 typedef struct {
 	unsigned int	id;
 	unsigned int	active;
-	void __user	*vdso;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 2d92a39d8f2e..d67256ab7887 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -108,7 +108,6 @@ extern unsigned int tlb_44x_index;
 typedef struct {
 	unsigned int	id;
 	unsigned int	active;
-	void __user	*vdso;
 } mm_context_t;
 
 /* patch sites */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 6e4faa0a9b35..9e394810faac 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -184,7 +184,6 @@ void mmu_pin_tlb(unsigned long top, bool readonly);
 typedef struct {
 	unsigned int id;
 	unsigned int active;
-	void __user *vdso;
 	void *pte_frag;
 } mm_context_t;
 
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h
index e43a418d3ccd..61ac19f315e5 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
@@ -238,7 +238,6 @@ extern unsigned int tlbcam_index;
 typedef struct {
 	unsigned int	id;
 	unsigned int	active;
-	void __user	*vdso;
 } mm_context_t;
 
 /* Page size definitions, common between 32 and 64-bit
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 8f05ed0da292..ae61c480af53 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -824,8 +824,8 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
 	}
 
 	/* Save user registers on the stack */
-	if (tsk->mm->context.vdso) {
-		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
+	if (tsk->mm->vdso_base != (void __user *)UNMAPPED_VDSO_BASE) {
+		tramp = VDSO32_SYMBOL(tsk->mm->vdso_base, sigtramp_rt32);
 	} else {
 		tramp = (unsigned long)mctx->mc_pad;
 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
@@ -922,8 +922,8 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
 	else
 		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
 
-	if (tsk->mm->context.vdso) {
-		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
+	if (tsk->mm->vdso_base != (void __user *)UNMAPPED_VDSO_BASE) {
+		tramp = VDSO32_SYMBOL(tsk->mm->vdso_base, sigtramp32);
 	} else {
 		tramp = (unsigned long)mctx->mc_pad;
 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index dca66481d0c2..468866dc1e0e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -906,8 +906,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
 	tsk->thread.fp_state.fpscr = 0;
 
 	/* Set up to return from userspace. */
-	if (tsk->mm->context.vdso) {
-		regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);
+	if (tsk->mm->vdso_base != (void __user *)UNMAPPED_VDSO_BASE) {
+		regs->nip = VDSO64_SYMBOL(tsk->mm->vdso_base, sigtramp_rt64);
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
 		if (err)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 6d6e575630c1..2080a0540537 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -57,29 +57,6 @@ enum vvar_pages {
 	VVAR_NR_PAGES,
 };
 
-static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
-		       unsigned long text_size)
-{
-	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
-
-	if (new_size != text_size)
-		return -EINVAL;
-
-	current->mm->context.vdso = (void __user *)new_vma->vm_start;
-
-	return 0;
-}
-
-static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
-{
-	return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
-}
-
-static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
-{
-	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
-}
-
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 			     struct vm_area_struct *vma, struct vm_fault *vmf);
 
@@ -90,12 +67,10 @@ static struct vm_special_mapping vvar_spec __ro_after_init = {
 
 static struct vm_special_mapping vdso32_spec __ro_after_init = {
 	.name = "[vdso]",
-	.mremap = vdso32_mremap,
 };
 
 static struct vm_special_mapping vdso64_spec __ro_after_init = {
 	.name = "[vdso]",
-	.mremap = vdso64_mremap,
 };
 
 #ifdef CONFIG_TIME_NS
@@ -251,7 +226,7 @@ static int __arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
 	if (IS_ERR(vma)) {
 		do_munmap(mm, vdso_base, vvar_size, NULL);
 	} else {
-		mm->context.vdso = (void __user *)vdso_base + vvar_size;
+		mm->vdso_base = (void __user *)vdso_base + vvar_size;
 		*sysinfo_ehdr = vdso_base + vvar_size;
 	}
 
@@ -263,14 +238,10 @@ int arch_setup_additional_pages(unsigned long *sysinfo_ehdr)
 	struct mm_struct *mm = current->mm;
 	int rc;
 
-	mm->context.vdso = NULL;
-
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	rc = __arch_setup_additional_pages(sysinfo_ehdr);
-	if (rc)
-		mm->context.vdso = NULL;
 
 	mmap_write_unlock(mm);
 	return rc;
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index b83c47b7947f..c48b63e16603 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -59,8 +59,8 @@ static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
 {
 	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
 		return 1;
-	if (current->mm->context.vdso &&
-	    nip == VDSO32_SYMBOL(current->mm->context.vdso, sigtramp32))
+	if (current->mm->vdso_base != (void __user *)UNMAPPED_VDSO_BASE &&
+	    nip == VDSO32_SYMBOL(current->mm->vdso_base, sigtramp32))
 		return 1;
 	return 0;
 }
@@ -70,8 +70,8 @@ static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
 	if (nip == fp + offsetof(struct rt_signal_frame_32,
 				 uc.uc_mcontext.mc_pad))
 		return 1;
-	if (current->mm->context.vdso &&
-	    nip == VDSO32_SYMBOL(current->mm->context.vdso, sigtramp_rt32))
+	if (current->mm->vdso_base != (void __user *)UNMAPPED_VDSO_BASE &&
+	    nip == VDSO32_SYMBOL(current->mm->vdso_base, sigtramp_rt32))
 		return 1;
 	return 0;
 }
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index 8d0df4226328..ef7116bd525a 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -68,8 +68,8 @@ static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
 {
 	if (nip == fp + offsetof(struct signal_frame_64, tramp))
 		return 1;
-	if (current->mm->context.vdso &&
-	    nip == VDSO64_SYMBOL(current->mm->context.vdso, sigtramp_rt64))
+	if (current->mm->vdso_base != (void __user *)UNMAPPED_VDSO_BASE &&
+	    nip == VDSO64_SYMBOL(current->mm->vdso_base, sigtramp_rt64))
 		return 1;
 	return 0;
 }
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 27516046117a..394aeaf136bb 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -190,11 +190,6 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
 }
 #endif
 
-static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
-			      unsigned long end)
-{
-}
-
 /*
  * We only want to enforce protection keys on the current process
  * because we effectively have no access to PKRU for other
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 4dbb177d1150..6cd41034743d 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
- * and arch_unmap to be included in asm-FOO/mmu_context.h for any
+ * Define generic no-op hooks for arch_dup_mmap() and arch_exit_mmap()
+ * to be included in asm-FOO/mmu_context.h for any
  * arch FOO which doesn't need to hook these.
  */
 #ifndef _ASM_GENERIC_MM_HOOKS_H
@@ -17,11 +17,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 {
 }
 
-static inline void arch_unmap(struct mm_struct *mm,
-			unsigned long start, unsigned long end)
-{
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
diff --git a/mm/mmap.c b/mm/mmap.c
index 5d1ffce51119..d22eb9ab770c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2821,13 +2821,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	if (len == 0)
 		return -EINVAL;
 
-	/*
-	 * arch_unmap() might do unmaps itself.  It must be called
-	 * and finish any rbtree manipulation before this code
-	 * runs and also starts to manipulate the rbtree.
-	 */
-	arch_unmap(mm, start, end);
-
 	/* Find the first overlapping VMA */
 	vma = find_vma(mm, start);
 	if (!vma)
-- 
2.31.1



Thread overview: 45+ messages
2021-06-11 18:02 [PATCH v3 00/23] Add generic vdso_base tracking Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 01/23] x86/elf: Check in_x32_syscall() in compat_arch_setup_additional_pages() Dmitry Safonov
2021-06-19 20:41   ` Thomas Gleixner
2021-06-21 20:59     ` Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 02/23] elf: Move arch_setup_additional_pages() to generic elf.h Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 03/23] arm/elf: Remove needless ifdef CONFIG_MMU Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 04/23] arm64: Use in_compat_task() in arch_setup_additional_pages() Dmitry Safonov
2021-06-15 10:21   ` Will Deacon
2021-06-11 18:02 ` [PATCH v3 05/23] x86: Remove compat_arch_setup_additional_pages() Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 06/23] elf: " Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 07/23] vdso: Set mm->context.vdso only on success of _install_special_mapping() Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 08/23] elf/vdso: Modify arch_setup_additional_pages() parameters Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 09/23] elf: Use sysinfo_ehdr in ARCH_DLINFO() Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 10/23] arm/vdso: Remove vdso pointer from mm->context Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 11/23] s390/vdso: Remove vdso_base " Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 12/23] sparc/vdso: Remove vdso " Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 13/23] mm/mmap: Make vm_special_mapping::mremap return void Dmitry Safonov
2021-06-17  7:20   ` Christophe Leroy
2021-06-21 21:12     ` Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 14/23] x86/signal: Land on &frame->retcode when vdso isn't mapped Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 15/23] x86/signal: Check if vdso_image_32 is mapped before trying to land on it Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 16/23] mm: Add vdso_base in mm_struct Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 17/23] x86/vdso: Migrate to generic vdso_base Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 18/23] arm/vdso: " Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 19/23] arm64/vdso: Migrate compat signals " Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 20/23] arm64/vdso: Migrate native " Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 21/23] mips/vdso: Migrate " Dmitry Safonov
2021-06-11 18:02 ` Dmitry Safonov [this message]
2021-06-15 12:52   ` [PATCH v3 22/23] powerpc/vdso: Migrate native signals " Michael Ellerman
2021-06-17  6:30   ` Christophe Leroy
2021-06-17  6:36   ` Christophe Leroy
2021-06-17  7:34     ` Christophe Leroy
2021-06-21 21:22       ` Dmitry Safonov
2021-06-11 18:02 ` [PATCH v3 23/23] x86/vdso/selftest: Add a test for unmapping vDSO Dmitry Safonov
2021-06-11 18:21   ` Shuah Khan
2021-06-11 18:37     ` Dmitry Safonov
2021-06-11 18:43       ` Shuah Khan
2021-06-17  9:13 ` [PATCH v3 00/23] Add generic vdso_base tracking Christophe Leroy
2021-06-21 21:57   ` Dmitry Safonov
2022-03-09 15:41 ` Christophe Leroy
2022-03-10 21:17   ` Dmitry Safonov
2022-08-19  9:17     ` Christophe Leroy
2022-08-23 19:13       ` Dmitry Safonov
2023-10-11 10:28         ` Christophe Leroy
2023-10-11 23:20 ` H. Peter Anvin
