From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>,
	Peter Zijlstra <peterz@infradead.org>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"minchan.kim@gmail.com" <minchan.kim@gmail.com>,
	cl@linux-foundation.org,
	"hugh.dickins" <hugh.dickins@tiscali.co.uk>,
	Nick Piggin <nickpiggin@yahoo.com.au>,
	Ingo Molnar <mingo@elte.hu>,
	Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [RFC][PATCH 5/8] mm: Speculative pte_map_lock()
Date: Mon, 04 Jan 2010 19:24:34 +0100
Message-ID: <20100104182813.599032632@chello.nl>
In-Reply-To: <20100104182429.833180340@chello.nl>

Implement pte_map_lock() for the case where .flags & FAULT_FLAG_SPECULATIVE
is set, in which case we're not holding mmap_sem and can therefore race
against munmap() and similar routines.

Since we cannot rely on pagetable stability in the face of unmap, we
use the technique fast_gup() also uses for a lockless pagetable
lookup. For this we introduce the {un,}pin_page_tables() functions.
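
For reference, a minimal sketch of what the arch hooks could look like
on x86, going by the IRQ-disable scheme described below; the real arch
implementation comes with patch 7/8, so treat this as illustration only:

void pin_page_tables(void)
{
	/*
	 * With IRQs off this CPU cannot service the TLB shootdown
	 * IPI, so the pagetables we walk cannot be freed under us.
	 */
	local_irq_disable();
}

void unpin_page_tables(void)
{
	local_irq_enable();
}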

The only problem is that we do TLB flushes while holding the PTL, which
in turn means that we cannot spin on the PTL with IRQs disabled: the
PTL holder may be waiting for our CPU to ack its flush IPI, which we
cannot do with IRQs off, so neither side would make progress.

Fudge around this by open-coding a spinner that drops the page-table
pin between attempts; on x86 the pin will be an IRQ-disable (which
holds off the TLB flush, which is done before freeing the pagetables).
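
In short, the acquisition loop in pte_map_lock() below has this shape
(sketch only): never block on the PTL while pinned, and fully drop the
pin before retrying so a pending flush IPI can be serviced:

	for (;;) {
		pin_page_tables();	/* IRQs off: holds off the flush IPI */
		/* re-walk pgd/pud/pmd, bail if the tables are gone */
		if (spin_trylock(ptl))
			break;		/* PTL held, tables still pinned */
		unpin_page_tables();	/* IRQs on: let the flush complete */
	}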

Once we hold the PTL, we can validate the VMA; if it is still valid we
know we're good to go, and holding the PTL will hold off unmap.

We need to propagate the VMA sequence count through the fault code.
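
The validation itself then reduces to comparing that sequence count
snapshot against the VMA; a sketch, assuming the seqcount from patch
3/8 lives in a field called vm_sequence (name made up here):

static inline int vma_is_dead(struct vm_area_struct *vma, unsigned int seq)
{
	/* Sketch only; the real helper comes from patches 3 and 4/8. */
	return !vma || read_seqcount_retry(&vma->vm_sequence, seq);
}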

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 include/linux/mm.h |    2 
 mm/memory.c        |  111 ++++++++++++++++++++++++++++++++++++++---------------
 mm/util.c          |   12 ++++-
 3 files changed, 93 insertions(+), 32 deletions(-)

Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
@@ -848,6 +848,8 @@ int get_user_pages(struct task_struct *t
 			struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
+void pin_page_tables(void);
+void unpin_page_tables(void);
 struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c
+++ linux-2.6/mm/memory.c
@@ -1959,10 +1959,56 @@ static inline void cow_user_page(struct 
 
 static int pte_map_lock(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, unsigned int flags,
-		pte_t **ptep, spinlock_t **ptl)
+		unsigned int seq, pte_t **ptep, spinlock_t **ptlp)
 {
-	*ptep = pte_offset_map_lock(mm, pmd, address, ptl);
+	pgd_t *pgd;
+	pud_t *pud;
+
+	if (!(flags & FAULT_FLAG_SPECULATIVE)) {
+		*ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
+		return 1;
+	}
+
+again:
+	pin_page_tables();
+
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto out;
+
+	if (pmd_huge(*pmd))
+		goto out;
+
+	*ptlp = pte_lockptr(mm, pmd);
+	*ptep = pte_offset_map(pmd, address);
+	if (!*ptep)
+		goto out;
+
+	if (!spin_trylock(*ptlp)) {
+		pte_unmap(*ptep);
+		unpin_page_tables();
+		goto again;
+	}
+
+	if (vma_is_dead(vma, seq))
+		goto unlock;
+
+	unpin_page_tables();
 	return 1;
+
+unlock:
+	pte_unmap_unlock(*ptep, *ptlp);
+out:
+	unpin_page_tables();
+	return 0;
 }
 
 /*
@@ -1985,7 +2031,8 @@ static int pte_map_lock(struct mm_struct
  */
 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		spinlock_t *ptl, unsigned int flags, pte_t orig_pte)
+		spinlock_t *ptl, unsigned int flags, pte_t orig_pte,
+		unsigned int seq)
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
@@ -2018,7 +2065,7 @@ static int do_wp_page(struct mm_struct *
 			pte_unmap_unlock(page_table, ptl);
 			lock_page(old_page);
 
-			if (!pte_map_lock(mm, vma, address, pmd, flags,
+			if (!pte_map_lock(mm, vma, address, pmd, flags, seq,
 						&page_table, &ptl)) {
 				unlock_page(old_page);
 				ret = VM_FAULT_RETRY;
@@ -2084,7 +2131,7 @@ static int do_wp_page(struct mm_struct *
 			 * they did, we just return, as we can count on the
 			 * MMU to tell us if they didn't also make it writable.
 			 */
-			if (!pte_map_lock(mm, vma, address, pmd, flags,
+			if (!pte_map_lock(mm, vma, address, pmd, flags, seq,
 						&page_table, &ptl)) {
 				unlock_page(old_page);
 				ret = VM_FAULT_RETRY;
@@ -2161,7 +2208,7 @@ gotten:
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	if (!pte_map_lock(mm, vma, address, pmd, flags, &page_table, &ptl)) {
+	if (!pte_map_lock(mm, vma, address, pmd, flags, seq, &page_table, &ptl)) {
 		mem_cgroup_uncharge_page(new_page);
 		ret = VM_FAULT_RETRY;
 		goto err_free_new;
@@ -2511,8 +2558,8 @@ int vmtruncate_range(struct inode *inode
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pmd_t *pmd,
-		unsigned int flags, pte_t orig_pte)
+		unsigned long address, pmd_t *pmd, unsigned int flags,
+		pte_t orig_pte, unsigned int seq)
 {
 	spinlock_t *ptl;
 	struct page *page;
@@ -2548,7 +2595,7 @@ static int do_swap_page(struct mm_struct
 			 * Back out if somebody else faulted in this pte
 			 * while we released the pte lock.
 			 */
-			if (!pte_map_lock(mm, vma, address, pmd, flags,
+			if (!pte_map_lock(mm, vma, address, pmd, flags, seq,
 						&page_table, &ptl)) {
 				ret = VM_FAULT_RETRY;
 				goto out;
@@ -2589,7 +2636,7 @@ static int do_swap_page(struct mm_struct
 	/*
 	 * Back out if somebody else already faulted in this pte.
 	 */
-	if (!pte_map_lock(mm, vma, address, pmd, flags, &page_table, &ptl)) {
+	if (!pte_map_lock(mm, vma, address, pmd, flags, seq, &page_table, &ptl)) {
 		ret = VM_FAULT_RETRY;
 		goto out_nolock;
 	}
@@ -2634,7 +2681,8 @@ static int do_swap_page(struct mm_struct
 	unlock_page(page);
 
 	if (flags & FAULT_FLAG_WRITE) {
-		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, flags, pte);
+		ret |= do_wp_page(mm, vma, address, page_table, pmd,
+				ptl, flags, pte, seq);
 		if (ret & VM_FAULT_ERROR)
 			ret &= VM_FAULT_ERROR;
 		goto out;
@@ -2663,7 +2711,8 @@ out_release:
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pmd_t *pmd, unsigned int flags)
+		unsigned long address, pmd_t *pmd, unsigned int flags,
+		unsigned int seq)
 {
 	struct page *page;
 	spinlock_t *ptl;
@@ -2672,7 +2721,7 @@ static int do_anonymous_page(struct mm_s
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
-		if (!pte_map_lock(mm, vma, address, pmd, flags,
+		if (!pte_map_lock(mm, vma, address, pmd, flags, seq,
 					&page_table, &ptl))
 			return VM_FAULT_RETRY;
 		if (!pte_none(*page_table))
@@ -2697,7 +2746,7 @@ static int do_anonymous_page(struct mm_s
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
-	if (!pte_map_lock(mm, vma, address, pmd, flags, &page_table, &ptl)) {
+	if (!pte_map_lock(mm, vma, address, pmd, flags, seq, &page_table, &ptl)) {
 		mem_cgroup_uncharge_page(page);
 		page_cache_release(page);
 		return VM_FAULT_RETRY;
@@ -2740,8 +2789,8 @@ oom:
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pmd_t *pmd,
-		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
+		unsigned long address, pmd_t *pmd, pgoff_t pgoff,
+		unsigned int flags, pte_t orig_pte, unsigned int seq)
 {
 	pte_t *page_table;
 	spinlock_t *ptl;
@@ -2841,7 +2890,7 @@ static int __do_fault(struct mm_struct *
 
 	}
 
-	if (!pte_map_lock(mm, vma, address, pmd, flags, &page_table, &ptl)) {
+	if (!pte_map_lock(mm, vma, address, pmd, flags, seq, &page_table, &ptl)) {
 		ret = VM_FAULT_RETRY;
 		goto out_uncharge;
 	}
@@ -2923,12 +2972,12 @@ unwritable_page:
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd,
-		unsigned int flags, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte, unsigned int seq)
 {
 	pgoff_t pgoff = (((address & PAGE_MASK)
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
-	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
+	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte, seq);
 }
 
 /*
@@ -2942,7 +2991,7 @@ static int do_linear_fault(struct mm_str
  */
 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd,
-		unsigned int flags, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte, unsigned int seq)
 {
 	pgoff_t pgoff;
 
@@ -2957,7 +3006,7 @@ static int do_nonlinear_fault(struct mm_
 	}
 
 	pgoff = pte_to_pgoff(orig_pte);
-	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
+	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte, seq);
 }
 
 /*
@@ -2975,7 +3024,8 @@ static int do_nonlinear_fault(struct mm_
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
-		pte_t entry, pmd_t *pmd, unsigned int flags)
+		pte_t entry, pmd_t *pmd, unsigned int flags,
+		unsigned int seq)
 {
 	spinlock_t *ptl;
 	pte_t *pte;
@@ -2985,26 +3035,27 @@ static inline int handle_pte_fault(struc
 			if (vma->vm_ops) {
 				if (likely(vma->vm_ops->fault))
 					return do_linear_fault(mm, vma, address,
-						pmd, flags, entry);
+						pmd, flags, entry, seq);
 			}
 			return do_anonymous_page(mm, vma, address,
-						 pmd, flags);
+						 pmd, flags, seq);
 		}
 		if (pte_file(entry))
 			return do_nonlinear_fault(mm, vma, address,
-					pmd, flags, entry);
+					pmd, flags, entry, seq);
 		return do_swap_page(mm, vma, address,
-					pmd, flags, entry);
+					pmd, flags, entry, seq);
 	}
 
-	if (!pte_map_lock(mm, vma, address, pmd, flags, &pte, &ptl))
+	if (!pte_map_lock(mm, vma, address, pmd, flags, seq, &pte, &ptl))
 		return VM_FAULT_RETRY;
 	if (unlikely(!pte_same(*pte, entry)))
 		goto unlock;
 	if (flags & FAULT_FLAG_WRITE) {
-		if (!pte_write(entry))
+		if (!pte_write(entry)) {
 			return do_wp_page(mm, vma, address,
-					pte, pmd, ptl, flags, entry);
+					pte, pmd, ptl, flags, entry, seq);
+		}
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
@@ -3058,7 +3109,7 @@ int handle_mm_fault(struct mm_struct *mm
 
 	pte_unmap(pte);
 
-	return handle_pte_fault(mm, vma, address, entry, pmd, flags);
+	return handle_pte_fault(mm, vma, address, entry, pmd, flags, 0);
 }
 
 #ifndef __PAGETABLE_PUD_FOLDED
Index: linux-2.6/mm/util.c
===================================================================
--- linux-2.6.orig/mm/util.c
+++ linux-2.6/mm/util.c
@@ -253,8 +253,8 @@ void arch_pick_mmap_layout(struct mm_str
  * callers need to carefully consider what to use. On many architectures,
  * get_user_pages_fast simply falls back to get_user_pages.
  */
-int __attribute__((weak)) get_user_pages_fast(unsigned long start,
-				int nr_pages, int write, struct page **pages)
+int __weak get_user_pages_fast(unsigned long start,
+			       int nr_pages, int write, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	int ret;
@@ -268,6 +268,14 @@ int __attribute__((weak)) get_user_pages
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
+void __weak pin_page_tables(void)
+{
+}
+
+void __weak unpin_page_tables(void)
+{
+}
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);

Thread overview: 121+ messages

2010-01-04 18:24 [RFC][PATCH 0/8] Speculative pagefault -v3 Peter Zijlstra
2010-01-04 18:24 ` [RFC][PATCH 1/8] mm: Remove pte reference from fault path Peter Zijlstra
2010-01-04 18:24 ` [RFC][PATCH 2/8] mm: Speculative pagefault infrastructure Peter Zijlstra
2010-01-04 18:24 ` [RFC][PATCH 3/8] mm: Add vma sequence count Peter Zijlstra
2010-01-04 18:24 ` [RFC][PATCH 4/8] mm: RCU free vmas Peter Zijlstra
2010-01-05  2:43   ` Paul E. McKenney
2010-01-05  8:28     ` Peter Zijlstra
2010-01-05 16:05       ` Paul E. McKenney
2010-01-04 18:24 ` Peter Zijlstra [this message]
2010-01-04 18:24 ` [RFC][PATCH 6/8] mm: handle_speculative_fault() Peter Zijlstra
2010-01-05  0:25   ` KAMEZAWA Hiroyuki
2010-01-05  3:13     ` Linus Torvalds
2010-01-05  8:17       ` Peter Zijlstra
2010-01-05  8:57       ` Peter Zijlstra
2010-01-05 15:34         ` Linus Torvalds
2010-01-05 15:40           ` Al Viro
2010-01-05 16:10             ` Linus Torvalds
2010-01-06 15:41               ` Peter Zijlstra
2010-01-05  9:37       ` Peter Zijlstra
2010-01-05 23:35         ` Linus Torvalds
2010-01-05  4:29     ` Minchan Kim
2010-01-05  4:43       ` KAMEZAWA Hiroyuki
2010-01-05  5:10         ` Linus Torvalds
2010-01-05  5:30           ` KAMEZAWA Hiroyuki
2010-01-05  7:39             ` KAMEZAWA Hiroyuki
2010-01-05 15:26               ` Linus Torvalds
2010-01-05 16:14                 ` Linus Torvalds
2010-01-05 17:25                   ` Andi Kleen
2010-01-05 17:47                     ` Christoph Lameter
2010-01-05 18:00                       ` Andi Kleen
2010-01-05 17:55                     ` Linus Torvalds
2010-01-05 18:13                       ` Christoph Lameter
2010-01-05 18:25                         ` Linus Torvalds
2010-01-05 18:46                           ` Christoph Lameter
2010-01-05 18:56                             ` Linus Torvalds
2010-01-05 19:15                               ` Christoph Lameter
2010-01-05 19:28                                 ` Linus Torvalds
2010-01-05 18:55                           ` Paul E. McKenney
2010-01-05 19:08                             ` Linus Torvalds
2010-01-05 19:23                               ` Paul E. McKenney
2010-01-05 20:29                           ` Peter Zijlstra
2010-01-05 20:46                             ` Linus Torvalds
2010-01-05 21:00                               ` Linus Torvalds
2010-01-05 23:29                             ` Paul E. McKenney
2010-01-06  0:22                 ` KAMEZAWA Hiroyuki
2010-01-06  1:37                   ` Linus Torvalds
2010-01-06  2:52                     ` KAMEZAWA Hiroyuki
2010-01-06  3:27                       ` Linus Torvalds
2010-01-06  3:56                         ` KAMEZAWA Hiroyuki
2010-01-06  4:20                           ` Linus Torvalds
2010-01-06  7:06                             ` KAMEZAWA Hiroyuki
2010-01-06  7:49                               ` Minchan Kim
2010-01-06  9:39                               ` Linus Torvalds
2010-01-07  1:00                                 ` KAMEZAWA Hiroyuki
2010-01-08 16:53                             ` Peter Zijlstra
2010-01-08 17:22                               ` Linus Torvalds
2010-01-08 17:43                                 ` Christoph Lameter
2010-01-08 17:52                                   ` Linus Torvalds
2010-01-08 18:33                                     ` Christoph Lameter
2010-01-08 18:46                                   ` Andi Kleen
2010-01-08 18:56                                     ` Christoph Lameter
2010-01-08 19:10                                       ` Andi Kleen
2010-01-08 19:11                                       ` Linus Torvalds
2010-01-08 19:28                                         ` Andi Kleen
2010-01-08 19:39                                           ` Linus Torvalds
2010-01-08 19:42                                             ` Linus Torvalds
2010-01-08 21:36                                   ` Linus Torvalds
2010-01-08 21:46                                     ` Christoph Lameter
2010-01-08 22:43                                       ` Linus Torvalds
2010-01-08 22:43                                       ` Linus Torvalds
2010-01-09 14:47                               ` Ed Tomlinson
2010-01-10  5:27                                 ` Nitin Gupta
2010-01-05 15:14             ` Christoph Lameter
2010-01-05  8:18           ` Peter Zijlstra
2010-01-05  6:00         ` Minchan Kim
2010-01-05  4:48       ` Linus Torvalds
2010-01-05  6:09         ` Minchan Kim
2010-01-05  6:09           ` KAMEZAWA Hiroyuki
2010-01-05  6:24             ` Minchan Kim
2010-01-05  8:35           ` Peter Zijlstra
2010-01-05 13:45   ` Arjan van de Ven
2010-01-05 14:15     ` Andi Kleen
2010-01-05 15:17     ` Christoph Lameter
2010-01-06  3:22       ` Arjan van de Ven
2010-01-07 16:11         ` Christoph Lameter
2010-01-07 16:19           ` Linus Torvalds
2010-01-07 16:31             ` Linus Torvalds
2010-01-07 16:34             ` Paul E. McKenney
2010-01-07 16:36             ` Christoph Lameter
2010-01-08  4:49               ` Arjan van de Ven
2010-01-08  5:00                 ` Linus Torvalds
2010-01-08 15:51                 ` Christoph Lameter
2010-01-09 15:55                   ` Arjan van de Ven
2010-01-07 17:22             ` Peter Zijlstra
2010-01-07 17:36               ` Linus Torvalds
2010-01-07 17:49                 ` Linus Torvalds
2010-01-07 18:00                   ` Peter Zijlstra
2010-01-07 18:15                     ` Linus Torvalds
2010-01-07 21:49                       ` Peter Zijlstra
2010-01-07 18:44                   ` Linus Torvalds
2010-01-07 19:20                     ` Paul E. McKenney
2010-01-07 20:06                       ` Linus Torvalds
2010-01-07 20:25                         ` Paul E. McKenney
2010-01-07 19:24                     ` Christoph Lameter
2010-01-07 20:08                       ` Linus Torvalds
2010-01-07 20:13                         ` Linus Torvalds
2010-01-07 21:44                     ` Peter Zijlstra
2010-01-07 22:33                       ` Linus Torvalds
2010-01-08  0:23                         ` KAMEZAWA Hiroyuki
2010-01-08  0:25                           ` KAMEZAWA Hiroyuki
2010-01-08  0:39                           ` Linus Torvalds
2010-01-08  0:41                             ` Linus Torvalds
2010-01-07 23:51                 ` Rik van Riel
2010-01-04 18:24 ` [RFC][PATCH 7/8] mm,x86: speculative pagefault support Peter Zijlstra
2010-01-04 18:24 ` [RFC][PATCH 8/8] mm: Optimize pte_map_lock() Peter Zijlstra
2010-01-04 21:41 ` [RFC][PATCH 0/8] Speculative pagefault -v3 Rik van Riel
2010-01-04 21:46   ` Peter Zijlstra
2010-01-04 23:20     ` Rik van Riel
2010-01-04 21:59   ` Christoph Lameter
2010-01-05  0:28     ` KAMEZAWA Hiroyuki
2010-01-05  2:26 ` Minchan Kim
