From: Anshuman Khandual <khandual@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: mhocko@suse.com, js1304@gmail.com, vbabka@suse.cz,
	mgorman@suse.de, minchan@kernel.org, akpm@linux-foundation.org,
	aneesh.kumar@linux.vnet.ibm.com, bsingharora@gmail.com
Subject: [RFC 7/8] mm: Add a new migration function migrate_virtual_range()
Date: Mon, 24 Oct 2016 10:01:56 +0530
Message-ID: <1477283517-2504-8-git-send-email-khandual@linux.vnet.ibm.com>
In-Reply-To: <1477283517-2504-1-git-send-email-khandual@linux.vnet.ibm.com>

Add a new virtual address range based migration interface which can
migrate all the pages mapped within a virtual address range of a
process to a destination node, and export the new function symbol.
To support this, queue_pages_range() is made available outside
mm/mempolicy.c and the internal MPOL_MF_DISCONTIG_OK and
MPOL_MF_INVERT flags move into include/linux/mempolicy.h.
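
A minimal usage sketch (hypothetical caller; the helper and its
parameters are illustrative, not part of this patch):

	#include <linux/migrate.h>

	/*
	 * Hypothetical example: migrate a task's buffer to a given
	 * NUMA node. The range [buf, buf + len) must lie fully within
	 * a single VMA of the target task.
	 */
	static int move_buffer_to_node(int pid, unsigned long buf,
				       unsigned long len, int nid)
	{
		return migrate_virtual_range(pid, buf, buf + len, nid);
	}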

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
 include/linux/mempolicy.h |  7 ++++
 include/linux/migrate.h   |  3 ++
 mm/mempolicy.c            |  7 ++--
 mm/migrate.c              | 84 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 96 insertions(+), 5 deletions(-)

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 09d4b70..f18c0ea 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -152,6 +152,9 @@ extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
 extern unsigned int mempolicy_slab_node(void);
+extern int queue_pages_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end, nodemask_t *nodes,
+			unsigned long flags, struct list_head *pagelist);
 
 extern enum zone_type policy_zone;
 
@@ -319,4 +322,8 @@ static inline void mpol_put_task_policy(struct task_struct *task)
 {
 }
 #endif /* CONFIG_NUMA */
+
+#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
+#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
+
 #endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index ae8d475..e2a1af5 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -49,6 +49,9 @@ extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		struct buffer_head *head, enum migrate_mode mode,
 		int extra_count);
+
+extern int migrate_virtual_range(int pid, unsigned long start,
+				unsigned long end, int nid);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b983cea..aa8479b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -100,10 +100,6 @@
 
 #include "internal.h"
 
-/* Internal flags */
-#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
-#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
-
 static struct kmem_cache *policy_cache;
 static struct kmem_cache *sn_cache;
 
@@ -703,7 +699,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
  * passed via @private.)
  */
-static int
+int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		nodemask_t *nodes, unsigned long flags,
 		struct list_head *pagelist)
@@ -724,6 +720,7 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 	return walk_page_range(start, end, &queue_pages_walk);
 }
+EXPORT_SYMBOL(queue_pages_range);
 
 /*
  * Apply policy to a single VMA
diff --git a/mm/migrate.c b/mm/migrate.c
index 99250ae..06300bb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1367,6 +1367,90 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	return rc;
 }
 
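+/*
+ * Allocation callback for migrate_pages(): allocate an order-0 page
+ * strictly on the requested target node.
+ */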
+static struct page *new_node_page(struct page *page,
+		unsigned long node, int **x)
+{
+	return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE
+					| __GFP_THISNODE, 0);
+}
+
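+/* Tag a VMA whose pages have moved to a coherent device memory node */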
+#ifdef CONFIG_COHERENT_DEVICE
+static void mark_vma_cdm(struct vm_area_struct *vma)
+{
+	vma->vm_flags |= VM_CDM;
+}
+#else
+static void mark_vma_cdm(struct vm_area_struct *vma) {}
+#endif
+
+/**
+ * migrate_virtual_range - migrate all the pages faulted in within a
+ *			virtual address range to a specified node
+ *
+ * @pid:		PID of the task
+ * @start:		Beginning of the virtual address range
+ * @end:		End of the virtual address range
+ * @nid:		Target migration node
+ *
+ * The function scans the process VMA list for the VMA which fully
+ * contains the given virtual range, then isolates and migrates all
+ * the pages mapped within that range.
+ *
+ * Returns 0 on success, the number of pages that could not be
+ * migrated, or a negative error code.
+ */
+int migrate_virtual_range(int pid, unsigned long start,
+			unsigned long end, int nid)
+{
+	struct task_struct *task;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	nodemask_t nmask;
+	int ret = -EINVAL;
+
+	LIST_HEAD(mlist);
+
+	nodes_setall(nmask);
+
+	if (!start || !end || start >= end)
+		return -EINVAL;
+
+	rcu_read_lock();
+	task = find_task_by_vpid(pid);
+	mm = task ? get_task_mm(task) : NULL;
+	rcu_read_unlock();
+
+	if (!mm)
+		return -ESRCH;
+
+	start &= PAGE_MASK;
+	end &= PAGE_MASK;
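+	/*
+	 * mmap_sem is taken for write because mark_vma_cdm() may modify
+	 * vma->vm_flags after a successful migration.
+	 */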
+	down_write(&mm->mmap_sem);
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
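+		/* Only a VMA fully containing [start, end) is eligible */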
+		if ((start < vma->vm_start) || (end > vma->vm_end))
+			continue;
+
+		ret = queue_pages_range(mm, start, end, &nmask, MPOL_MF_MOVE_ALL
+						| MPOL_MF_DISCONTIG_OK, &mlist);
+		if (ret) {
+			putback_movable_pages(&mlist);
+			break;
+		}
+
+		if (list_empty(&mlist)) {
+			ret = -ENOMEM;
+			break;
+		}
+
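+		/*
+		 * Pages which could not be migrated remain on the list
+		 * and are returned to the LRU via putback below.
+		 */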
+		ret = migrate_pages(&mlist, new_node_page, NULL, nid,
+					MIGRATE_SYNC, MR_COMPACTION);
+		if (ret) {
+			putback_movable_pages(&mlist);
+		} else {
+			if (isolated_cdm_node(nid))
+				mark_vma_cdm(vma);
+		}
+	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
+	return ret;
+}
+EXPORT_SYMBOL(migrate_virtual_range);
+
 #ifdef CONFIG_NUMA
 /*
  * Move a list of individual pages
-- 
2.1.0

