From: Andrea Arcangeli <aarcange@redhat.com>
To: linux-mm@kvack.org,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-kernel@vger.kernel.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
	Adam Litke <agl@us.ibm.com>, Avi Kivity <avi@redhat.com>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>,
	Rik van Riel <riel@redhat.com>, Mel Gorman <mel@csn.ul.ie>,
	Dave Hansen <dave@linux.vnet.ibm.com>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Ingo Molnar <mingo@elte.hu>, Mike Travis <travis@sgi.com>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Christoph Lameter <cl@linux-foundation.org>,
	Chris Wright <chrisw@sous-sol.org>,
	bpicco@redhat.com,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>,
	Chris Mason <chris.mason@oracle.com>,
	Borislav Petkov <bp@alien8.de>
Subject: [PATCH 53 of 66] add numa awareness to hugepage allocations
Date: Wed, 03 Nov 2010 16:28:28 +0100	[thread overview]
Message-ID: <223ee926614158fc1353.1288798108@v2.random> (raw)
In-Reply-To: <patchbomb.1288798055@v2.random>

From: Andrea Arcangeli <aarcange@redhat.com>

It's mostly a matter of replacing alloc_pages with alloc_pages_vma after
introducing alloc_pages_vma. khugepaged needs special handling, as the
allocation has to happen inside collapse_huge_page where the vma is known,
and an error has to be returned to the outer loop so it can sleep for
alloc_sleep_millisecs before retrying. With CONFIG_NUMA=n the more efficient
logic of handling allocation failures inside khugepaged is retained.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
---
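
[Editorial note, not part of the patch] A condensed sketch of the two changes
described above, with names taken from the hunks that follow and everything
unrelated stripped away: allocation sites move to the new order-aware,
mempolicy-aware alloc_pages_vma(), and with CONFIG_NUMA=y khugepaged reports
allocation failure to its outer loop through an ERR_PTR so the loop can sleep
and retry.

	/* 1) NUMA-aware huge page allocation: vma + address select the node. */
	static inline struct page *alloc_hugepage_vma(int defrag,
						      struct vm_area_struct *vma,
						      unsigned long haddr)
	{
		return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
				       HPAGE_PMD_ORDER, vma, haddr);
	}

	/*
	 * 2) khugepaged with CONFIG_NUMA=y: allocate inside
	 *    collapse_huge_page(), where the vma is known, and hand any
	 *    failure back to khugepaged_loop().
	 */
	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
	if (unlikely(!new_page)) {
		*hpage = ERR_PTR(-ENOMEM);	/* seen by the outer loop */
		goto out;
	}

	/* ...and in khugepaged_loop(): */
	if (IS_ERR(hpage)) {
		khugepaged_alloc_sleep();	/* wait alloc_sleep_millisecs */
		hpage = NULL;			/* retry on the next pass */
	}

With CONFIG_NUMA=n the previous behaviour is kept: khugepaged preallocates
the huge page up front in khugepaged_alloc_hugepage() and collapse_huge_page
only consumes it.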

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -330,14 +330,17 @@ alloc_pages(gfp_t gfp_mask, unsigned int
 {
 	return alloc_pages_current(gfp_mask, order);
 }
-extern struct page *alloc_page_vma(gfp_t gfp_mask,
+extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
+#define alloc_pages_vma(gfp_mask, order, vma, addr)	\
+	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+#define alloc_page_vma(gfp_mask, vma, addr)	\
+	alloc_pages_vma(gfp_mask, 0, vma, addr)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -620,11 +620,26 @@ static int __do_huge_pmd_anonymous_page(
 	return ret;
 }
 
+static inline gfp_t alloc_hugepage_gfpmask(int defrag)
+{
+	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
+}
+
+static inline struct page *alloc_hugepage_vma(int defrag,
+					      struct vm_area_struct *vma,
+					      unsigned long haddr)
+{
+	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
+			       HPAGE_PMD_ORDER, vma, haddr);
+}
+
+#ifndef CONFIG_NUMA
 static inline struct page *alloc_hugepage(int defrag)
 {
-	return alloc_pages(GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT),
+	return alloc_pages(alloc_hugepage_gfpmask(defrag),
 			   HPAGE_PMD_ORDER);
 }
+#endif
 
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			       unsigned long address, pmd_t *pmd,
@@ -639,7 +654,8 @@ int do_huge_pmd_anonymous_page(struct mm
 			return VM_FAULT_OOM;
 		if (unlikely(khugepaged_enter(vma)))
 			return VM_FAULT_OOM;
-		page = alloc_hugepage(transparent_hugepage_defrag(vma));
+		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+					  vma, haddr);
 		if (unlikely(!page))
 			goto out;
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -858,7 +874,8 @@ int do_huge_pmd_wp_page(struct mm_struct
 
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow())
-		new_page = alloc_hugepage(transparent_hugepage_defrag(vma));
+		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+					      vma, haddr);
 	else
 		new_page = NULL;
 
@@ -1655,7 +1672,11 @@ static void collapse_huge_page(struct mm
 	unsigned long hstart, hend;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+#ifndef CONFIG_NUMA
 	VM_BUG_ON(!*hpage);
+#else
+	VM_BUG_ON(*hpage);
+#endif
 
 	/*
 	 * Prevent all access to pagetables with the exception of
@@ -1693,7 +1714,15 @@ static void collapse_huge_page(struct mm
 	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
 		goto out;
 
+#ifndef CONFIG_NUMA
 	new_page = *hpage;
+#else
+	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
+	if (unlikely(!new_page)) {
+		*hpage = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+#endif
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
 		goto out;
 
@@ -1724,6 +1753,9 @@ static void collapse_huge_page(struct mm
 		spin_unlock(&mm->page_table_lock);
 		anon_vma_unlock(vma->anon_vma);
 		mem_cgroup_uncharge_page(new_page);
+#ifdef CONFIG_NUMA
+		put_page(new_page);
+#endif
 		goto out;
 	}
 
@@ -1759,7 +1791,9 @@ static void collapse_huge_page(struct mm
 	mm->nr_ptes--;
 	spin_unlock(&mm->page_table_lock);
 
+#ifndef CONFIG_NUMA
 	*hpage = NULL;
+#endif
 	khugepaged_pages_collapsed++;
 out:
 	up_write(&mm->mmap_sem);
@@ -1995,11 +2029,16 @@ static void khugepaged_do_scan(struct pa
 	while (progress < pages) {
 		cond_resched();
 
+#ifndef CONFIG_NUMA
 		if (!*hpage) {
 			*hpage = alloc_hugepage(khugepaged_defrag());
 			if (unlikely(!*hpage))
 				break;
 		}
+#else
+		if (IS_ERR(*hpage))
+			break;
+#endif
 
 		spin_lock(&khugepaged_mm_lock);
 		if (!khugepaged_scan.mm_slot)
@@ -2014,37 +2053,55 @@ static void khugepaged_do_scan(struct pa
 	}
 }
 
+static void khugepaged_alloc_sleep(void)
+{
+	DEFINE_WAIT(wait);
+	add_wait_queue(&khugepaged_wait, &wait);
+	schedule_timeout_interruptible(
+		msecs_to_jiffies(
+			khugepaged_alloc_sleep_millisecs));
+	remove_wait_queue(&khugepaged_wait, &wait);
+}
+
+#ifndef CONFIG_NUMA
 static struct page *khugepaged_alloc_hugepage(void)
 {
 	struct page *hpage;
 
 	do {
 		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			DEFINE_WAIT(wait);
-			add_wait_queue(&khugepaged_wait, &wait);
-			schedule_timeout_interruptible(
-				msecs_to_jiffies(
-					khugepaged_alloc_sleep_millisecs));
-			remove_wait_queue(&khugepaged_wait, &wait);
-		}
+		if (!hpage)
+			khugepaged_alloc_sleep();
 	} while (unlikely(!hpage) &&
 		 likely(khugepaged_enabled()));
 	return hpage;
 }
+#endif
 
 static void khugepaged_loop(void)
 {
 	struct page *hpage;
 
+#ifdef CONFIG_NUMA
+	hpage = NULL;
+#endif
 	while (likely(khugepaged_enabled())) {
+#ifndef CONFIG_NUMA
 		hpage = khugepaged_alloc_hugepage();
 		if (unlikely(!hpage))
 			break;
+#else
+		if (IS_ERR(hpage)) {
+			khugepaged_alloc_sleep();
+			hpage = NULL;
+		}
+#endif
 
 		khugepaged_do_scan(&hpage);
+#ifndef CONFIG_NUMA
 		if (hpage)
 			put_page(hpage);
+#endif
 		if (khugepaged_has_work()) {
 			DEFINE_WAIT(wait);
 			if (!khugepaged_scan_sleep_millisecs)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1794,7 +1794,7 @@ static struct page *alloc_page_interleav
 }
 
 /**
- * 	alloc_page_vma	- Allocate a page for a VMA.
+ * 	alloc_pages_vma	- Allocate a page for a VMA.
  *
  * 	@gfp:
  *      %GFP_USER    user allocation.
@@ -1803,6 +1803,7 @@ static struct page *alloc_page_interleav
  *      %GFP_FS      allocation should not call back into a file system.
  *      %GFP_ATOMIC  don't sleep.
  *
+ *	@order:Order of the GFP allocation.
  * 	@vma:  Pointer to VMA or NULL if not available.
  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
  *
@@ -1816,7 +1817,8 @@ static struct page *alloc_page_interleav
  *	Should be called with the mm_sem of the vma hold.
  */
 struct page *
-alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+		unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
@@ -1828,7 +1830,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area
 
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		mpol_cond_put(pol);
-		page = alloc_page_interleave(gfp, 0, nid);
+		page = alloc_page_interleave(gfp, order, nid);
 		put_mems_allowed();
 		return page;
 	}
@@ -1837,7 +1839,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area
 		/*
 		 * slow path: ref counted shared policy
 		 */
-		struct page *page =  __alloc_pages_nodemask(gfp, 0,
+		struct page *page =  __alloc_pages_nodemask(gfp, order,
 						zl, policy_nodemask(gfp, pol));
 		__mpol_put(pol);
 		put_mems_allowed();
@@ -1846,7 +1848,8 @@ alloc_page_vma(gfp_t gfp, struct vm_area
 	/*
 	 * fast path:  default or task policy
 	 */
-	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
+	page = __alloc_pages_nodemask(gfp, order, zl,
+				      policy_nodemask(gfp, pol));
 	put_mems_allowed();
 	return page;
 }
