linux-kernel.vger.kernel.org archive mirror
* [patch] mm, thp: only collapse hugepages to nodes with affinity
@ 2014-07-15  1:09 David Rientjes
  2014-07-15  4:47 ` Dave Hansen
  2014-07-16  0:13 ` [patch v2] mm, tmp: only collapse hugepages to nodes with affinity for zone_reclaim_mode David Rientjes
  0 siblings, 2 replies; 15+ messages in thread
From: David Rientjes @ 2014-07-15  1:09 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Andrea Arcangeli, Mel Gorman, Rik van Riel, Kirill A. Shutemov,
	Bob Liu, linux-mm, linux-kernel

Commit 9f1b868a13ac ("mm: thp: khugepaged: add policy for finding target
node") improved the previous khugepaged logic, which allocated a
transparent hugepage from the node of the first page being collapsed.

However, it is still possible to collapse pages to remote memory, which
may suffer from additional access latency.  With the current policy, up
to 255 of the 512 pages being collapsed (with PAGE_SHIFT == 12) may be
migrated to a remote node if the majority of the pages under the pmd
reside on that node.
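
As an illustration (a sketch only, assuming x86 with 4KB base pages, so
a pmd maps HPAGE_PMD_NR == 512 ptes, and a hypothetical two-node
machine):

	khugepaged_node_load[0] = 255	/* pages currently on node 0 */
	khugepaged_node_load[1] = 257	/* pages currently on node 1 */

khugepaged_find_target_node() picks node 1 since it has the maximum hit
count, so the 255 pages on node 0 are copied into a hugepage allocated
on node 1 regardless of the distance between the two nodes.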

Introduce a strict requirement that pages may only be collapsed to nodes
whose distance from the pages' current nodes is at or below
RECLAIM_DISTANCE, to ensure the access latency of the pages being
scanned does not regress.
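
For example (a sketch assuming the default RECLAIM_DISTANCE of 30 from
include/linux/topology.h and a hypothetical SLIT):

	node distances:    0    1    2
	           0:     10   21   40
	           1:     21   10   40
	           2:     40   40   10

Pages spread across nodes 0 and 1 may still be collapsed into a single
hugepage since node_distance(0, 1) == 21 <= RECLAIM_DISTANCE, but a pmd
mapping pages from both node 0 and node 2 is skipped because
node_distance(0, 2) == 40 exceeds it, i.e. the collapse would otherwise
migrate pages to memory that would not be reclaimed from.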

Signed-off-by: David Rientjes <rientjes@google.com>
---
 mm/huge_memory.c | 54 ++++++++++++------------------------------------------
 1 file changed, 12 insertions(+), 42 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2231,34 +2231,7 @@ static void khugepaged_alloc_sleep(void)
 			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
 
-static int khugepaged_node_load[MAX_NUMNODES];
-
 #ifdef CONFIG_NUMA
-static int khugepaged_find_target_node(void)
-{
-	static int last_khugepaged_target_node = NUMA_NO_NODE;
-	int nid, target_node = 0, max_value = 0;
-
-	/* find first node with max normal pages hit */
-	for (nid = 0; nid < MAX_NUMNODES; nid++)
-		if (khugepaged_node_load[nid] > max_value) {
-			max_value = khugepaged_node_load[nid];
-			target_node = nid;
-		}
-
-	/* do some balance if several nodes have the same hit record */
-	if (target_node <= last_khugepaged_target_node)
-		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
-				nid++)
-			if (max_value == khugepaged_node_load[nid]) {
-				target_node = nid;
-				break;
-			}
-
-	last_khugepaged_target_node = target_node;
-	return target_node;
-}
-
 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 {
 	if (IS_ERR(*hpage)) {
@@ -2309,11 +2282,6 @@ static struct page
 	return *hpage;
 }
 #else
-static int khugepaged_find_target_node(void)
-{
-	return 0;
-}
-
 static inline struct page *alloc_hugepage(int defrag)
 {
 	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
@@ -2522,7 +2490,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	if (!pmd)
 		goto out;
 
-	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
 	     _pte++, _address += PAGE_SIZE) {
@@ -2538,14 +2505,18 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		page = vm_normal_page(vma, _address, pteval);
 		if (unlikely(!page))
 			goto out_unmap;
-		/*
-		 * Record which node the original page is from and save this
-		 * information to khugepaged_node_load[].
-		 * Khupaged will allocate hugepage from the node has the max
-		 * hit record.
-		 */
-		node = page_to_nid(page);
-		khugepaged_node_load[node]++;
+		if (node == NUMA_NO_NODE) {
+			node = page_to_nid(page);
+		} else {
+			int distance = node_distance(page_to_nid(page), node);
+
+			/*
+			 * Do not migrate to memory that would not be reclaimed
+			 * from.
+			 */
+			if (distance > RECLAIM_DISTANCE)
+				goto out_unmap;
+		}
 		VM_BUG_ON_PAGE(PageCompound(page), page);
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
 			goto out_unmap;
@@ -2561,7 +2532,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
 	if (ret) {
-		node = khugepaged_find_target_node();
 		/* collapse_huge_page will return with the mmap_sem released */
 		collapse_huge_page(mm, address, hpage, vma, node);
 	}


Thread overview: 15+ messages
2014-07-15  1:09 [patch] mm, thp: only collapse hugepages to nodes with affinity David Rientjes
2014-07-15  4:47 ` Dave Hansen
2014-07-15 23:17   ` David Rientjes
2014-07-16  0:13 ` [patch v2] mm, tmp: only collapse hugepages to nodes with affinity for zone_reclaim_mode David Rientjes
2014-07-16  1:22   ` Bob Liu
2014-07-16 15:47     ` Vlastimil Babka
2014-07-16 19:37       ` Hugh Dickins
2014-07-17  0:49       ` David Rientjes
2014-07-16 15:38   ` Vlastimil Babka
2014-07-17  0:54     ` David Rientjes
2014-07-17  0:59       ` [patch v3] mm, thp: " David Rientjes
2014-07-17 16:28         ` Dave Hansen
2014-07-17 21:48           ` [patch v4] " David Rientjes
2014-07-25 15:34             ` Mel Gorman
2014-07-28  8:42         ` [patch v3] " Vlastimil Babka
