From: Mel Gorman <mgorman@suse.de>
To: Peter Zijlstra <a.p.zijlstra@chello.nl>, Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
	Ingo Molnar <mingo@kernel.org>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Linux-MM <linux-mm@kvack.org>,
	LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@suse.de>
Subject: [PATCH 47/63] mm: numa: Do not batch handle PMD pages
Date: Mon,  7 Oct 2013 11:29:25 +0100	[thread overview]
Message-ID: <1381141781-10992-48-git-send-email-mgorman@suse.de> (raw)
In-Reply-To: <1381141781-10992-1-git-send-email-mgorman@suse.de>

With the THP migration races closed it is still possible to occasionally
see corruption. The problem is related to handling PMD pages in batch.
When a page fault is handled it can be assumed that the page being
faulted will also be flushed from the TLB. The same flushing does not
happen when handling PMD pages in batch. Fixing that is straightforward,
but there are a number of reasons not to:

1. Multiple TLB flushes may have to be sent depending on what pages get
   migrated.
2. The handling of PMDs in batch means that faults get accounted to
   the task that is handling the fault. While care is taken to only
   mark PMDs where the last CPU and PID match, the comparison can still
   go wrong because only a truncated PID is stored and compared (see
   the sketch after this list).
3. Batching on the PMD level may reduce faults, but setting pmd_numa
   requires taking a heavy lock that can contend with THP migration,
   and handling the fault requires the release/acquisition of the PTL
   for every page migrated. It's still pretty heavy.
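
To illustrate point 2: below is a minimal userspace sketch (not kernel
code; the 8-bit PID field and the helper names here are simplifying
assumptions that mirror the cpupid packing consulted via cpupid_to_pid()
in this series). Two tasks whose PIDs differ by a multiple of 256 produce
the same truncated value, so a "last CPU and PID match" check can wrongly
treat their faults as belonging to a single task.

#include <stdio.h>

#define PID_BITS 8                          /* assumed width of the stored PID */
#define PID_MASK ((1 << PID_BITS) - 1)

static int make_cpupid(int cpu, int pid)
{
	/* pack the CPU and the truncated PID into one value */
	return (cpu << PID_BITS) | (pid & PID_MASK);
}

static int cpupid_pid(int cpupid) { return cpupid & PID_MASK; }
static int cpupid_cpu(int cpupid) { return cpupid >> PID_BITS; }

int main(void)
{
	int a = make_cpupid(3, 1200);        /* task A: pid 1200 */
	int b = make_cpupid(3, 1200 + 256);  /* task B: pid 1456 */

	/* both PIDs truncate to the same 8-bit value (176), so the
	 * batch heuristic would see them as the same CPU/PID pair */
	printf("A: cpu=%d pid=%d\n", cpupid_cpu(a), cpupid_pid(a));
	printf("B: cpu=%d pid=%d\n", cpupid_cpu(b), cpupid_pid(b));
	return 0;
}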

PMD batch handling is not something that people have ever been happy
with. This patch removes it; later patches will deal with the
additional fault overhead using more intelligent migration rate adaptation.

Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 mm/memory.c   | 101 ++--------------------------------------------------------
 mm/mprotect.c |  47 ++-------------------------
 2 files changed, 4 insertions(+), 144 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index eba846b..9898eeb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3606,103 +3606,6 @@ out:
 	return 0;
 }
 
-/* NUMA hinting page fault entry point for regular pmds */
-#ifdef CONFIG_NUMA_BALANCING
-static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		     unsigned long addr, pmd_t *pmdp)
-{
-	pmd_t pmd;
-	pte_t *pte, *orig_pte;
-	unsigned long _addr = addr & PMD_MASK;
-	unsigned long offset;
-	spinlock_t *ptl;
-	bool numa = false;
-	int last_cpupid;
-
-	spin_lock(&mm->page_table_lock);
-	pmd = *pmdp;
-	if (pmd_numa(pmd)) {
-		set_pmd_at(mm, _addr, pmdp, pmd_mknonnuma(pmd));
-		numa = true;
-	}
-	spin_unlock(&mm->page_table_lock);
-
-	if (!numa)
-		return 0;
-
-	/* we're in a page fault so some vma must be in the range */
-	BUG_ON(!vma);
-	BUG_ON(vma->vm_start >= _addr + PMD_SIZE);
-	offset = max(_addr, vma->vm_start) & ~PMD_MASK;
-	VM_BUG_ON(offset >= PMD_SIZE);
-	orig_pte = pte = pte_offset_map_lock(mm, pmdp, _addr, &ptl);
-	pte += offset >> PAGE_SHIFT;
-	for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
-		pte_t pteval = *pte;
-		struct page *page;
-		int page_nid = -1;
-		int target_nid;
-		bool migrated = false;
-		int flags = 0;
-
-		if (!pte_present(pteval))
-			continue;
-		if (!pte_numa(pteval))
-			continue;
-		if (addr >= vma->vm_end) {
-			vma = find_vma(mm, addr);
-			/* there's a pte present so there must be a vma */
-			BUG_ON(!vma);
-			BUG_ON(addr < vma->vm_start);
-		}
-		if (pte_numa(pteval)) {
-			pteval = pte_mknonnuma(pteval);
-			set_pte_at(mm, addr, pte, pteval);
-		}
-		page = vm_normal_page(vma, addr, pteval);
-		if (unlikely(!page))
-			continue;
-
-		/*
-		 * Avoid grouping on DSO/COW pages in specific and RO pages
-		 * in general, RO pages shouldn't hurt as much anyway since
-		 * they can be in shared cache state.
-		 */
-		if (!pte_write(pteval))
-			flags |= TNF_NO_GROUP;
-
-		last_cpupid = page_cpupid_last(page);
-		page_nid = page_to_nid(page);
-		target_nid = numa_migrate_prep(page, vma, addr, page_nid);
-		pte_unmap_unlock(pte, ptl);
-		if (target_nid != -1) {
-			migrated = migrate_misplaced_page(page, vma, target_nid);
-			if (migrated) {
-				page_nid = target_nid;
-				flags |= TNF_MIGRATED;
-			}
-		} else {
-			put_page(page);
-		}
-
-		if (page_nid != -1)
-			task_numa_fault(last_cpupid, page_nid, 1, flags);
-
-		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
-	}
-	pte_unmap_unlock(orig_pte, ptl);
-
-	return 0;
-}
-#else
-static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		     unsigned long addr, pmd_t *pmdp)
-{
-	BUG();
-	return 0;
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3841,8 +3744,8 @@ retry:
 		}
 	}
 
-	if (pmd_numa(*pmd))
-		return do_pmd_numa_page(mm, vma, address, pmd);
+	/* THP should already have been handled */
+	BUG_ON(pmd_numa(*pmd));
 
 	/*
 	 * Use __pte_alloc instead of pte_alloc_map, because we can't
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 9a74855..a0302ac 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,15 +37,12 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa, bool *ret_all_same_cpupid)
+		int dirty_accountable, int prot_numa)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
-	bool all_same_cpupid = true;
-	int last_cpu = -1;
-	int last_pid = -1;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	arch_enter_lazy_mmu_mode();
@@ -64,19 +61,6 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 				page = vm_normal_page(vma, addr, oldpte);
 				if (page) {
-					int cpupid = page_cpupid_last(page);
-					int this_cpu = cpupid_to_cpu(cpupid);
-					int this_pid = cpupid_to_pid(cpupid);
-
-					if (last_cpu == -1)
-						last_cpu = this_cpu;
-					if (last_pid == -1)
-						last_pid = this_pid;
-					if (last_cpu != this_cpu ||
-					    last_pid != this_pid) {
-						all_same_cpupid = false;
-					}
-
 					if (!pte_numa(oldpte)) {
 						ptent = pte_mknuma(ptent);
 						updated = true;
@@ -115,26 +99,9 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
-	*ret_all_same_cpupid = all_same_cpupid;
 	return pages;
 }
 
-#ifdef CONFIG_NUMA_BALANCING
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmd)
-{
-	spin_lock(&mm->page_table_lock);
-	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
-	spin_unlock(&mm->page_table_lock);
-}
-#else
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmd)
-{
-	BUG();
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
 static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		pud_t *pud, unsigned long addr, unsigned long end,
 		pgprot_t newprot, int dirty_accountable, int prot_numa)
@@ -142,7 +109,6 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 	pmd_t *pmd;
 	unsigned long next;
 	unsigned long pages = 0;
-	bool all_same_cpupid;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -168,17 +134,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-				 dirty_accountable, prot_numa, &all_same_cpupid);
+				 dirty_accountable, prot_numa);
 		pages += this_pages;
-
-		/*
-		 * If we are changing protections for NUMA hinting faults then
-		 * set pmd_numa if the examined pages were all on the same
-		 * node. This allows a regular PMD to be handled as one fault
-		 * and effectively batches the taking of the PTL
-		 */
-		if (prot_numa && this_pages && all_same_cpupid)
-			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 
 	return pages;
-- 
1.8.4


Thread overview: 340+ messages
2013-10-07 10:28 [PATCH 0/63] Basic scheduler support for automatic NUMA balancing V9 Mel Gorman
2013-10-07 10:28 ` Mel Gorman
2013-10-07 10:28 ` [PATCH 01/63] hotplug: Optimize {get,put}_online_cpus() Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 10:28 ` [PATCH 02/63] mm: numa: Document automatic NUMA balancing sysctls Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 12:46   ` Rik van Riel
2013-10-07 12:46     ` Rik van Riel
2013-10-09 17:24   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 03/63] sched, numa: Comment fixlets Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 12:46   ` Rik van Riel
2013-10-07 12:46     ` Rik van Riel
2013-10-09 17:24   ` [tip:sched/core] sched/numa: Fix comments tip-bot for Peter Zijlstra
2013-10-07 10:28 ` [PATCH 04/63] mm: numa: Do not account for a hinting fault if we raced Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 12:47   ` Rik van Riel
2013-10-07 12:47     ` Rik van Riel
2013-10-09 17:24   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-29 10:42   ` [tip:core/urgent] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 05/63] mm: Wait for THP migrations to complete during NUMA hinting faults Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 13:55   ` Rik van Riel
2013-10-07 13:55     ` Rik van Riel
2013-10-09 17:24   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-29 10:42   ` [tip:core/urgent] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 06/63] mm: Prevent parallel splits during THP migration Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 14:01   ` Rik van Riel
2013-10-07 14:01     ` Rik van Riel
2013-10-09 17:24   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-29 10:42   ` [tip:core/urgent] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 07/63] mm: numa: Sanitize task_numa_fault() callsites Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 14:02   ` Rik van Riel
2013-10-07 14:02     ` Rik van Riel
2013-10-09 17:25   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-29 10:42   ` [tip:core/urgent] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 08/63] mm: Close races between THP migration and PMD numa clearing Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 14:02   ` Rik van Riel
2013-10-07 14:02     ` Rik van Riel
2013-10-09 17:25   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-29 10:42   ` [tip:core/urgent] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 09/63] mm: Account for a THP NUMA hinting update as one PTE update Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 14:02   ` Rik van Riel
2013-10-07 14:02     ` Rik van Riel
2013-10-09 17:25   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-29 10:43   ` [tip:core/urgent] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 10/63] mm: Do not flush TLB during protection change if !pte_present && !migration_entry Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 15:12   ` Rik van Riel
2013-10-07 15:12     ` Rik van Riel
2013-10-09 17:25   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 11/63] mm: Only flush TLBs if a transhuge PMD is modified for NUMA pte scanning Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-09 17:25   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 12/63] mm: numa: Do not migrate or account for hinting faults on the zero page Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 17:10   ` Rik van Riel
2013-10-07 17:10     ` Rik van Riel
2013-10-09 17:25   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 13/63] sched: numa: Mitigate chance that same task always updates PTEs Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 17:24   ` Rik van Riel
2013-10-07 17:24     ` Rik van Riel
2013-10-09 17:26   ` [tip:sched/core] sched/numa: " tip-bot for Peter Zijlstra
2013-10-07 10:28 ` [PATCH 14/63] sched: numa: Continue PTE scanning even if migrate rate limited Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 17:24   ` Rik van Riel
2013-10-07 17:24     ` Rik van Riel
2013-10-09 17:26   ` [tip:sched/core] sched/numa: " tip-bot for Peter Zijlstra
2013-10-07 10:28 ` [PATCH 15/63] Revert "mm: sched: numa: Delay PTE scanning until a task is scheduled on a new node" Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 17:42   ` Rik van Riel
2013-10-07 17:42     ` Rik van Riel
2013-10-09 17:26   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 16/63] sched: numa: Initialise numa_next_scan properly Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 17:44   ` Rik van Riel
2013-10-07 17:44     ` Rik van Riel
2013-10-09 17:26   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 17/63] sched: Set the scan rate proportional to the memory usage of the task being scanned Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 17:44   ` Rik van Riel
2013-10-07 17:44     ` Rik van Riel
2013-10-09 17:26   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 18/63] sched: numa: Slow scan rate if no NUMA hinting faults are being recorded Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 18:02   ` Rik van Riel
2013-10-07 18:02     ` Rik van Riel
2013-10-09 17:26   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 19/63] sched: Track NUMA hinting faults on per-node basis Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 18:02   ` Rik van Riel
2013-10-07 18:02     ` Rik van Riel
2013-10-09 17:27   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-12-04  5:32   ` [PATCH 19/63] sched: " Wanpeng Li
2013-12-04  5:37     ` Wanpeng Li
2013-10-07 10:28 ` [PATCH 20/63] sched: Select a preferred node with the most numa hinting faults Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 18:04   ` Rik van Riel
2013-10-07 18:04     ` Rik van Riel
2013-10-09 17:27   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:28 ` [PATCH 21/63] sched: Update NUMA hinting faults once per scan Mel Gorman
2013-10-07 10:28   ` Mel Gorman
2013-10-07 18:39   ` Rik van Riel
2013-10-07 18:39     ` Rik van Riel
2013-10-09 17:27   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 22/63] sched: Favour moving tasks towards the preferred node Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:39   ` Rik van Riel
2013-10-07 18:39     ` Rik van Riel
2013-10-09 17:27   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 23/63] sched: Resist moving tasks towards nodes with fewer hinting faults Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:40   ` Rik van Riel
2013-10-07 18:40     ` Rik van Riel
2013-10-09 17:27   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 24/63] sched: Reschedule task on preferred NUMA node once selected Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:40   ` Rik van Riel
2013-10-07 18:40     ` Rik van Riel
2013-10-09 17:27   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 25/63] sched: Add infrastructure for split shared/private accounting of NUMA hinting faults Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:41   ` Rik van Riel
2013-10-07 18:41     ` Rik van Riel
2013-10-09 17:28   ` [tip:sched/core] sched/numa: Add infrastructure for split shared/ private " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 26/63] sched: Check current->mm before allocating NUMA faults Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:41   ` Rik van Riel
2013-10-07 18:41     ` Rik van Riel
2013-10-09 17:28   ` [tip:sched/core] sched/numa: Check current-> mm " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 27/63] mm: numa: Scan pages with elevated page_mapcount Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:43   ` Rik van Riel
2013-10-07 18:43     ` Rik van Riel
2013-10-09 17:28   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 28/63] sched: Remove check that skips small VMAs Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:44   ` Rik van Riel
2013-10-07 18:44     ` Rik van Riel
2013-10-09 17:28   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 29/63] sched: Set preferred NUMA node based on number of private faults Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:45   ` Rik van Riel
2013-10-07 18:45     ` Rik van Riel
2013-10-09 17:28   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 30/63] sched: Do not migrate memory immediately after switching node Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:28   ` [tip:sched/core] sched/numa: " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 31/63] mm: numa: only unmap migrate-on-fault VMAs Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:29   ` [tip:sched/core] mm: numa: Limit NUMA scanning to " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 32/63] sched: Avoid overloading CPUs on a preferred NUMA node Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:58   ` Rik van Riel
2013-10-07 18:58     ` Rik van Riel
2013-10-09 17:29   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 33/63] sched: Retry migration of tasks to CPU on a preferred node Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 18:58   ` Rik van Riel
2013-10-07 18:58     ` Rik van Riel
2013-10-09 17:29   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 34/63] sched: numa: increment numa_migrate_seq when task runs in correct location Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:29   ` [tip:sched/core] sched/numa: Increment " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 35/63] sched: numa: Do not trap hinting faults for shared libraries Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:04   ` Rik van Riel
2013-10-07 19:04     ` Rik van Riel
2013-10-09 17:29   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 36/63] mm: numa: Only trap pmd hinting faults if we would otherwise trap PTE faults Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:06   ` Rik van Riel
2013-10-07 19:06     ` Rik van Riel
2013-10-09 17:29   ` [tip:sched/core] mm: numa: Trap pmd hinting faults only " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 37/63] stop_machine: Introduce stop_two_cpus() Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:30   ` [tip:sched/core] " tip-bot for Peter Zijlstra
2013-10-07 10:29 ` [PATCH 38/63] sched: Introduce migrate_swap() Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:06   ` Rik van Riel
2013-10-07 19:06     ` Rik van Riel
2013-10-09 17:30   ` [tip:sched/core] sched/numa: " tip-bot for Peter Zijlstra
2013-10-10 18:17     ` Peter Zijlstra
2013-10-10 19:04       ` Rik van Riel
2013-10-15  9:55       ` Mel Gorman
2013-10-17 16:49       ` [tip:sched/core] sched: Fix race in migrate_swap_stop() tip-bot for Peter Zijlstra
2013-10-07 10:29 ` [PATCH 39/63] sched: numa: Use a system-wide search to find swap/migration candidates Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:07   ` Rik van Riel
2013-10-07 19:07     ` Rik van Riel
2013-10-09 17:30   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 40/63] sched: numa: Favor placing a task on the preferred node Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:07   ` Rik van Riel
2013-10-07 19:07     ` Rik van Riel
2013-10-09 17:30   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 41/63] sched: numa: fix placement of workloads spread across multiple nodes Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:30   ` [tip:sched/core] sched/numa: Fix " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 42/63] mm: numa: Change page last {nid,pid} into {cpu,pid} Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:08   ` Rik van Riel
2013-10-07 19:08     ` Rik van Riel
2013-10-09 17:30   ` [tip:sched/core] mm: numa: Change page last {nid,pid} into {cpu, pid} tip-bot for Peter Zijlstra
2013-10-07 10:29 ` [PATCH 43/63] sched: numa: Use {cpu, pid} to create task groups for shared faults Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:09   ` Rik van Riel
2013-10-07 19:09     ` Rik van Riel
2013-10-09 17:31   ` [tip:sched/core] sched/numa: " tip-bot for Peter Zijlstra
2013-10-07 10:29 ` [PATCH 44/63] sched: numa: Report a NUMA task group ID Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:09   ` Rik van Riel
2013-10-07 19:09     ` Rik van Riel
2013-10-09 17:31   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 45/63] mm: numa: copy cpupid on page migration Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:31   ` [tip:sched/core] mm: numa: Copy " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 46/63] mm: numa: Do not group on RO pages Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:10   ` Rik van Riel
2013-10-07 19:10     ` Rik van Riel
2013-10-09 17:31   ` [tip:sched/core] " tip-bot for Peter Zijlstra
2013-10-07 10:29 ` Mel Gorman [this message]
2013-10-07 10:29   ` [PATCH 47/63] mm: numa: Do not batch handle PMD pages Mel Gorman
2013-10-07 19:11   ` Rik van Riel
2013-10-07 19:11     ` Rik van Riel
2013-10-09 17:31   ` [tip:sched/core] " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 48/63] sched: numa: stay on the same node if CLONE_VM Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:31   ` [tip:sched/core] sched/numa: Stay " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 49/63] sched: numa: use group fault statistics in numa placement Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:32   ` [tip:sched/core] sched/numa: Use " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 50/63] sched: numa: call task_numa_free from do_execve Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:32   ` [tip:sched/core] sched/numa: Call task_numa_free() from do_execve () tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 51/63] sched: numa: Prevent parallel updates to group stats during placement Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:13   ` Rik van Riel
2013-10-07 19:13     ` Rik van Riel
2013-10-09 17:32   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 52/63] sched: numa: add debugging Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:13   ` Rik van Riel
2013-10-07 19:13     ` Rik van Riel
2013-10-09 17:32   ` [tip:sched/core] sched/numa: Add debugging tip-bot for Ingo Molnar
2013-10-07 10:29 ` [PATCH 53/63] sched: numa: Decide whether to favour task or group weights based on swap candidate relationships Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:32   ` [tip:sched/core] sched/numa: " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 54/63] sched: numa: fix task or group comparison Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:32   ` [tip:sched/core] sched/numa: Fix " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 55/63] sched: numa: Avoid migrating tasks that are placed on their preferred node Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:14   ` Rik van Riel
2013-10-07 19:14     ` Rik van Riel
2013-10-09 17:33   ` [tip:sched/core] sched/numa: " tip-bot for Peter Zijlstra
2013-10-07 10:29 ` [PATCH 56/63] sched: numa: be more careful about joining numa groups Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:33   ` [tip:sched/core] sched/numa: Be " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 57/63] sched: numa: Take false sharing into account when adapting scan rate Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:14   ` Rik van Riel
2013-10-07 19:14     ` Rik van Riel
2013-10-09 17:33   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 58/63] sched: numa: adjust scan rate in task_numa_placement Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:33   ` [tip:sched/core] sched/numa: Adjust " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 59/63] sched: numa: Remove the numa_balancing_scan_period_reset sysctl Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:14   ` Rik van Riel
2013-10-07 19:14     ` Rik van Riel
2013-10-09 17:33   ` [tip:sched/core] sched/numa: " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 60/63] mm: numa: revert temporarily disabling of NUMA migration Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:33   ` [tip:sched/core] mm: numa: Revert " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 61/63] sched: numa: skip some page migrations after a shared fault Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:34   ` [tip:sched/core] sched/numa: Skip " tip-bot for Rik van Riel
2013-10-07 10:29 ` [PATCH 62/63] sched: numa: use unsigned longs for numa group fault stats Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-07 19:15   ` Rik van Riel
2013-10-07 19:15     ` Rik van Riel
2013-10-09 17:34   ` [tip:sched/core] sched/numa: Use " tip-bot for Mel Gorman
2013-10-07 10:29 ` [PATCH 63/63] sched: numa: periodically retry task_numa_migrate Mel Gorman
2013-10-07 10:29   ` Mel Gorman
2013-10-09 17:34   ` [tip:sched/core] sched/numa: Retry task_numa_migrate() periodically tip-bot for Rik van Riel
2013-10-09 11:03 ` [PATCH 0/63] Basic scheduler support for automatic NUMA balancing V9 Ingo Molnar
2013-10-09 11:03   ` Ingo Molnar
2013-10-09 11:11   ` Ingo Molnar
2013-10-09 11:11     ` Ingo Molnar
2013-10-09 11:13     ` Ingo Molnar
2013-10-09 11:13       ` Ingo Molnar
2013-10-09 12:05   ` Peter Zijlstra
2013-10-09 12:05     ` Peter Zijlstra
2013-10-09 12:48     ` Ingo Molnar
2013-10-09 12:48       ` Ingo Molnar
2013-10-10  7:05   ` Mel Gorman
2013-10-10  7:05     ` Mel Gorman
2013-10-09 16:28 ` Ingo Molnar
2013-10-09 16:29   ` Ingo Molnar
2013-10-09 16:57     ` Ingo Molnar
2013-10-09 16:57       ` Ingo Molnar
2013-10-09 17:09       ` Ingo Molnar
2013-10-09 17:09         ` Ingo Molnar
2013-10-09 17:11         ` Peter Zijlstra
2013-10-09 17:11           ` Peter Zijlstra
2013-10-09 17:08   ` Peter Zijlstra
2013-10-09 17:08     ` Peter Zijlstra
2013-10-09 17:15     ` Ingo Molnar
2013-10-09 17:15       ` Ingo Molnar
2013-10-09 17:18       ` Peter Zijlstra
2013-10-09 17:18         ` Peter Zijlstra
2013-10-24 12:26 ` Automatic NUMA balancing patches for tip-urgent/stable Mel Gorman
2013-10-24 12:26   ` Mel Gorman
2013-10-26 12:11   ` Ingo Molnar
2013-10-26 12:11     ` Ingo Molnar
2013-10-29  9:42     ` Mel Gorman
2013-10-29  9:42       ` Mel Gorman
2013-10-29  9:48       ` Ingo Molnar
2013-10-29  9:48         ` Ingo Molnar
2013-10-29 10:24         ` Mel Gorman
2013-10-29 10:24           ` Mel Gorman
2013-10-29 10:41           ` Ingo Molnar
2013-10-29 10:41             ` Ingo Molnar
2013-10-29 12:48             ` Mel Gorman
2013-10-29 12:48               ` Mel Gorman
2013-10-31  9:51   ` [RFC GIT PULL] NUMA-balancing memory corruption fixes Ingo Molnar
2013-10-31  9:51     ` Ingo Molnar
2013-10-31 22:25     ` Linus Torvalds
2013-10-31 22:25       ` Linus Torvalds
2013-11-01  7:36       ` Ingo Molnar
2013-11-01  7:36         ` Ingo Molnar
  -- strict thread matches above, loose matches on Subject: below --
2013-09-27 13:26 [PATCH 0/63] Basic scheduler support for automatic NUMA balancing V8 Mel Gorman
2013-09-27 13:27 ` [PATCH 47/63] mm: numa: Do not batch handle PMD pages Mel Gorman
2013-09-27 13:27   ` Mel Gorman
