* [PATCH 1/2] hugepage: Protect region tracking lists with its own spinlock
@ 2011-01-25  3:32 ` Anton Blanchard
  0 siblings, 0 replies; 30+ messages in thread
From: Anton Blanchard @ 2011-01-25  3:32 UTC (permalink / raw)
  To: dwg, mel, akpm, hughd; +Cc: linux-mm, linux-kernel


In preparation for creating a hash of spinlocks to replace the global
hugetlb_instantiation_mutex, protect the region tracking code with
its own spinlock.

Signed-off-by: Anton Blanchard <anton@samba.org> 
---

The old code locked it with either:

	down_write(&mm->mmap_sem);
or
	down_read(&mm->mmap_sem);
	mutex_lock(&hugetlb_instantiation_mutex);

I chose to keep things simple and wrap everything with a single lock.
Do we need the parallelism the old code had in the down_write case?
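
Roughly, a caller under the old convention looked something like this (a sketch
only; idx and the vma_resv_map() call are illustrative, not quoted from the tree):

	down_read(&mm->mmap_sem);
	mutex_lock(&hugetlb_instantiation_mutex);
	chg = region_chg(&vma_resv_map(vma)->regions, idx, idx + 1);
	mutex_unlock(&hugetlb_instantiation_mutex);
	up_read(&mm->mmap_sem);

With this patch the list itself is kept consistent by region_lock taken inside
the region_* helpers, so callers no longer need either of the above purely to
walk or modify the list:

	chg = region_chg(&vma_resv_map(vma)->regions, idx, idx + 1);	/* takes region_lock internally */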


Index: powerpc.git/mm/hugetlb.c
===================================================================
--- powerpc.git.orig/mm/hugetlb.c	2011-01-07 12:50:52.090440484 +1100
+++ powerpc.git/mm/hugetlb.c	2011-01-07 12:52:03.922704453 +1100
@@ -56,16 +56,6 @@ static DEFINE_SPINLOCK(hugetlb_lock);
 /*
  * Region tracking -- allows tracking of reservations and instantiated pages
  *                    across the pages in a mapping.
- *
- * The region data structures are protected by a combination of the mmap_sem
- * and the hugetlb_instantion_mutex.  To access or modify a region the caller
- * must either hold the mmap_sem for write, or the mmap_sem for read and
- * the hugetlb_instantiation mutex:
- *
- * 	down_write(&mm->mmap_sem);
- * or
- * 	down_read(&mm->mmap_sem);
- * 	mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
 	struct list_head link;
@@ -73,10 +63,14 @@ struct file_region {
 	long to;
 };
 
+static DEFINE_SPINLOCK(region_lock);
+
 static long region_add(struct list_head *head, long f, long t)
 {
 	struct file_region *rg, *nrg, *trg;
 
+	spin_lock(&region_lock);
+
 	/* Locate the region we are either in or before. */
 	list_for_each_entry(rg, head, link)
 		if (f <= rg->to)
@@ -106,6 +100,7 @@ static long region_add(struct list_head
 	}
 	nrg->from = f;
 	nrg->to = t;
+	spin_unlock(&region_lock);
 	return 0;
 }
 
@@ -114,6 +109,8 @@ static long region_chg(struct list_head
 	struct file_region *rg, *nrg;
 	long chg = 0;
 
+	spin_lock(&region_lock);
+
 	/* Locate the region we are before or in. */
 	list_for_each_entry(rg, head, link)
 		if (f <= rg->to)
@@ -124,14 +121,17 @@ static long region_chg(struct list_head
 	 * size such that we can guarantee to record the reservation. */
 	if (&rg->link == head || t < rg->from) {
 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-		if (!nrg)
-			return -ENOMEM;
+		if (!nrg) {
+			chg = -ENOMEM;
+			goto out;
+		}
 		nrg->from = f;
 		nrg->to   = f;
 		INIT_LIST_HEAD(&nrg->link);
 		list_add(&nrg->link, rg->link.prev);
 
-		return t - f;
+		chg = t - f;
+		goto out;
 	}
 
 	/* Round our left edge to the current segment if it encloses us. */
@@ -144,7 +144,7 @@ static long region_chg(struct list_head
 		if (&rg->link == head)
 			break;
 		if (rg->from > t)
-			return chg;
+			goto out;
 
 		/* We overlap with this area, if it extends futher than
 		 * us then we must extend ourselves.  Account for its
@@ -155,6 +155,9 @@ static long region_chg(struct list_head
 		}
 		chg -= rg->to - rg->from;
 	}
+out:
+
+	spin_unlock(&region_lock);
 	return chg;
 }
 
@@ -163,12 +166,16 @@ static long region_truncate(struct list_
 	struct file_region *rg, *trg;
 	long chg = 0;
 
+	spin_lock(&region_lock);
+
 	/* Locate the region we are either in or before. */
 	list_for_each_entry(rg, head, link)
 		if (end <= rg->to)
 			break;
-	if (&rg->link == head)
-		return 0;
+	if (&rg->link == head) {
+		chg = 0;
+		goto out;
+	}
 
 	/* If we are in the middle of a region then adjust it. */
 	if (end > rg->from) {
@@ -185,6 +192,9 @@ static long region_truncate(struct list_
 		list_del(&rg->link);
 		kfree(rg);
 	}
+
+out:
+	spin_unlock(&region_lock);
 	return chg;
 }
 
@@ -193,6 +203,8 @@ static long region_count(struct list_hea
 	struct file_region *rg;
 	long chg = 0;
 
+	spin_lock(&region_lock);
+
 	/* Locate each segment we overlap with, and count that overlap. */
 	list_for_each_entry(rg, head, link) {
 		int seg_from;
@@ -209,6 +221,7 @@ static long region_count(struct list_hea
 		chg += seg_to - seg_from;
 	}
 
+	spin_unlock(&region_lock);
 	return chg;
 }
 

* [PATCH 0/2] hugepage: optimize page fault path locking
@ 2013-07-26 14:27 Davidlohr Bueso
  2013-07-26 14:27   ` Davidlohr Bueso
  0 siblings, 1 reply; 30+ messages in thread
From: Davidlohr Bueso @ 2013-07-26 14:27 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Rik van Riel, Michel Lespinasse, Mel Gorman, Michal Hocko,
	AneeshKumarK.V, KAMEZAWA Hiroyuki, Hillf Danton, Hugh Dickins,
	Joonsoo Kim, David Gibson, Eric B Munson, Anton Blanchard,
	Konstantin Khlebnikov, linux-mm, linux-kernel, Davidlohr Bueso

This patchset attempts to reduce the amount of contention we impose
on the hugetlb_instantiation_mutex by replacing the global mutex with
a table of mutexes, selected based on a hash. The original discussion can 
be found here: http://lkml.org/lkml/2013/7/12/428

Patch 1: Allows the file region tracking list to be serialized by its own rwsem.
This is necessary because the next patch allows concurrent hugepage fault paths,
getting rid of the hugetlb_instantiation_mutex - which currently protects the chains
of struct file_region in inode->i_mapping->private_list (VM_MAYSHARE) or
vma_resv_map(vma)->regions (!VM_MAYSHARE).
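
As a rough illustration of the idea in patch 1 (the lock name is made up and the
actual patch may structure things differently):

	static DECLARE_RWSEM(region_rwsem);	/* illustrative name */

	static long region_add(struct list_head *head, long f, long t)
	{
		down_write(&region_rwsem);
		/* ... locate, merge and update the file_region entries ... */
		up_write(&region_rwsem);
		return 0;
	}

	static long region_count(struct list_head *head, long f, long t)
	{
		long chg = 0;

		down_read(&region_rwsem);	/* read-only walks can run concurrently */
		/* ... sum the overlap of [f, t) with the existing regions ... */
		up_read(&region_rwsem);
		return chg;
	}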

Patch 2: Originally from David Gibson; for some reason it never made it into the
kernel. Further cleanups and enhancements from Anton Blanchard and myself.
Details of how the hash key is selected are in the patch.
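
For illustration only - the table name, its size and the hash inputs below are
assumptions, the real key selection is in patch 2 - the fault path ends up doing
something along these lines:

	/* jhash2() comes from <linux/jhash.h>. */
	static struct mutex *htlb_fault_mutex_table;	/* illustrative name */
	static unsigned int num_fault_mutexes;		/* assumed to be a power of two */

	static u32 fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
	{
		unsigned long key[2];

		key[0] = (unsigned long)mapping;
		key[1] = idx;

		/* jhash2() hashes u32 words, hence the size conversion. */
		return jhash2((u32 *)key, sizeof(key) / sizeof(u32), 0) &
			(num_fault_mutexes - 1);
	}

and a fault-path caller would then do:

	hash = fault_mutex_hash(mapping, idx);
	mutex_lock(&htlb_fault_mutex_table[hash]);
	/* ... instantiate the page for this (mapping, index) ... */
	mutex_unlock(&htlb_fault_mutex_table[hash]);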

Davidlohr Bueso (2):
  hugepage: protect file regions with rwsem
  hugepage: allow parallelization of the hugepage fault path

 mm/hugetlb.c | 134 ++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 106 insertions(+), 28 deletions(-)

-- 
1.7.11.7



Thread overview: 30+ messages
2011-01-25  3:32 [PATCH 1/2] hugepage: Protect region tracking lists with its own spinlock Anton Blanchard
2011-01-25  3:32 ` Anton Blanchard
2011-01-25  3:34 ` [PATCH 2/2] hugepage: Allow parallelization of the hugepage fault path Anton Blanchard
2011-01-25  3:34   ` Anton Blanchard
2011-01-25 19:44   ` Eric B Munson
2011-01-26  9:24   ` Mel Gorman
2011-01-26  9:24     ` Mel Gorman
2011-07-15  6:06     ` Anton Blanchard
2011-07-15  6:06       ` Anton Blanchard
2011-07-15  6:08       ` [PATCH 1/2] hugepage: Protect region tracking lists with its own spinlock Anton Blanchard
2011-07-15  6:08         ` Anton Blanchard
2011-07-18 15:24         ` Eric B Munson
2011-07-15  6:10       ` [PATCH 2/2] hugepage: Allow parallelization of the hugepage fault path Anton Blanchard
2011-07-15  6:10         ` Anton Blanchard
2011-07-18 15:24         ` Eric B Munson
2011-07-21 10:17       ` Mel Gorman
2011-07-21 10:17         ` Mel Gorman
2011-07-15  7:52   ` Andi Kleen
2011-07-15  7:52     ` Andi Kleen
2011-07-15 13:10     ` David Gibson
2011-07-15 13:10       ` David Gibson
2011-01-25 19:43 ` [PATCH 1/2] hugepage: Protect region tracking lists with its own spinlock Eric B Munson
2011-01-26  9:07 ` Mel Gorman
2011-01-26  9:07   ` Mel Gorman
2013-07-26 14:27 [PATCH 0/2] hugepage: optimize page fault path locking Davidlohr Bueso
2013-07-26 14:27 ` [PATCH 2/2] hugepage: allow parallelization of the hugepage fault path Davidlohr Bueso
2013-07-26 14:27   ` Davidlohr Bueso
2013-07-28  6:00   ` Hillf Danton
2013-07-28  6:00     ` Hillf Danton
2013-07-29 19:16     ` Davidlohr Bueso
2013-07-29 19:16       ` Davidlohr Bueso
