From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Andrea Arcangeli <aarcange@redhat.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Al Viro <viro@zeniv.linux.org.uk>,
	Hugh Dickins <hughd@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>, Jan Kara <jack@suse.cz>,
	Mel Gorman <mgorman@suse.de>,
	linux-mm@kvack.org, Andi Kleen <ak@linux.intel.com>,
	Matthew Wilcox <matthew.r.wilcox@intel.com>,
	"Kirill A. Shutemov" <kirill@shutemov.name>,
	Hillf Danton <dhillf@gmail.com>,
	linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv2, RFC 04/30] radix-tree: implement preload for multiple contiguous elements
Date: Thu, 14 Mar 2013 19:50:09 +0200	[thread overview]
Message-ID: <1363283435-7666-5-git-send-email-kirill.shutemov@linux.intel.com> (raw)
In-Reply-To: <1363283435-7666-1-git-send-email-kirill.shutemov@linux.intel.com>

From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>

Currently radix_tree_preload() only guarantees enough nodes to insert
one element. It's a hard limit: you cannot batch a number of inserts
under one tree_lock.

This patch introduces radix_tree_preload_count(), which allows
preallocating enough nodes to insert a number of *contiguous*
elements.
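
For illustration, a batched insert under one tree_lock would look
roughly like the sketch below (the helper name is made up, error
rollback is omitted, and the real page cache conversion comes later
in the series):

	/* Hypothetical caller, not part of this patch. */
	static int add_subpages_locked(struct address_space *mapping,
				       struct page *page, pgoff_t index,
				       unsigned nr_pages)
	{
		unsigned i;
		int err;

		/* Preallocate nodes for nr_pages contiguous slots. */
		err = radix_tree_preload_count(nr_pages, GFP_KERNEL);
		if (err)
			return err;

		spin_lock_irq(&mapping->tree_lock);
		for (i = 0; i < nr_pages; i++) {
			/* Cannot fail with -ENOMEM after the preload. */
			err = radix_tree_insert(&mapping->page_tree,
						index + i, page + i);
			if (err)
				break;
		}
		spin_unlock_irq(&mapping->tree_lock);

		/* Drop the preempt_disable() taken by the preload. */
		radix_tree_preload_end();

		return err;
	}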

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/radix-tree.h |    3 +++
 lib/radix-tree.c           |   32 +++++++++++++++++++++++++-------
 2 files changed, 28 insertions(+), 7 deletions(-)

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ffc444c..81318cb 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -83,6 +83,8 @@ do {									\
 	(root)->rnode = NULL;						\
 } while (0)
 
+#define RADIX_TREE_PRELOAD_NR		512 /* For THP's benefit */
+
 /**
  * Radix-tree synchronization
  *
@@ -231,6 +233,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan);
 int radix_tree_preload(gfp_t gfp_mask);
+int radix_tree_preload_count(unsigned size, gfp_t gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e796429..9bef0ac 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -81,16 +81,24 @@ static struct kmem_cache *radix_tree_node_cachep;
  * The worst case is a zero height tree with just a single item at index 0,
  * and then inserting an item at index ULONG_MAX. This requires 2 new branches
  * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
+ *
+ * Worst case for adding N contiguous items is adding entries at indexes
+ * (ULONG_MAX - N + 1) to ULONG_MAX. It requires the nodes for a single
+ * worst-case item, plus extra nodes if the range crosses node boundaries.
+ *
  * Hence:
  */
-#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
+#define RADIX_TREE_PRELOAD_MIN (RADIX_TREE_MAX_PATH * 2 - 1)
+#define RADIX_TREE_PRELOAD_MAX \
+	(RADIX_TREE_PRELOAD_MIN + \
+	 DIV_ROUND_UP(RADIX_TREE_PRELOAD_NR - 1, RADIX_TREE_MAP_SIZE))
 
 /*
  * Per-cpu pool of preloaded nodes
  */
 struct radix_tree_preload {
 	int nr;
-	struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
+	struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_MAX];
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
@@ -257,29 +265,34 @@ radix_tree_node_free(struct radix_tree_node *node)
 
 /*
  * Load up this CPU's radix_tree_node buffer with sufficient objects to
- * ensure that the addition of a single element in the tree cannot fail.  On
- * success, return zero, with preemption disabled.  On error, return -ENOMEM
+ * ensure that adding `size' *contiguous* elements to the tree cannot fail.
+ * On success, return zero, with preemption disabled.  On error, return -ENOMEM
  * with preemption not disabled.
  *
  * To make use of this facility, the radix tree must be initialised without
  * __GFP_WAIT being passed to INIT_RADIX_TREE().
  */
-int radix_tree_preload(gfp_t gfp_mask)
+int radix_tree_preload_count(unsigned size, gfp_t gfp_mask)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
 	int ret = -ENOMEM;
+	int alloc = RADIX_TREE_PRELOAD_MIN +
+		DIV_ROUND_UP(size - 1, RADIX_TREE_MAP_SIZE);
+
+	if (size > RADIX_TREE_PRELOAD_NR)
+		return -ENOMEM;
 
 	preempt_disable();
 	rtp = &__get_cpu_var(radix_tree_preloads);
-	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
+	while (rtp->nr < alloc) {
 		preempt_enable();
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
 		preempt_disable();
 		rtp = &__get_cpu_var(radix_tree_preloads);
-		if (rtp->nr < ARRAY_SIZE(rtp->nodes))
+		if (rtp->nr < alloc)
 			rtp->nodes[rtp->nr++] = node;
 		else
 			kmem_cache_free(radix_tree_node_cachep, node);
@@ -288,6 +301,11 @@ int radix_tree_preload(gfp_t gfp_mask)
 out:
 	return ret;
 }
+
+int radix_tree_preload(gfp_t gfp_mask)
+{
+	return radix_tree_preload_count(1, gfp_mask);
+}
 EXPORT_SYMBOL(radix_tree_preload);
 
 /*
-- 
1.7.10.4
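
For reference, assuming the common configuration (RADIX_TREE_MAP_SHIFT
of 6, i.e. RADIX_TREE_MAP_SIZE of 64) and 64-bit unsigned long, the new
limits above work out to:

	RADIX_TREE_MAX_PATH    = DIV_ROUND_UP(64, 6)        = 11
	RADIX_TREE_PRELOAD_MIN = 11 * 2 - 1                 = 21
	RADIX_TREE_PRELOAD_MAX = 21 + DIV_ROUND_UP(511, 64) = 21 + 8 = 29

i.e. the per-cpu nodes[] array grows from 21 to 29 pointers.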


