From: Mel Gorman <mel@csn.ul.ie>
To: linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-mm@kvack.org
Cc: Dave Chinner <david@fromorbit.com>,
	Chris Mason <chris.mason@oracle.com>,
	Nick Piggin <npiggin@suse.de>, Rik van Riel <riel@redhat.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Christoph Hellwig <hch@infradead.org>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Mel Gorman <mel@csn.ul.ie>
Subject: [PATCH 11/12] vmscan: Write out dirty pages in batch
Date: Mon, 14 Jun 2010 12:17:52 +0100
Message-ID: <1276514273-27693-12-git-send-email-mel@csn.ul.ie>
In-Reply-To: <1276514273-27693-1-git-send-email-mel@csn.ul.ie>

Page reclaim cleans individual pages using a_ops->writepage() because,
from the VM's perspective, it knows that pages in a particular zone must
be freed soon, it considers the target page to be the oldest, and it
does not want to wait while background flushers clean other pages. From
a filesystem perspective this is extremely inefficient as it generates a
very seeky IO pattern, leading to the perverse situation where it can
take longer to clean all the dirty pages than it otherwise would.

This patch queues all dirty pages at once to maximise the chances that
the write requests get merged efficiently. It also makes the next patch,
which avoids writeout from direct reclaim, more straightforward.
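
For orientation, a simplified sketch of the two-pass flow this patch
introduces in shrink_page_list() (illustrative only; the keep/activate
bookkeeping and error paths are omitted, the real hunks are below):

	LIST_HEAD(dirty_pages);
	bool cleaned = false;

restart_dirty:
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		list_del(&page->lru);

		/* ... the usual reclaim checks ... */

		if (PageDirty(page) && !cleaned) {
			/* Pass 1: defer the page, keeping it locked */
			list_add(&page->lru, &dirty_pages);
			continue;
		}
		/*
		 * Pass 2: dirty pages re-enter the normal PageDirty
		 * handling with their IO already queued
		 */
	}

	if (!cleaned && !list_empty(&dirty_pages)) {
		/*
		 * Queue the whole batch back-to-back so the block
		 * layer has a chance to merge adjacent requests
		 */
		clean_page_list(&dirty_pages, sc);
		page_list = &dirty_pages;
		cleaned = true;
		goto restart_dirty;
	}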

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
---
 mm/vmscan.c |  175 ++++++++++++++++++++++++++++++++++++++++++++---------------
 1 files changed, 131 insertions(+), 44 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 019f0af..4856a2a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -323,6 +323,55 @@ typedef enum {
 	PAGE_CLEAN,
 } pageout_t;
 
+int write_reclaim_page(struct page *page, struct address_space *mapping,
+						enum pageout_io sync_writeback)
+{
+	int res;
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_NONE,
+		.nr_to_write = SWAP_CLUSTER_MAX,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+		.nonblocking = 1,
+		.for_reclaim = 1,
+	};
+
+	if (!clear_page_dirty_for_io(page))
+		return PAGE_CLEAN;
+
+	SetPageReclaim(page);
+	res = mapping->a_ops->writepage(page, &wbc);
+	/*
+	 * XXX: This is the Holy Hand Grenade of PotentiallyInvalidMapping. As
+	 * the page lock has been dropped by ->writepage, that mapping could
+	 * be anything
+	 */
+	if (res < 0)
+		handle_write_error(mapping, page, res);
+	if (res == AOP_WRITEPAGE_ACTIVATE) {
+		ClearPageReclaim(page);
+		return PAGE_ACTIVATE;
+	}
+
+	/*
+	 * Wait on writeback if requested to. This happens when
+	 * direct reclaiming a large contiguous area and the
+	 * first attempt to free a range of pages fails.
+	 */
+	if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+		wait_on_page_writeback(page);
+
+	if (!PageWriteback(page)) {
+		/* synchronous write or broken a_ops? */
+		ClearPageReclaim(page);
+	}
+	trace_mm_vmscan_writepage(page,
+		sync_writeback == PAGEOUT_IO_SYNC);
+	inc_zone_page_state(page, NR_VMSCAN_WRITE);
+
+	return PAGE_SUCCESS;
+}
+
 /*
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
@@ -367,45 +416,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 	if (!may_write_to_queue(mapping->backing_dev_info))
 		return PAGE_KEEP;
 
-	if (clear_page_dirty_for_io(page)) {
-		int res;
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_NONE,
-			.nr_to_write = SWAP_CLUSTER_MAX,
-			.range_start = 0,
-			.range_end = LLONG_MAX,
-			.nonblocking = 1,
-			.for_reclaim = 1,
-		};
-
-		SetPageReclaim(page);
-		res = mapping->a_ops->writepage(page, &wbc);
-		if (res < 0)
-			handle_write_error(mapping, page, res);
-		if (res == AOP_WRITEPAGE_ACTIVATE) {
-			ClearPageReclaim(page);
-			return PAGE_ACTIVATE;
-		}
-
-		/*
-		 * Wait on writeback if requested to. This happens when
-		 * direct reclaiming a large contiguous area and the
-		 * first attempt to free a range of pages fails.
-		 */
-		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
-			wait_on_page_writeback(page);
-
-		if (!PageWriteback(page)) {
-			/* synchronous write or broken a_ops? */
-			ClearPageReclaim(page);
-		}
-		trace_mm_vmscan_writepage(page,
-			sync_writeback == PAGEOUT_IO_SYNC);
-		inc_zone_page_state(page, NR_VMSCAN_WRITE);
-		return PAGE_SUCCESS;
-	}
-
-	return PAGE_CLEAN;
+	return write_reclaim_page(page, mapping, sync_writeback);
 }
 
 /*
@@ -640,19 +651,75 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
 }
 
 /*
+ * Clean a list of pages. It is expected that all the pages on page_list have been
+ * locked as part of isolation from the LRU.
+ *
+ * XXX: Is there a problem with holding multiple page locks like this?
+ */
+static noinline_for_stack void clean_page_list(struct list_head *page_list,
+				struct scan_control *sc)
+{
+	LIST_HEAD(ret_pages);
+	struct page *page;
+
+	if (!sc->may_writepage)
+		return;
+
+	/* Write the pages out to disk in ranges where possible */
+	while (!list_empty(page_list)) {
+		struct address_space *mapping;
+		bool may_enter_fs;
+
+		page = lru_to_page(page_list);
+		list_del(&page->lru);
+		list_add(&page->lru, &ret_pages);
+
+		mapping = page_mapping(page);
+		if (!mapping || !may_write_to_queue(mapping->backing_dev_info)) {
+			unlock_page(page);
+			continue;
+		}
+
+		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
+			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
+		if (!may_enter_fs) {
+			unlock_page(page);
+			continue;
+		}
+
+		/* Write single page */
+		switch (write_reclaim_page(page, mapping, PAGEOUT_IO_ASYNC)) {
+		case PAGE_KEEP:
+		case PAGE_ACTIVATE:
+		case PAGE_CLEAN:
+			unlock_page(page);
+			break;
+		case PAGE_SUCCESS:
+			break;
+		}
+	}
+	list_splice(&ret_pages, page_list);
+}
+
+/*
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
 					struct scan_control *sc,
 					enum pageout_io sync_writeback)
 {
-	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
-	int pgactivate = 0;
+	LIST_HEAD(putback_pages);
+	LIST_HEAD(dirty_pages);
+	struct list_head *ret_list = page_list;
+	int pgactivate;
+	bool cleaned = false;
 	unsigned long nr_reclaimed = 0;
 
+	pgactivate = 0;
 	cond_resched();
 
+restart_dirty:
 	while (!list_empty(page_list)) {
 		enum page_references references;
 		struct address_space *mapping;
@@ -741,7 +808,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 		}
 
-		if (PageDirty(page)) {
+		if (PageDirty(page))  {
+			/*
+			 * On the first pass, dirty pages are put on a separate
+			 * list. IO is then queued based on ranges of pages for
+			 * each unique mapping in the list
+			 */
+			if (!cleaned) {
+				/* Keep locked for clean_page_list */
+				list_add(&page->lru, &dirty_pages);
+				goto keep_dirty;
+			}
+
 			if (references == PAGEREF_RECLAIM_CLEAN)
 				goto keep_locked;
 			if (!may_enter_fs)
@@ -852,13 +930,22 @@ activate_locked:
 keep_locked:
 		unlock_page(page);
 keep:
-		list_add(&page->lru, &ret_pages);
+		list_add(&page->lru, &putback_pages);
+keep_dirty:
 		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
 	}
 
+	if (!cleaned && !list_empty(&dirty_pages)) {
+		clean_page_list(&dirty_pages, sc);
+		page_list = &dirty_pages;
+		cleaned = true;
+		goto restart_dirty;
+	}
+	BUG_ON(!list_empty(&dirty_pages));
+
 	free_page_list(&free_pages);
 
-	list_splice(&ret_pages, page_list);
+	list_splice(&putback_pages, ret_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 	return nr_reclaimed;
 }
-- 
1.7.1

