From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1753839Ab0FNLSB (ORCPT );
	Mon, 14 Jun 2010 07:18:01 -0400
Received: from gir.skynet.ie ([193.1.99.77]:47596 "EHLO gir.skynet.ie"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1752473Ab0FNLR4 (ORCPT );
	Mon, 14 Jun 2010 07:17:56 -0400
From: Mel Gorman 
To: linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-mm@kvack.org
Cc: Dave Chinner , Chris Mason , Nick Piggin , Rik van Riel ,
	Johannes Weiner , Christoph Hellwig , KAMEZAWA Hiroyuki ,
	Andrew Morton , Mel Gorman 
Subject: [PATCH 02/12] tracing, vmscan: Add trace events for LRU page isolation
Date: Mon, 14 Jun 2010 12:17:43 +0100
Message-Id: <1276514273-27693-3-git-send-email-mel@csn.ul.ie>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1276514273-27693-1-git-send-email-mel@csn.ul.ie>
References: <1276514273-27693-1-git-send-email-mel@csn.ul.ie>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

This patch adds an event for when pages are isolated en-masse from the
LRU lists. This event augments the information available on LRU traffic
and can be used to evaluate lumpy reclaim.

Signed-off-by: Mel Gorman 
---
 include/trace/events/vmscan.h |   46 +++++++++++++++++++++++++++++++++++++++++
 mm/vmscan.c                   |   14 ++++++++++++
 2 files changed, 60 insertions(+), 0 deletions(-)

diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index f76521f..a331454 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -109,6 +109,52 @@ TRACE_EVENT(mm_vmscan_direct_reclaim_end,
 	TP_printk("nr_reclaimed=%lu", __entry->nr_reclaimed)
 );
 
+TRACE_EVENT(mm_vmscan_lru_isolate,
+
+	TP_PROTO(int order,
+		unsigned long nr_requested,
+		unsigned long nr_scanned,
+		unsigned long nr_taken,
+		unsigned long nr_lumpy_taken,
+		unsigned long nr_lumpy_dirty,
+		unsigned long nr_lumpy_failed,
+		int isolate_mode),
+
+	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode),
+
+	TP_STRUCT__entry(
+		__field(int, order)
+		__field(unsigned long, nr_requested)
+		__field(unsigned long, nr_scanned)
+		__field(unsigned long, nr_taken)
+		__field(unsigned long, nr_lumpy_taken)
+		__field(unsigned long, nr_lumpy_dirty)
+		__field(unsigned long, nr_lumpy_failed)
+		__field(int, isolate_mode)
+	),
+
+	TP_fast_assign(
+		__entry->order = order;
+		__entry->nr_requested = nr_requested;
+		__entry->nr_scanned = nr_scanned;
+		__entry->nr_taken = nr_taken;
+		__entry->nr_lumpy_taken = nr_lumpy_taken;
+		__entry->nr_lumpy_dirty = nr_lumpy_dirty;
+		__entry->nr_lumpy_failed = nr_lumpy_failed;
+		__entry->isolate_mode = isolate_mode;
+	),
+
+	TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu",
+		__entry->isolate_mode,
+		__entry->order,
+		__entry->nr_requested,
+		__entry->nr_scanned,
+		__entry->nr_taken,
+		__entry->nr_lumpy_taken,
+		__entry->nr_lumpy_dirty,
+		__entry->nr_lumpy_failed)
+);
+
 #endif /* _TRACE_VMSCAN_H */
 
 /* This part must be outside protection */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6bfb579..25bf05a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -917,6 +917,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		unsigned long *scanned, int order, int mode, int file)
 {
 	unsigned long nr_taken = 0;
+	unsigned long nr_lumpy_taken = 0, nr_lumpy_dirty = 0, nr_lumpy_failed = 0;
 	unsigned long scan;
 
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
@@ -994,12 +995,25 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 				list_move(&cursor_page->lru, dst);
 				mem_cgroup_del_lru(cursor_page);
 				nr_taken++;
+				nr_lumpy_taken++;
+				if (PageDirty(cursor_page))
+					nr_lumpy_dirty++;
 				scan++;
+			} else {
+				if (mode == ISOLATE_BOTH &&
+						page_count(cursor_page))
+					nr_lumpy_failed++;
 			}
 		}
 	}
 
 	*scanned = scan;
+
+	trace_mm_vmscan_lru_isolate(order,
+			nr_to_scan, scan,
+			nr_taken,
+			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
+			mode);
 	return nr_taken;
 }
-- 
1.7.1
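
[Editorial note, not part of the patch: the changelog says the event "can be
used to evaluate lumpy reclaim". As a rough illustration of what that might
look like, below is a minimal sketch of a hypothetical userspace helper
(the file name lru_isolate_summary.c, the stdin-based design and the
aggregation choices are all assumptions of this sketch, not anything defined
by the series). It assumes ftrace is built in, debugfs is mounted at
/sys/kernel/debug, and the event has been enabled by writing 1 to
/sys/kernel/debug/tracing/events/vmscan/mm_vmscan_lru_isolate/enable; it
then parses the TP_printk() output read from trace_pipe and totals the
contig_* counters.]

/*
 * lru_isolate_summary.c - hypothetical post-processing helper, NOT part
 * of this patch. Reads already-formatted ftrace output on stdin (for
 * example piped from /sys/kernel/debug/tracing/trace_pipe), picks out
 * mm_vmscan_lru_isolate events and totals the fields printed by the
 * event's TP_printk() so lumpy reclaim behaviour can be eyeballed.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	unsigned long events = 0, lumpy_events = 0;
	unsigned long requested = 0, scanned = 0, taken = 0;
	unsigned long contig_taken = 0, contig_dirty = 0, contig_failed = 0;

	while (fgets(line, sizeof(line), stdin)) {
		int isolate_mode, order;
		unsigned long nr_req, nr_scan, nr_take;
		unsigned long nr_ctaken, nr_cdirty, nr_cfailed;
		/* Locate the payload emitted by the event's TP_printk() */
		char *p = strstr(line, "mm_vmscan_lru_isolate:");

		if (!p)
			continue;

		if (sscanf(p, "mm_vmscan_lru_isolate: isolate_mode=%d "
			   "order=%d nr_requested=%lu nr_scanned=%lu "
			   "nr_taken=%lu contig_taken=%lu contig_dirty=%lu "
			   "contig_failed=%lu",
			   &isolate_mode, &order, &nr_req, &nr_scan, &nr_take,
			   &nr_ctaken, &nr_cdirty, &nr_cfailed) != 8)
			continue;

		events++;
		if (order > 0)	/* high-order request, lumpy candidate */
			lumpy_events++;
		requested     += nr_req;
		scanned       += nr_scan;
		taken         += nr_take;
		contig_taken  += nr_ctaken;
		contig_dirty  += nr_cdirty;
		contig_failed += nr_cfailed;
	}

	printf("events=%lu (order>0: %lu) requested=%lu scanned=%lu taken=%lu\n",
	       events, lumpy_events, requested, scanned, taken);
	printf("contig_taken=%lu contig_dirty=%lu contig_failed=%lu\n",
	       contig_taken, contig_dirty, contig_failed);

	return 0;
}

[With the event enabled, something along the lines of
"cat /sys/kernel/debug/tracing/trace_pipe | ./lru_isolate_summary", run
while a workload forces reclaim, would give a crude ratio of contig_taken
to contig_failed, which is one way to gauge how often lumpy reclaim
succeeds in isolating contiguous ranges.]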