From: Minchan Kim <minchan@kernel.org>
To: Michal Hocko <mhocko@kernel.org>
Cc: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>,
	Mel Gorman <mgorman@suse.de>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Vlastimil Babka <vbabka@suse.cz>, Rik van Riel <riel@redhat.com>,
	LKML <linux-kernel@vger.kernel.org>,
	Michal Hocko <mhocko@suse.com>
Subject: Re: [PATCH 2/7] mm, vmscan: add active list aging tracepoint
Date: Thu, 29 Dec 2016 14:33:59 +0900
Message-ID: <20161229053359.GA1815@bbox>
In-Reply-To: <20161228153032.10821-3-mhocko@kernel.org>

On Wed, Dec 28, 2016 at 04:30:27PM +0100, Michal Hocko wrote:
> From: Michal Hocko <mhocko@suse.com>
> 
> Our reclaim process has several tracepoints to tell us more about how
> things are progressing. We are, however, missing a tracepoint to track
> active list aging. Introduce mm_vmscan_lru_shrink_active which reports
> the number of scanned, rotated, deactivated and freed pages from the
> particular node's active list.
> 
> Signed-off-by: Michal Hocko <mhocko@suse.com>
> ---
>  include/linux/gfp.h           |  2 +-
>  include/trace/events/vmscan.h | 38 ++++++++++++++++++++++++++++++++++++++
>  mm/page_alloc.c               |  6 +++++-
>  mm/vmscan.c                   | 22 +++++++++++++++++-----
>  4 files changed, 61 insertions(+), 7 deletions(-)
> 
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index 4175dca4ac39..61aa9b49e86d 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -503,7 +503,7 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
>  extern void __free_pages(struct page *page, unsigned int order);
>  extern void free_pages(unsigned long addr, unsigned int order);
>  extern void free_hot_cold_page(struct page *page, bool cold);
> -extern void free_hot_cold_page_list(struct list_head *list, bool cold);
> +extern int free_hot_cold_page_list(struct list_head *list, bool cold);
>  
>  struct page_frag_cache;
>  extern void __page_frag_drain(struct page *page, unsigned int order,
> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
> index 39bad8921ca1..d34cc0ced2be 100644
> --- a/include/trace/events/vmscan.h
> +++ b/include/trace/events/vmscan.h
> @@ -363,6 +363,44 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
>  		show_reclaim_flags(__entry->reclaim_flags))
>  );
>  
> +TRACE_EVENT(mm_vmscan_lru_shrink_active,
> +
> +	TP_PROTO(int nid, unsigned long nr_scanned, unsigned long nr_freed,
> +		unsigned long nr_unevictable, unsigned long nr_deactivated,
> +		unsigned long nr_rotated, int priority, int file),
> +
> +	TP_ARGS(nid, nr_scanned, nr_freed, nr_unevictable, nr_deactivated, nr_rotated, priority, file),

I agree it is helpful. It was helpful when I investigated the aging problem
on 32-bit after node-lru was introduced. However, the question is whether we
really need all of this information. Wouldn't nr_taken, nr_deactivated,
priority and file be enough?
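(For illustration, with all of the proposed fields a single event would print
something along these lines -- the values and flags here are made up, only the
format follows the TP_printk below:

	mm_vmscan_lru_shrink_active: nid=0 nr_scanned=32 nr_freed=0 nr_unevictable=0 nr_deactivated=27 nr_rotated=5 priority=12 flags=RECLAIM_WB_FILE|RECLAIM_WB_ASYNC

so the question is how many of those fields are actually consumed in practice.)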

Also, please take a look at a minor thing below.

Thanks.

> +
> +	TP_STRUCT__entry(
> +		__field(int, nid)
> +		__field(unsigned long, nr_scanned)
> +		__field(unsigned long, nr_freed)
> +		__field(unsigned long, nr_unevictable)
> +		__field(unsigned long, nr_deactivated)
> +		__field(unsigned long, nr_rotated)
> +		__field(int, priority)
> +		__field(int, reclaim_flags)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->nid = nid;
> +		__entry->nr_scanned = nr_scanned;
> +		__entry->nr_freed = nr_freed;
> +		__entry->nr_unevictable = nr_unevictable;
> +		__entry->nr_deactivated = nr_deactivated;
> +		__entry->nr_rotated = nr_rotated;
> +		__entry->priority = priority;
> +		__entry->reclaim_flags = trace_shrink_flags(file);
> +	),
> +
> +	TP_printk("nid=%d nr_scanned=%ld nr_freed=%ld nr_unevictable=%ld nr_deactivated=%ld nr_rotated=%ld priority=%d flags=%s",
> +		__entry->nid,
> +		__entry->nr_scanned, __entry->nr_freed, __entry->nr_unevictable,
> +		__entry->nr_deactivated, __entry->nr_rotated,
> +		__entry->priority,
> +		show_reclaim_flags(__entry->reclaim_flags))
> +);
> +
>  #endif /* _TRACE_VMSCAN_H */
>  
>  /* This part must be outside protection */
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 1c24112308d6..77d204660857 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2487,14 +2487,18 @@ void free_hot_cold_page(struct page *page, bool cold)
>  /*
>   * Free a list of 0-order pages
>   */
> -void free_hot_cold_page_list(struct list_head *list, bool cold)
> +int free_hot_cold_page_list(struct list_head *list, bool cold)
>  {
>  	struct page *page, *next;
> +	int ret = 0;
>  
>  	list_for_each_entry_safe(page, next, list, lru) {
>  		trace_mm_page_free_batched(page, cold);
>  		free_hot_cold_page(page, cold);
> +		ret++;
>  	}
> +
> +	return ret;
>  }
>  
>  /*
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index c4abf08861d2..2302a1a58c6e 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1846,9 +1846,11 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
>   *
>   * The downside is that we have to touch page->_refcount against each page.
>   * But we had to alter page->flags anyway.
> + *
> + * Returns the number of pages moved to the given lru.
>   */
>  
> -static void move_active_pages_to_lru(struct lruvec *lruvec,
> +static int move_active_pages_to_lru(struct lruvec *lruvec,
>  				     struct list_head *list,
>  				     struct list_head *pages_to_free,
>  				     enum lru_list lru)
> @@ -1857,6 +1859,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
>  	unsigned long pgmoved = 0;
>  	struct page *page;
>  	int nr_pages;
> +	int nr_moved = 0;
>  
>  	while (!list_empty(list)) {
>  		page = lru_to_page(list);
> @@ -1882,11 +1885,15 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
>  				spin_lock_irq(&pgdat->lru_lock);
>  			} else
>  				list_add(&page->lru, pages_to_free);
> +		} else {
> +			nr_moved++;
>  		}
>  	}
>  
>  	if (!is_active_lru(lru))
>  		__count_vm_events(PGDEACTIVATE, pgmoved);
> +
> +	return nr_moved;
>  }
>  
>  static void shrink_active_list(unsigned long nr_to_scan,
> @@ -1902,7 +1909,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
>  	LIST_HEAD(l_inactive);
>  	struct page *page;
>  	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
> -	unsigned long nr_rotated = 0;
> +	unsigned long nr_rotated = 0, nr_unevictable = 0;
> +	unsigned long nr_freed, nr_deactivate, nr_activate;
>  	isolate_mode_t isolate_mode = 0;
>  	int file = is_file_lru(lru);
>  	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
> @@ -1935,6 +1943,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
>  
>  		if (unlikely(!page_evictable(page))) {
>  			putback_lru_page(page);
> +			nr_unevictable++;
>  			continue;
>  		}
>  
> @@ -1980,13 +1989,16 @@ static void shrink_active_list(unsigned long nr_to_scan,
>  	 */
>  	reclaim_stat->recent_rotated[file] += nr_rotated;
>  
> -	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
> -	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
> +	nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);

Who uses nr_activate here?
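(If nothing consumes it, one option -- just a sketch of what I mean -- is to
keep the call as it was:

	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);

or alternatively report nr_activate in the tracepoint as well.)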

> +	nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
>  	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
>  	spin_unlock_irq(&pgdat->lru_lock);
>  
>  	mem_cgroup_uncharge_list(&l_hold);
> -	free_hot_cold_page_list(&l_hold, true);
> +	nr_freed = free_hot_cold_page_list(&l_hold, true);
> +	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_scanned, nr_freed,
> +			nr_unevictable, nr_deactivate, nr_rotated,
> +			sc->priority, file);
>  }
>  
>  /*
> -- 
> 2.10.2
> 