* [PATCH] mm: Do not allocate duplicate stack variables in shrink_page_list()
From: Kirill Tkhai @ 2019-01-31 15:37 UTC
  To: akpm, mhocko, hannes, ktkhai, linux-mm, linux-kernel

On the path shrink_inactive_list() -> shrink_page_list()
we allocate stack variables for the statistics twice.
This is completely useless, and it just consumes more
stack than we really need.
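
For reference, the statistics in question live in struct reclaim_stat.
Reconstructed from the fields touched in the diff below, it looks
roughly like this (a sketch, not necessarily the exact definition in
the tree):

	struct reclaim_stat {
		unsigned nr_dirty;
		unsigned nr_unqueued_dirty;
		unsigned nr_congested;
		unsigned nr_writeback;
		unsigned nr_immediate;
		unsigned nr_activate;	/* feeds count_vm_events(PGACTIVATE) */
		unsigned nr_ref_keep;
		unsigned nr_unmap_fail;
	};

Eight 4-byte counters make 32 bytes, which matches the stack delta
measured below.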

The patch kills the duplicate stack variables in shrink_page_list(),
which reduces stack usage and object file size significantly:

Stack usage:
Before: vmscan.c:1122:22:shrink_page_list	648	static
After:  vmscan.c:1122:22:shrink_page_list	616	static

Size of vmscan.o:
         text	   data	    bss	    dec	    hex	filename
Before: 56866	   4720	    128	  61714	   f112	mm/vmscan.o
After:  56770	   4720	    128	  61618	   f0b2	mm/vmscan.o
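
One way to reproduce numbers like these (a sketch, assuming an in-tree
GCC build; exact figures vary by compiler and config):

	$ make KCFLAGS=-fstack-usage mm/vmscan.o
	$ grep shrink_page_list mm/vmscan.su	# per-function stack usage
	$ size mm/vmscan.o			# text/data/bss table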

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 mm/vmscan.c |   44 ++++++++++++++------------------------------
 1 file changed, 14 insertions(+), 30 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index dd9554f5d788..54a389fd91e2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1128,16 +1128,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
-	int pgactivate = 0;
-	unsigned nr_unqueued_dirty = 0;
-	unsigned nr_dirty = 0;
-	unsigned nr_congested = 0;
 	unsigned nr_reclaimed = 0;
-	unsigned nr_writeback = 0;
-	unsigned nr_immediate = 0;
-	unsigned nr_ref_keep = 0;
-	unsigned nr_unmap_fail = 0;
 
+	memset(stat, 0, sizeof(*stat));
 	cond_resched();
 
 	while (!list_empty(page_list)) {
@@ -1181,10 +1174,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		page_check_dirty_writeback(page, &dirty, &writeback);
 		if (dirty || writeback)
-			nr_dirty++;
+			stat->nr_dirty++;
 
 		if (dirty && !writeback)
-			nr_unqueued_dirty++;
+			stat->nr_unqueued_dirty++;
 
 		/*
 		 * Treat this page as congested if the underlying BDI is or if
@@ -1196,7 +1189,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (((dirty || writeback) && mapping &&
 		     inode_write_congested(mapping->host)) ||
 		    (writeback && PageReclaim(page)))
-			nr_congested++;
+			stat->nr_congested++;
 
 		/*
 		 * If a page at the tail of the LRU is under writeback, there
@@ -1245,7 +1238,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
 			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
-				nr_immediate++;
+				stat->nr_immediate++;
 				goto activate_locked;
 
 			/* Case 2 above */
@@ -1263,7 +1256,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * and it's also appropriate in global reclaim.
 				 */
 				SetPageReclaim(page);
-				nr_writeback++;
+				stat->nr_writeback++;
 				goto activate_locked;
 
 			/* Case 3 above */
@@ -1283,7 +1276,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
 		case PAGEREF_KEEP:
-			nr_ref_keep++;
+			stat->nr_ref_keep++;
 			goto keep_locked;
 		case PAGEREF_RECLAIM:
 		case PAGEREF_RECLAIM_CLEAN:
@@ -1348,7 +1341,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 			if (!try_to_unmap(page, flags)) {
-				nr_unmap_fail++;
+				stat->nr_unmap_fail++;
 				goto activate_locked;
 			}
 		}
@@ -1496,7 +1489,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		VM_BUG_ON_PAGE(PageActive(page), page);
 		if (!PageMlocked(page)) {
 			SetPageActive(page);
-			pgactivate++;
+			stat->nr_activate++;
 			count_memcg_page_event(page, PGACTIVATE);
 		}
 keep_locked:
@@ -1511,18 +1504,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	free_unref_page_list(&free_pages);
 
 	list_splice(&ret_pages, page_list);
-	count_vm_events(PGACTIVATE, pgactivate);
-
-	if (stat) {
-		stat->nr_dirty = nr_dirty;
-		stat->nr_congested = nr_congested;
-		stat->nr_unqueued_dirty = nr_unqueued_dirty;
-		stat->nr_writeback = nr_writeback;
-		stat->nr_immediate = nr_immediate;
-		stat->nr_activate = pgactivate;
-		stat->nr_ref_keep = nr_ref_keep;
-		stat->nr_unmap_fail = nr_unmap_fail;
-	}
+	count_vm_events(PGACTIVATE, stat->nr_activate);
+
 	return nr_reclaimed;
 }
 
@@ -1534,6 +1517,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
+	struct reclaim_stat dummy_stat;
 	unsigned long ret;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
@@ -1547,7 +1531,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_IGNORE_ACCESS, NULL, true);
+			TTU_IGNORE_ACCESS, &dummy_stat, true);
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1922,7 +1906,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	struct reclaim_stat stat = {};
+	struct reclaim_stat stat;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
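
A note on the resulting calling convention, based on the hunks above:
shrink_page_list() now writes through stat unconditionally (the memset
plus the stat->nr_* increments), so NULL is no longer a valid argument.
Callers that do not care about the statistics pass a throwaway struct,
as reclaim_clean_pages_from_list() now does:

	struct reclaim_stat dummy_stat;	/* filled by the callee, never read */

	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
			       TTU_IGNORE_ACCESS, &dummy_stat, true);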



* Re: [PATCH] mm: Do not allocate duplicate stack variables in shrink_page_list()
From: Daniel Jordan @ 2019-01-31 16:52 UTC
  To: Kirill Tkhai; +Cc: akpm, mhocko, hannes, linux-mm, linux-kernel

On Thu, Jan 31, 2019 at 06:37:02PM +0300, Kirill Tkhai wrote:
> On the path shrink_inactive_list() -> shrink_page_list()
> we allocate stack variables for the statistics twice.
> This is completely useless, and it just consumes more
> stack than we really need.
> 
> The patch kills the duplicate stack variables in shrink_page_list(),
> which reduces stack usage and object file size significantly:
> 
> Stack usage:
> Before: vmscan.c:1122:22:shrink_page_list	648	static
> After:  vmscan.c:1122:22:shrink_page_list	616	static
> 
> Size of vmscan.o:
>          text	   data	    bss	    dec	    hex	filename
> Before: 56866	   4720	    128	  61714	   f112	mm/vmscan.o
> After:  56770	   4720	    128	  61618	   f0b2	mm/vmscan.o
> 
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> ---
>  mm/vmscan.c |   44 ++++++++++++++------------------------------
>  1 file changed, 14 insertions(+), 30 deletions(-)


> @@ -1534,6 +1517,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
>  		.priority = DEF_PRIORITY,
>  		.may_unmap = 1,
>  	};
> +	struct reclaim_stat dummy_stat;
>  	unsigned long ret;
>  	struct page *page, *next;
>  	LIST_HEAD(clean_pages);
> @@ -1547,7 +1531,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
>  	}
>  
>  	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
> -			TTU_IGNORE_ACCESS, NULL, true);
> +			TTU_IGNORE_ACCESS, &dummy_stat, true);
>  	list_splice(&clean_pages, page_list);
>  	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
>  	return ret;

Stack usage stays the same coming from reclaim_clean_pages_from_list():
the 32 bytes saved inside shrink_page_list() reappear there as
dummy_stat, a dummy variable added back after many such locals were
taken away in 3c710c1ad11b ("mm, vmscan: extract shrink_page_list...").

But overall this seems like a win to me.  You can add

Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>


* Re: [PATCH] mm: Do not allocate duplicate stack variables in shrink_page_list()
From: Michal Hocko @ 2019-02-01 11:26 UTC
  To: Kirill Tkhai; +Cc: akpm, hannes, linux-mm, linux-kernel

On Thu 31-01-19 18:37:02, Kirill Tkhai wrote:
> On the path shrink_inactive_list() -> shrink_page_list()
> we allocate stack variables for the statistics twice.
> This is completely useless, and it just consumes more
> stack than we really need.
> 
> The patch kills the duplicate stack variables in shrink_page_list(),
> which reduces stack usage and object file size significantly:

"significantly" is a bit of an overstatement for 32B saved...

> Stack usage:
> Before: vmscan.c:1122:22:shrink_page_list	648	static
> After:  vmscan.c:1122:22:shrink_page_list	616	static
> 
> Size of vmscan.o:
>          text	   data	    bss	    dec	    hex	filename
> Before: 56866	   4720	    128	  61714	   f112	mm/vmscan.o
> After:  56770	   4720	    128	  61618	   f0b2	mm/vmscan.o
> 
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>

Acked-by: Michal Hocko <mhocko@suse.com>

> ---
>  mm/vmscan.c |   44 ++++++++++++++------------------------------
>  1 file changed, 14 insertions(+), 30 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index dd9554f5d788..54a389fd91e2 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1128,16 +1128,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  {
>  	LIST_HEAD(ret_pages);
>  	LIST_HEAD(free_pages);
> -	int pgactivate = 0;
> -	unsigned nr_unqueued_dirty = 0;
> -	unsigned nr_dirty = 0;
> -	unsigned nr_congested = 0;
>  	unsigned nr_reclaimed = 0;
> -	unsigned nr_writeback = 0;
> -	unsigned nr_immediate = 0;
> -	unsigned nr_ref_keep = 0;
> -	unsigned nr_unmap_fail = 0;
>  
> +	memset(stat, 0, sizeof(*stat));
>  	cond_resched();
>  
>  	while (!list_empty(page_list)) {
> @@ -1181,10 +1174,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  		 */
>  		page_check_dirty_writeback(page, &dirty, &writeback);
>  		if (dirty || writeback)
> -			nr_dirty++;
> +			stat->nr_dirty++;
>  
>  		if (dirty && !writeback)
> -			nr_unqueued_dirty++;
> +			stat->nr_unqueued_dirty++;
>  
>  		/*
>  		 * Treat this page as congested if the underlying BDI is or if
> @@ -1196,7 +1189,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  		if (((dirty || writeback) && mapping &&
>  		     inode_write_congested(mapping->host)) ||
>  		    (writeback && PageReclaim(page)))
> -			nr_congested++;
> +			stat->nr_congested++;
>  
>  		/*
>  		 * If a page at the tail of the LRU is under writeback, there
> @@ -1245,7 +1238,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  			if (current_is_kswapd() &&
>  			    PageReclaim(page) &&
>  			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
> -				nr_immediate++;
> +				stat->nr_immediate++;
>  				goto activate_locked;
>  
>  			/* Case 2 above */
> @@ -1263,7 +1256,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  				 * and it's also appropriate in global reclaim.
>  				 */
>  				SetPageReclaim(page);
> -				nr_writeback++;
> +				stat->nr_writeback++;
>  				goto activate_locked;
>  
>  			/* Case 3 above */
> @@ -1283,7 +1276,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  		case PAGEREF_ACTIVATE:
>  			goto activate_locked;
>  		case PAGEREF_KEEP:
> -			nr_ref_keep++;
> +			stat->nr_ref_keep++;
>  			goto keep_locked;
>  		case PAGEREF_RECLAIM:
>  		case PAGEREF_RECLAIM_CLEAN:
> @@ -1348,7 +1341,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  			if (unlikely(PageTransHuge(page)))
>  				flags |= TTU_SPLIT_HUGE_PMD;
>  			if (!try_to_unmap(page, flags)) {
> -				nr_unmap_fail++;
> +				stat->nr_unmap_fail++;
>  				goto activate_locked;
>  			}
>  		}
> @@ -1496,7 +1489,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  		VM_BUG_ON_PAGE(PageActive(page), page);
>  		if (!PageMlocked(page)) {
>  			SetPageActive(page);
> -			pgactivate++;
> +			stat->nr_activate++;
>  			count_memcg_page_event(page, PGACTIVATE);
>  		}
>  keep_locked:
> @@ -1511,18 +1504,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  	free_unref_page_list(&free_pages);
>  
>  	list_splice(&ret_pages, page_list);
> -	count_vm_events(PGACTIVATE, pgactivate);
> -
> -	if (stat) {
> -		stat->nr_dirty = nr_dirty;
> -		stat->nr_congested = nr_congested;
> -		stat->nr_unqueued_dirty = nr_unqueued_dirty;
> -		stat->nr_writeback = nr_writeback;
> -		stat->nr_immediate = nr_immediate;
> -		stat->nr_activate = pgactivate;
> -		stat->nr_ref_keep = nr_ref_keep;
> -		stat->nr_unmap_fail = nr_unmap_fail;
> -	}
> +	count_vm_events(PGACTIVATE, stat->nr_activate);
> +
>  	return nr_reclaimed;
>  }
>  
> @@ -1534,6 +1517,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
>  		.priority = DEF_PRIORITY,
>  		.may_unmap = 1,
>  	};
> +	struct reclaim_stat dummy_stat;
>  	unsigned long ret;
>  	struct page *page, *next;
>  	LIST_HEAD(clean_pages);
> @@ -1547,7 +1531,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
>  	}
>  
>  	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
> -			TTU_IGNORE_ACCESS, NULL, true);
> +			TTU_IGNORE_ACCESS, &dummy_stat, true);
>  	list_splice(&clean_pages, page_list);
>  	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
>  	return ret;
> @@ -1922,7 +1906,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
>  	unsigned long nr_scanned;
>  	unsigned long nr_reclaimed = 0;
>  	unsigned long nr_taken;
> -	struct reclaim_stat stat = {};
> +	struct reclaim_stat stat;
>  	int file = is_file_lru(lru);
>  	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
>  	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
> 

-- 
Michal Hocko
SUSE Labs


* Re: [PATCH] mm: Do not allocate duplicate stack variables in shrink_page_list()
From: Kirill Tkhai @ 2019-02-01 11:59 UTC
  To: Michal Hocko; +Cc: akpm, hannes, linux-mm, linux-kernel

On 01.02.2019 14:26, Michal Hocko wrote:
> On Thu 31-01-19 18:37:02, Kirill Tkhai wrote:
>> On the path shrink_inactive_list() -> shrink_page_list()
>> we allocate stack variables for the statistics twice.
>> This is completely useless, and it just consumes more
>> stack than we really need.
>>
>> The patch kills the duplicate stack variables in shrink_page_list(),
>> which reduces stack usage and object file size significantly:
> 
> significantly is a bit of an overstatement for 32B saved...

32/648*100 = ~4.94%

Almost 5%. I think it's not so bad...

>> Stack usage:
>> Before: vmscan.c:1122:22:shrink_page_list	648	static
>> After:  vmscan.c:1122:22:shrink_page_list	616	static
>>
>> Size of vmscan.o:
>>          text	   data	    bss	    dec	    hex	filename
>> Before: 56866	   4720	    128	  61714	   f112	mm/vmscan.o
>> After:  56770	   4720	    128	  61618	   f0b2	mm/vmscan.o
>>
>> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> 
> Acked-by: Michal Hocko <mhocko@suse.com>
> 
>> ---
>>  mm/vmscan.c |   44 ++++++++++++++------------------------------
>>  1 file changed, 14 insertions(+), 30 deletions(-)
>>
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index dd9554f5d788..54a389fd91e2 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -1128,16 +1128,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  {
>>  	LIST_HEAD(ret_pages);
>>  	LIST_HEAD(free_pages);
>> -	int pgactivate = 0;
>> -	unsigned nr_unqueued_dirty = 0;
>> -	unsigned nr_dirty = 0;
>> -	unsigned nr_congested = 0;
>>  	unsigned nr_reclaimed = 0;
>> -	unsigned nr_writeback = 0;
>> -	unsigned nr_immediate = 0;
>> -	unsigned nr_ref_keep = 0;
>> -	unsigned nr_unmap_fail = 0;
>>  
>> +	memset(stat, 0, sizeof(*stat));
>>  	cond_resched();
>>  
>>  	while (!list_empty(page_list)) {
>> @@ -1181,10 +1174,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  		 */
>>  		page_check_dirty_writeback(page, &dirty, &writeback);
>>  		if (dirty || writeback)
>> -			nr_dirty++;
>> +			stat->nr_dirty++;
>>  
>>  		if (dirty && !writeback)
>> -			nr_unqueued_dirty++;
>> +			stat->nr_unqueued_dirty++;
>>  
>>  		/*
>>  		 * Treat this page as congested if the underlying BDI is or if
>> @@ -1196,7 +1189,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  		if (((dirty || writeback) && mapping &&
>>  		     inode_write_congested(mapping->host)) ||
>>  		    (writeback && PageReclaim(page)))
>> -			nr_congested++;
>> +			stat->nr_congested++;
>>  
>>  		/*
>>  		 * If a page at the tail of the LRU is under writeback, there
>> @@ -1245,7 +1238,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  			if (current_is_kswapd() &&
>>  			    PageReclaim(page) &&
>>  			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
>> -				nr_immediate++;
>> +				stat->nr_immediate++;
>>  				goto activate_locked;
>>  
>>  			/* Case 2 above */
>> @@ -1263,7 +1256,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  				 * and it's also appropriate in global reclaim.
>>  				 */
>>  				SetPageReclaim(page);
>> -				nr_writeback++;
>> +				stat->nr_writeback++;
>>  				goto activate_locked;
>>  
>>  			/* Case 3 above */
>> @@ -1283,7 +1276,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  		case PAGEREF_ACTIVATE:
>>  			goto activate_locked;
>>  		case PAGEREF_KEEP:
>> -			nr_ref_keep++;
>> +			stat->nr_ref_keep++;
>>  			goto keep_locked;
>>  		case PAGEREF_RECLAIM:
>>  		case PAGEREF_RECLAIM_CLEAN:
>> @@ -1348,7 +1341,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  			if (unlikely(PageTransHuge(page)))
>>  				flags |= TTU_SPLIT_HUGE_PMD;
>>  			if (!try_to_unmap(page, flags)) {
>> -				nr_unmap_fail++;
>> +				stat->nr_unmap_fail++;
>>  				goto activate_locked;
>>  			}
>>  		}
>> @@ -1496,7 +1489,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  		VM_BUG_ON_PAGE(PageActive(page), page);
>>  		if (!PageMlocked(page)) {
>>  			SetPageActive(page);
>> -			pgactivate++;
>> +			stat->nr_activate++;
>>  			count_memcg_page_event(page, PGACTIVATE);
>>  		}
>>  keep_locked:
>> @@ -1511,18 +1504,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>>  	free_unref_page_list(&free_pages);
>>  
>>  	list_splice(&ret_pages, page_list);
>> -	count_vm_events(PGACTIVATE, pgactivate);
>> -
>> -	if (stat) {
>> -		stat->nr_dirty = nr_dirty;
>> -		stat->nr_congested = nr_congested;
>> -		stat->nr_unqueued_dirty = nr_unqueued_dirty;
>> -		stat->nr_writeback = nr_writeback;
>> -		stat->nr_immediate = nr_immediate;
>> -		stat->nr_activate = pgactivate;
>> -		stat->nr_ref_keep = nr_ref_keep;
>> -		stat->nr_unmap_fail = nr_unmap_fail;
>> -	}
>> +	count_vm_events(PGACTIVATE, stat->nr_activate);
>> +
>>  	return nr_reclaimed;
>>  }
>>  
>> @@ -1534,6 +1517,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
>>  		.priority = DEF_PRIORITY,
>>  		.may_unmap = 1,
>>  	};
>> +	struct reclaim_stat dummy_stat;
>>  	unsigned long ret;
>>  	struct page *page, *next;
>>  	LIST_HEAD(clean_pages);
>> @@ -1547,7 +1531,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
>>  	}
>>  
>>  	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
>> -			TTU_IGNORE_ACCESS, NULL, true);
>> +			TTU_IGNORE_ACCESS, &dummy_stat, true);
>>  	list_splice(&clean_pages, page_list);
>>  	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
>>  	return ret;
>> @@ -1922,7 +1906,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
>>  	unsigned long nr_scanned;
>>  	unsigned long nr_reclaimed = 0;
>>  	unsigned long nr_taken;
>> -	struct reclaim_stat stat = {};
>> +	struct reclaim_stat stat;
>>  	int file = is_file_lru(lru);
>>  	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
>>  	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
>>
> 

