All of lore.kernel.org
 help / color / mirror / Atom feed
* [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
@ 2014-06-13  5:58 ` Chen Yucong
  0 siblings, 0 replies; 8+ messages in thread
From: Chen Yucong @ 2014-06-13  5:58 UTC (permalink / raw)
  To: akpm; +Cc: mgorman, hannes, mhocko, riel, linux-mm, linux-kernel, Chen Yucong

shrink_page_list() has too many arguments — the count has already reached ten.
Some of those arguments and temporary variables introduce an extra 80 bytes
on the stack. This patch wraps five parameters into writeback_stats and removes
some temporary variables, thus making the relevant functions consume less
stack space.

Before mm/vmscan.c is changed:
   text    data     bss     dec     hex filename
6876698  957224  966656 8800578  864942 vmlinux-3.15

After mm/vmscan.c is changed:
   text    data     bss     dec     hex filename
6876506  957224  966656 8800386  864882 vmlinux-3.15


scripts/checkstack.pl can be used for checking the change of the target function stack.

Before mm/vmscan.c is changed:

0xffffffff810af103 shrink_inactive_list []:		152
0xffffffff810af43d shrink_inactive_list []:		152
-------------------------------------------------------------
0xffffffff810aede8 reclaim_clean_pages_from_list []:	184
0xffffffff810aeef8 reclaim_clean_pages_from_list []:	184
-------------------------------------------------------------
0xffffffff810ae582 shrink_page_list []:			232
0xffffffff810aedb5 shrink_page_list []:			232

After mm/vmscan.c is changed:

0xffffffff810af078 shrink_inactive_list []:		120
0xffffffff810af36d shrink_inactive_list []:		120
-------------------------------------------------------------
With: struct writeback_stats dummy = {};
0xffffffff810aed6c reclaim_clean_pages_from_list []:    152
0xffffffff810aee68 reclaim_clean_pages_from_list []:    152
-------------------------------------------------------------
With: static struct writeback_stats dummy ={};
0xffffffff810aed69 reclaim_clean_pages_from_list []:    120
0xffffffff810aee4d reclaim_clean_pages_from_list []:    120
--------------------------------------------------------------------------------------
0xffffffff810ae586 shrink_page_list []:			184   ---> sub    $0xb8,%rsp
0xffffffff810aed36 shrink_page_list []:			184   ---> add    $0xb8,%rsp

Via the above figures, we can find that the difference value of the stack is 32 for
shrink_inactive_list and reclaim_clean_pages_from_list, and this value is 48(232-184)
for shrink_page_list. From the hierarchy of functions called, the total difference
value is 80(32+48) for this change.

Changes since v1: https://lkml.org/lkml/2014/6/12/159
     * Rename arg_container to writeback_stats
     * Change the way of initializing the writeback_stats object.

Signed-off-by: Chen Yucong <slaoub@gmail.com>
---
 mm/vmscan.c |   62 ++++++++++++++++++++++++++---------------------------------
 1 file changed, 27 insertions(+), 35 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a8ffe4e..3f28e39 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -791,28 +791,31 @@ static void page_check_dirty_writeback(struct page *page,
 }
 
 /*
+ * Callers pass a prezeroed writeback_stats into the shrink functions to gather
+ * statistics about how many pages of particular states were processed
+ */
+struct writeback_stats {
+	unsigned long nr_dirty;
+	unsigned long nr_unqueued_dirty;
+	unsigned long nr_congested;
+	unsigned long nr_writeback;
+	unsigned long nr_immediate;
+};
+
+/*
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
 				      enum ttu_flags ttu_flags,
-				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_unqueued_dirty,
-				      unsigned long *ret_nr_congested,
-				      unsigned long *ret_nr_writeback,
-				      unsigned long *ret_nr_immediate,
+				      struct writeback_stats *ws,
 				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	int pgactivate = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
 	unsigned long nr_reclaimed = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
 
 	cond_resched();
 
@@ -858,10 +861,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		page_check_dirty_writeback(page, &dirty, &writeback);
 		if (dirty || writeback)
-			nr_dirty++;
+			ws->nr_dirty++;
 
 		if (dirty && !writeback)
-			nr_unqueued_dirty++;
+			ws->nr_unqueued_dirty++;
 
 		/*
 		 * Treat this page as congested if the underlying BDI is or if
@@ -872,7 +875,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		mapping = page_mapping(page);
 		if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
 		    (writeback && PageReclaim(page)))
-			nr_congested++;
+			ws->nr_congested++;
 
 		/*
 		 * If a page at the tail of the LRU is under writeback, there
@@ -916,7 +919,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
 			    zone_is_reclaim_writeback(zone)) {
-				nr_immediate++;
+				ws->nr_immediate++;
 				goto keep_locked;
 
 			/* Case 2 above */
@@ -934,7 +937,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * and it's also appropriate in global reclaim.
 				 */
 				SetPageReclaim(page);
-				nr_writeback++;
+				ws->nr_writeback++;
 
 				goto keep_locked;
 
@@ -1132,11 +1135,6 @@ keep:
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 	mem_cgroup_uncharge_end();
-	*ret_nr_dirty += nr_dirty;
-	*ret_nr_congested += nr_congested;
-	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
-	*ret_nr_writeback += nr_writeback;
-	*ret_nr_immediate += nr_immediate;
 	return nr_reclaimed;
 }
 
@@ -1148,7 +1146,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
+	unsigned long ret;
+	static struct writeback_stats dummy = { };
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1161,8 +1160,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone, &sc,
-			TTU_UNMAP|TTU_IGNORE_ACCESS,
-			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+			TTU_UNMAP|TTU_IGNORE_ACCESS, &dummy, true);
 	list_splice(&clean_pages, page_list);
 	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1469,11 +1467,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
+	struct writeback_stats ws = { };
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct zone *zone = lruvec_zone(lruvec);
@@ -1515,9 +1509,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
-				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
-				&nr_writeback, &nr_immediate,
-				false);
+					&ws, false);
 
 	spin_lock_irq(&zone->lru_lock);
 
@@ -1554,7 +1546,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * of pages under pages flagged for immediate reclaim and stall if any
 	 * are encountered in the nr_immediate check below.
 	 */
-	if (nr_writeback && nr_writeback == nr_taken)
+	if (ws.nr_writeback && ws.nr_writeback == nr_taken)
 		zone_set_flag(zone, ZONE_WRITEBACK);
 
 	/*
@@ -1566,7 +1558,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * Tag a zone as congested if all the dirty pages scanned were
 		 * backed by a congested BDI and wait_iff_congested will stall.
 		 */
-		if (nr_dirty && nr_dirty == nr_congested)
+		if (ws.nr_dirty && ws.nr_dirty == ws.nr_congested)
 			zone_set_flag(zone, ZONE_CONGESTED);
 
 		/*
@@ -1576,7 +1568,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * pages from reclaim context. It will forcibly stall in the
 		 * next check.
 		 */
-		if (nr_unqueued_dirty == nr_taken)
+		if (ws.nr_unqueued_dirty == nr_taken)
 			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
 
 		/*
@@ -1585,7 +1577,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * implies that pages are cycling through the LRU faster than
 		 * they are written so also forcibly stall.
 		 */
-		if ((nr_unqueued_dirty == nr_taken || nr_immediate) &&
+		if ((ws.nr_unqueued_dirty == nr_taken || ws.nr_immediate) &&
 		    current_may_throttle())
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
@ 2014-06-13  5:58 ` Chen Yucong
  0 siblings, 0 replies; 8+ messages in thread
From: Chen Yucong @ 2014-06-13  5:58 UTC (permalink / raw)
  To: akpm; +Cc: mgorman, hannes, mhocko, riel, linux-mm, linux-kernel, Chen Yucong

shrink_page_list() has too many arguments — the count has already reached ten.
Some of those arguments and temporary variables introduce an extra 80 bytes
on the stack. This patch wraps five parameters into writeback_stats and removes
some temporary variables, thus making the relevant functions consume less
stack space.

Before mm/vmscan.c is changed:
   text    data     bss     dec     hex filename
6876698  957224  966656 8800578  864942 vmlinux-3.15

After mm/vmscan.c is changed:
   text    data     bss     dec     hex filename
6876506  957224  966656 8800386  864882 vmlinux-3.15


scripts/checkstack.pl can be used for checking the change of the target function stack.

Before mm/vmscan.c is changed:

0xffffffff810af103 shrink_inactive_list []:		152
0xffffffff810af43d shrink_inactive_list []:		152
-------------------------------------------------------------
0xffffffff810aede8 reclaim_clean_pages_from_list []:	184
0xffffffff810aeef8 reclaim_clean_pages_from_list []:	184
-------------------------------------------------------------
0xffffffff810ae582 shrink_page_list []:			232
0xffffffff810aedb5 shrink_page_list []:			232

After mm/vmscan.c is changed:

0xffffffff810af078 shrink_inactive_list []:		120
0xffffffff810af36d shrink_inactive_list []:		120
-------------------------------------------------------------
With: struct writeback_stats dummy = {};
0xffffffff810aed6c reclaim_clean_pages_from_list []:    152
0xffffffff810aee68 reclaim_clean_pages_from_list []:    152
-------------------------------------------------------------
With: static struct writeback_stats dummy ={};
0xffffffff810aed69 reclaim_clean_pages_from_list []:    120
0xffffffff810aee4d reclaim_clean_pages_from_list []:    120
--------------------------------------------------------------------------------------
0xffffffff810ae586 shrink_page_list []:			184   ---> sub    $0xb8,%rsp
0xffffffff810aed36 shrink_page_list []:			184   ---> add    $0xb8,%rsp

Via the above figures, we can find that the difference value of the stack is 32 for
shrink_inactive_list and reclaim_clean_pages_from_list, and this value is 48(232-184)
for shrink_page_list. From the hierarchy of functions called, the total difference
value is 80(32+48) for this change.

Changes since v1: https://lkml.org/lkml/2014/6/12/159
     * Rename arg_container to writeback_stats
     * Change the way of initializing the writeback_stats object.

Signed-off-by: Chen Yucong <slaoub@gmail.com>
---
 mm/vmscan.c |   62 ++++++++++++++++++++++++++---------------------------------
 1 file changed, 27 insertions(+), 35 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a8ffe4e..3f28e39 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -791,28 +791,31 @@ static void page_check_dirty_writeback(struct page *page,
 }
 
 /*
+ * Callers pass a prezeroed writeback_stats into the shrink functions to gather
+ * statistics about how many pages of particular states were processed
+ */
+struct writeback_stats {
+	unsigned long nr_dirty;
+	unsigned long nr_unqueued_dirty;
+	unsigned long nr_congested;
+	unsigned long nr_writeback;
+	unsigned long nr_immediate;
+};
+
+/*
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
 				      enum ttu_flags ttu_flags,
-				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_unqueued_dirty,
-				      unsigned long *ret_nr_congested,
-				      unsigned long *ret_nr_writeback,
-				      unsigned long *ret_nr_immediate,
+				      struct writeback_stats *ws,
 				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	int pgactivate = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
 	unsigned long nr_reclaimed = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
 
 	cond_resched();
 
@@ -858,10 +861,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		page_check_dirty_writeback(page, &dirty, &writeback);
 		if (dirty || writeback)
-			nr_dirty++;
+			ws->nr_dirty++;
 
 		if (dirty && !writeback)
-			nr_unqueued_dirty++;
+			ws->nr_unqueued_dirty++;
 
 		/*
 		 * Treat this page as congested if the underlying BDI is or if
@@ -872,7 +875,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		mapping = page_mapping(page);
 		if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
 		    (writeback && PageReclaim(page)))
-			nr_congested++;
+			ws->nr_congested++;
 
 		/*
 		 * If a page at the tail of the LRU is under writeback, there
@@ -916,7 +919,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
 			    zone_is_reclaim_writeback(zone)) {
-				nr_immediate++;
+				ws->nr_immediate++;
 				goto keep_locked;
 
 			/* Case 2 above */
@@ -934,7 +937,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * and it's also appropriate in global reclaim.
 				 */
 				SetPageReclaim(page);
-				nr_writeback++;
+				ws->nr_writeback++;
 
 				goto keep_locked;
 
@@ -1132,11 +1135,6 @@ keep:
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 	mem_cgroup_uncharge_end();
-	*ret_nr_dirty += nr_dirty;
-	*ret_nr_congested += nr_congested;
-	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
-	*ret_nr_writeback += nr_writeback;
-	*ret_nr_immediate += nr_immediate;
 	return nr_reclaimed;
 }
 
@@ -1148,7 +1146,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
+	unsigned long ret;
+	static struct writeback_stats dummy = { };
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1161,8 +1160,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone, &sc,
-			TTU_UNMAP|TTU_IGNORE_ACCESS,
-			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+			TTU_UNMAP|TTU_IGNORE_ACCESS, &dummy, true);
 	list_splice(&clean_pages, page_list);
 	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1469,11 +1467,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
+	struct writeback_stats ws = { };
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct zone *zone = lruvec_zone(lruvec);
@@ -1515,9 +1509,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
-				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
-				&nr_writeback, &nr_immediate,
-				false);
+					&ws, false);
 
 	spin_lock_irq(&zone->lru_lock);
 
@@ -1554,7 +1546,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * of pages under pages flagged for immediate reclaim and stall if any
 	 * are encountered in the nr_immediate check below.
 	 */
-	if (nr_writeback && nr_writeback == nr_taken)
+	if (ws.nr_writeback && ws.nr_writeback == nr_taken)
 		zone_set_flag(zone, ZONE_WRITEBACK);
 
 	/*
@@ -1566,7 +1558,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * Tag a zone as congested if all the dirty pages scanned were
 		 * backed by a congested BDI and wait_iff_congested will stall.
 		 */
-		if (nr_dirty && nr_dirty == nr_congested)
+		if (ws.nr_dirty && ws.nr_dirty == ws.nr_congested)
 			zone_set_flag(zone, ZONE_CONGESTED);
 
 		/*
@@ -1576,7 +1568,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * pages from reclaim context. It will forcibly stall in the
 		 * next check.
 		 */
-		if (nr_unqueued_dirty == nr_taken)
+		if (ws.nr_unqueued_dirty == nr_taken)
 			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
 
 		/*
@@ -1585,7 +1577,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * implies that pages are cycling through the LRU faster than
 		 * they are written so also forcibly stall.
 		 */
-		if ((nr_unqueued_dirty == nr_taken || nr_immediate) &&
+		if ((ws.nr_unqueued_dirty == nr_taken || ws.nr_immediate) &&
 		    current_may_throttle())
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
-- 
1.7.10.4

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
  2014-06-13  5:58 ` Chen Yucong
@ 2014-06-13 13:47   ` Chen Yucong
  -1 siblings, 0 replies; 8+ messages in thread
From: Chen Yucong @ 2014-06-13 13:47 UTC (permalink / raw)
  To: akpm; +Cc: mgorman, hannes, mhocko, riel, linux-mm, linux-kernel

Hi all,

On Fri, 2014-06-13 at 13:58 +0800, Chen Yucong wrote:
> shrink_page_list() has too many arguments that have already reached ten.
> Some of those arguments and temporary variables introduces extra 80 bytes
> on the stack. This patch wraps five parameters into writeback_stats and removes
> some temporary variables, thus making the relative functions to consume fewer
> stack space.
> 
In this message, I have renamed shrink_result to writeback_stats
according to Johannes Weiner's reply. Thinking about it carefully, this
change was too hasty. Although it now just contains statistics on the
writeback states of the scanned pages, it may also be used for gathering
other information at some point in the future. So I think shrink_result
is a little bit better!

thx!
cyc



^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
@ 2014-06-13 13:47   ` Chen Yucong
  0 siblings, 0 replies; 8+ messages in thread
From: Chen Yucong @ 2014-06-13 13:47 UTC (permalink / raw)
  To: akpm; +Cc: mgorman, hannes, mhocko, riel, linux-mm, linux-kernel

Hi all,

On Fri, 2014-06-13 at 13:58 +0800, Chen Yucong wrote:
> shrink_page_list() has too many arguments that have already reached ten.
> Some of those arguments and temporary variables introduces extra 80 bytes
> on the stack. This patch wraps five parameters into writeback_stats and removes
> some temporary variables, thus making the relative functions to consume fewer
> stack space.
> 
In this message, I have renamed shrink_result to writeback_stats
according to Johannes Weiner's reply. Thinking about it carefully, this
change was too hasty. Although it now just contains statistics on the
writeback states of the scanned pages, it may also be used for gathering
other information at some point in the future. So I think shrink_result
is a little bit better!

thx!
cyc


--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
  2014-06-13 13:47   ` Chen Yucong
@ 2014-06-13 14:17     ` Johannes Weiner
  -1 siblings, 0 replies; 8+ messages in thread
From: Johannes Weiner @ 2014-06-13 14:17 UTC (permalink / raw)
  To: Chen Yucong; +Cc: akpm, mgorman, mhocko, riel, linux-mm, linux-kernel

On Fri, Jun 13, 2014 at 09:47:39PM +0800, Chen Yucong wrote:
> Hi all,
> 
> On Fri, 2014-06-13 at 13:58 +0800, Chen Yucong wrote:
> > shrink_page_list() has too many arguments that have already reached ten.
> > Some of those arguments and temporary variables introduces extra 80 bytes
> > on the stack. This patch wraps five parameters into writeback_stats and removes
> > some temporary variables, thus making the relative functions to consume fewer
> > stack space.
> > 
> I this message, I have renamed shrink_result to writeback_stats
> according to Johannes Weiner's reply. Think carefully, this change is
> too hasty. Although it now just contains statistics on the writeback
> states of the scanned pages, it may also be used for gathering other
> information at some point in the future. So I think shrink_result is a
> little bit better!

Then we can always rename it "at some point in the future", the name
is not set in stone.  At this time, it only contains writeback stats,
and I think it should be named accordingly.

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
@ 2014-06-13 14:17     ` Johannes Weiner
  0 siblings, 0 replies; 8+ messages in thread
From: Johannes Weiner @ 2014-06-13 14:17 UTC (permalink / raw)
  To: Chen Yucong; +Cc: akpm, mgorman, mhocko, riel, linux-mm, linux-kernel

On Fri, Jun 13, 2014 at 09:47:39PM +0800, Chen Yucong wrote:
> Hi all,
> 
> On Fri, 2014-06-13 at 13:58 +0800, Chen Yucong wrote:
> > shrink_page_list() has too many arguments that have already reached ten.
> > Some of those arguments and temporary variables introduces extra 80 bytes
> > on the stack. This patch wraps five parameters into writeback_stats and removes
> > some temporary variables, thus making the relative functions to consume fewer
> > stack space.
> > 
> I this message, I have renamed shrink_result to writeback_stats
> according to Johannes Weiner's reply. Think carefully, this change is
> too hasty. Although it now just contains statistics on the writeback
> states of the scanned pages, it may also be used for gathering other
> information at some point in the future. So I think shrink_result is a
> little bit better!

Then we can always rename it "at some point in the future", the name
is not set in stone.  At this time, it only contains writeback stats,
and I think it should be named accordingly.

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
  2014-06-13  5:58 ` Chen Yucong
@ 2014-06-25  6:59   ` Chen Yucong
  -1 siblings, 0 replies; 8+ messages in thread
From: Chen Yucong @ 2014-06-25  6:59 UTC (permalink / raw)
  To: akpm; +Cc: mgorman, hannes, mhocko, riel, linux-mm, linux-kernel

On Fri, 2014-06-13 at 13:58 +0800, Chen Yucong wrote:
> shrink_page_list() has too many arguments that have already reached ten.
> Some of those arguments and temporary variables introduces extra 80 bytes
> on the stack. This patch wraps five parameters into writeback_stats and removes
> some temporary variables, thus making the relative functions to consume fewer
> stack space.
> 
> Before mm/vmscan.c is changed:
>    text    data     bss     dec     hex filename
> 6876698  957224  966656 8800578  864942 vmlinux-3.15
> 
> After mm/vmscan.c is changed:
>    text    data     bss     dec     hex filename
> 6876506  957224  966656 8800386  864882 vmlinux-3.15
> 
> 
> scripts/checkstack.pl can be used for checking the change of the target function stack.
> 
> Before mm/vmscan.c is changed:
> 
> 0xffffffff810af103 shrink_inactive_list []:		152
> 0xffffffff810af43d shrink_inactive_list []:		152
> -------------------------------------------------------------
> 0xffffffff810aede8 reclaim_clean_pages_from_list []:	184
> 0xffffffff810aeef8 reclaim_clean_pages_from_list []:	184
> -------------------------------------------------------------
> 0xffffffff810ae582 shrink_page_list []:			232
> 0xffffffff810aedb5 shrink_page_list []:			232
> 
> After mm/vmscan.c is changed::
> 
> 0xffffffff810af078 shrink_inactive_list []:		120
> 0xffffffff810af36d shrink_inactive_list []:		120
> -------------------------------------------------------------
> With: struct writeback_stats dummy = {};
> 0xffffffff810aed6c reclaim_clean_pages_from_list []:    152
> 0xffffffff810aee68 reclaim_clean_pages_from_list []:    152
> -------------------------------------------------------------
> With: static struct writeback_stats dummy ={};
> 0xffffffff810aed69 reclaim_clean_pages_from_list []:    120
> 0xffffffff810aee4d reclaim_clean_pages_from_list []:    120
> --------------------------------------------------------------------------------------
> 0xffffffff810ae586 shrink_page_list []:			184   ---> sub    $0xb8,%rsp
> 0xffffffff810aed36 shrink_page_list []:			184   ---> add    $0xb8,%rsp
> 
> Via the above figures, we can find that the difference value of the stack is 32 for
> shrink_inactive_list and reclaim_clean_pages_from_list, and this value is 48(232-184)
> for shrink_page_list. From the hierarchy of functions called, the total difference
> value is 80(32+48) for this change.
> 
Hi all, 

Perhaps the fix that has been done by this patch does not quite make
sense. But I still think it is necessary to explain why we should do
this.

thx!
cyc


Until now, shrink_page_list() has had too many arguments — the count has
already reached ten. For the kernel, this is not a good thing. Not only
does it consume stack space, but the additional handling of the
parameters also increases the code size. In addition, it will increase
the number of memory accesses, especially on architectures that have a
relatively small number of registers. Therefore, limiting the number of
arguments is probably a good thing.

From the historical commit messages, we can see that the arguments
related to writeback stats were introduced one by one rather than all at
the same time. We cannot guarantee that no new parameters will be
introduced at some point in the future. If that happens, then the
related code must be cleaned up. Perhaps we need to make some rules for
kernel development, so that developers know what action should be taken
when too many arguments would be passed.

This patch wraps five parameters into `struct writeback_stats' for
reducing the stack consumption and code size. We could also use an array
for those writeback stats, but a `struct' is clearer.


Wrapping five parameters into `writeback_stats' saves 320 bytes of text.

   text    data     bss     dec     hex filename
5701904 1274800 1052672 8029376  7a84c0 vmlinux-3.15-wrap
5702224 1274800 1052672 8029696  7a8600 vmlinux-3.15

At the same time, it can save 128 bytes of stack.
                                            3.15   3.15-wrap
+0/-128 -128
shrink_inactive_list                         136     120     -16
shrink_page_list                             216     168     -48
reclaim_clean_pages_from_list                184     120     -64


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption
@ 2014-06-25  6:59   ` Chen Yucong
  0 siblings, 0 replies; 8+ messages in thread
From: Chen Yucong @ 2014-06-25  6:59 UTC (permalink / raw)
  To: akpm; +Cc: mgorman, hannes, mhocko, riel, linux-mm, linux-kernel

On Fri, 2014-06-13 at 13:58 +0800, Chen Yucong wrote:
> shrink_page_list() has too many arguments that have already reached ten.
> Some of those arguments and temporary variables introduces extra 80 bytes
> on the stack. This patch wraps five parameters into writeback_stats and removes
> some temporary variables, thus making the relative functions to consume fewer
> stack space.
> 
> Before mm/vmscan.c is changed:
>    text    data     bss     dec     hex filename
> 6876698  957224  966656 8800578  864942 vmlinux-3.15
> 
> After mm/vmscan.c is changed:
>    text    data     bss     dec     hex filename
> 6876506  957224  966656 8800386  864882 vmlinux-3.15
> 
> 
> scripts/checkstack.pl can be used for checking the change of the target function stack.
> 
> Before mm/vmscan.c is changed:
> 
> 0xffffffff810af103 shrink_inactive_list []:		152
> 0xffffffff810af43d shrink_inactive_list []:		152
> -------------------------------------------------------------
> 0xffffffff810aede8 reclaim_clean_pages_from_list []:	184
> 0xffffffff810aeef8 reclaim_clean_pages_from_list []:	184
> -------------------------------------------------------------
> 0xffffffff810ae582 shrink_page_list []:			232
> 0xffffffff810aedb5 shrink_page_list []:			232
> 
> After mm/vmscan.c is changed::
> 
> 0xffffffff810af078 shrink_inactive_list []:		120
> 0xffffffff810af36d shrink_inactive_list []:		120
> -------------------------------------------------------------
> With: struct writeback_stats dummy = {};
> 0xffffffff810aed6c reclaim_clean_pages_from_list []:    152
> 0xffffffff810aee68 reclaim_clean_pages_from_list []:    152
> -------------------------------------------------------------
> With: static struct writeback_stats dummy ={};
> 0xffffffff810aed69 reclaim_clean_pages_from_list []:    120
> 0xffffffff810aee4d reclaim_clean_pages_from_list []:    120
> --------------------------------------------------------------------------------------
> 0xffffffff810ae586 shrink_page_list []:			184   ---> sub    $0xb8,%rsp
> 0xffffffff810aed36 shrink_page_list []:			184   ---> add    $0xb8,%rsp
> 
> Via the above figures, we can find that the difference value of the stack is 32 for
> shrink_inactive_list and reclaim_clean_pages_from_list, and this value is 48(232-184)
> for shrink_page_list. From the hierarchy of functions called, the total difference
> value is 80(32+48) for this change.
> 
Hi all, 

Perhaps the fix that has been done by this patch does not quite make
sense. But I still think it is necessary to explain why we should do
this.

thx!
cyc


Until now, shrink_page_list() has had too many arguments — the count has
already reached ten. For the kernel, this is not a good thing. Not only
does it consume stack space, but the additional handling of the
parameters also increases the code size. In addition, it will increase
the number of memory accesses, especially on architectures that have a
relatively small number of registers. Therefore, limiting the number of
arguments is probably a good thing.

From the historical commit messages, we can see that the arguments
related to writeback stats were introduced one by one rather than all at
the same time. We cannot guarantee that no new parameters will be
introduced at some point in the future. If that happens, then the
related code must be cleaned up. Perhaps we need to make some rules for
kernel development, so that developers know what action should be taken
when too many arguments would be passed.

This patch wraps five parameters into `struct writeback_stats' for
reducing the stack consumption and code size. We could also use an array
for those writeback stats, but a `struct' is clearer.


Wrapping five parameters into `writeback_stats' saves 320 bytes of text.

   text    data     bss     dec     hex filename
5701904 1274800 1052672 8029376  7a84c0 vmlinux-3.15-wrap
5702224 1274800 1052672 8029696  7a8600 vmlinux-3.15

At the same time, it can save 128 bytes of stack.
                                            3.15   3.15-wrap
+0/-128 -128
shrink_inactive_list                         136     120     -16
shrink_page_list                             216     168     -48
reclaim_clean_pages_from_list                184     120     -64

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2014-06-25  7:00 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-06-13  5:58 [RESEND PATCH v2] mm/vmscan.c: wrap five parameters into writeback_stats for reducing the stack consumption Chen Yucong
2014-06-13  5:58 ` Chen Yucong
2014-06-13 13:47 ` Chen Yucong
2014-06-13 13:47   ` Chen Yucong
2014-06-13 14:17   ` Johannes Weiner
2014-06-13 14:17     ` Johannes Weiner
2014-06-25  6:59 ` Chen Yucong
2014-06-25  6:59   ` Chen Yucong

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.