[PATCH v3] mm/vmscan.c: change prototype for shrink_page_list
From: Maninder Singh @ 2020-04-29 13:50 UTC
  To: mhocko, akpm; +Cc: linux-mm, linux-kernel, a.sahrawat, v.narang, Maninder Singh

Commit 3c710c1ad11b ("mm, vmscan: extract shrink_page_list reclaim
counters into a struct") moved the reclaim counters used by
shrink_page_list() into struct reclaim_stat, whose fields are plain
unsigned int. Change the return type of shrink_page_list() and
reclaim_clean_pages_from_list() from unsigned long to unsigned int,
and switch the local counters in their callers to unsigned int to
match.
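
For reference, the counters that commit collects are plain unsigned
values; an abridged sketch of struct reclaim_stat (field list trimmed,
see mm/vmscan.c for the full definition):

	struct reclaim_stat {
		unsigned nr_dirty;
		unsigned nr_unqueued_dirty;
		unsigned nr_congested;
		unsigned nr_writeback;
		unsigned nr_immediate;
		/* ... further unsigned counters ... */
	};

An unsigned int return value is therefore wide enough for the per-call
page counts that shrink_page_list() and its callers report.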

Signed-off-by: Vaneet Narang <v.narang@samsung.com>
Signed-off-by: Maninder Singh <maninder1.s@samsung.com>
---
v1 -> v2: a variable declaration was reordered by mistake.
v2 -> v3: the accidental reordering is reverted; no variable
	  declarations move in this version.  If such a cleanup is
	  wanted, it will be sent as a separate patch.

 mm/internal.h   |  2 +-
 mm/page_alloc.c |  2 +-
 mm/vmscan.c     | 24 ++++++++++++------------
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index b5634e7..c3eeec8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -527,7 +527,7 @@ extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
-unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 					    struct list_head *page_list);
 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
 #define ALLOC_WMARK_MIN		WMARK_MIN
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1385d78..f17d88c6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8416,7 +8416,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 					unsigned long start, unsigned long end)
 {
 	/* This function is based on compact_zone() from compaction.c. */
-	unsigned long nr_reclaimed;
+	unsigned int nr_reclaimed;
 	unsigned long pfn = start;
 	unsigned int tries = 0;
 	int ret = 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b06868f..7631725 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1066,17 +1066,17 @@ static void page_check_dirty_writeback(struct page *page,
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
-static unsigned long shrink_page_list(struct list_head *page_list,
-				      struct pglist_data *pgdat,
-				      struct scan_control *sc,
-				      enum ttu_flags ttu_flags,
-				      struct reclaim_stat *stat,
-				      bool ignore_references)
+static unsigned int shrink_page_list(struct list_head *page_list,
+				     struct pglist_data *pgdat,
+				     struct scan_control *sc,
+				     enum ttu_flags ttu_flags,
+				     struct reclaim_stat *stat,
+				     bool ignore_references)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
-	unsigned nr_reclaimed = 0;
-	unsigned pgactivate = 0;
+	unsigned int nr_reclaimed = 0;
+	unsigned int pgactivate = 0;
 
 	memset(stat, 0, sizeof(*stat));
 	cond_resched();
@@ -1483,7 +1483,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	return nr_reclaimed;
 }
 
-unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 					    struct list_head *page_list)
 {
 	struct scan_control sc = {
@@ -1492,7 +1492,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.may_unmap = 1,
 	};
 	struct reclaim_stat dummy_stat;
-	unsigned long ret;
+	unsigned int ret;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1900,7 +1900,7 @@ static int current_may_throttle(void)
 {
 	LIST_HEAD(page_list);
 	unsigned long nr_scanned;
-	unsigned long nr_reclaimed = 0;
+	unsigned int nr_reclaimed = 0;
 	unsigned long nr_taken;
 	struct reclaim_stat stat;
 	int file = is_file_lru(lru);
@@ -2096,7 +2096,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 unsigned long reclaim_pages(struct list_head *page_list)
 {
 	int nid = NUMA_NO_NODE;
-	unsigned long nr_reclaimed = 0;
+	unsigned int nr_reclaimed = 0;
 	LIST_HEAD(node_page_list);
 	struct reclaim_stat dummy_stat;
 	struct page *page;
-- 
1.9.1




Re: [PATCH v3] mm/vmscan.c: change prototype for shrink_page_list
From: Michal Hocko @ 2020-04-29 14:22 UTC
  To: Maninder Singh; +Cc: akpm, linux-mm, linux-kernel, a.sahrawat, v.narang

On Wed 29-04-20 19:20:59, Maninder Singh wrote:
> Commit 3c710c1ad11b ("mm, vmscan: extract shrink_page_list reclaim
> counters into a struct") moved the reclaim counters used by
> shrink_page_list() into struct reclaim_stat, whose fields are plain
> unsigned int. Change the return type of shrink_page_list() and
> reclaim_clean_pages_from_list() from unsigned long to unsigned int,
> and switch the local counters in their callers to unsigned int to
> match.
> 
> Signed-off-by: Vaneet Narang <v.narang@samsung.com>
> Signed-off-by: Maninder Singh <maninder1.s@samsung.com>

You could have kept my ack from v1:
Acked-by: Michal Hocko <mhocko@suse.com>

Thanks!

> ---
> v1 -> v2: a variable declaration was reordered by mistake.
> v2 -> v3: the accidental reordering is reverted; no variable
> 	  declarations move in this version.  If such a cleanup is
> 	  wanted, it will be sent as a separate patch.
> 
>  mm/internal.h   |  2 +-
>  mm/page_alloc.c |  2 +-
>  mm/vmscan.c     | 24 ++++++++++++------------
>  3 files changed, 14 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/internal.h b/mm/internal.h
> index b5634e7..c3eeec8 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -527,7 +527,7 @@ extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
>          unsigned long, unsigned long);
>  
>  extern void set_pageblock_order(void);
> -unsigned long reclaim_clean_pages_from_list(struct zone *zone,
> +unsigned int reclaim_clean_pages_from_list(struct zone *zone,
>  					    struct list_head *page_list);
>  /* The ALLOC_WMARK bits are used as an index to zone->watermark */
>  #define ALLOC_WMARK_MIN		WMARK_MIN
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 1385d78..f17d88c6 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -8416,7 +8416,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
>  					unsigned long start, unsigned long end)
>  {
>  	/* This function is based on compact_zone() from compaction.c. */
> -	unsigned long nr_reclaimed;
> +	unsigned int nr_reclaimed;
>  	unsigned long pfn = start;
>  	unsigned int tries = 0;
>  	int ret = 0;
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index b06868f..7631725 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1066,17 +1066,17 @@ static void page_check_dirty_writeback(struct page *page,
>  /*
>   * shrink_page_list() returns the number of reclaimed pages
>   */
> -static unsigned long shrink_page_list(struct list_head *page_list,
> -				      struct pglist_data *pgdat,
> -				      struct scan_control *sc,
> -				      enum ttu_flags ttu_flags,
> -				      struct reclaim_stat *stat,
> -				      bool ignore_references)
> +static unsigned int shrink_page_list(struct list_head *page_list,
> +				     struct pglist_data *pgdat,
> +				     struct scan_control *sc,
> +				     enum ttu_flags ttu_flags,
> +				     struct reclaim_stat *stat,
> +				     bool ignore_references)
>  {
>  	LIST_HEAD(ret_pages);
>  	LIST_HEAD(free_pages);
> -	unsigned nr_reclaimed = 0;
> -	unsigned pgactivate = 0;
> +	unsigned int nr_reclaimed = 0;
> +	unsigned int pgactivate = 0;
>  
>  	memset(stat, 0, sizeof(*stat));
>  	cond_resched();
> @@ -1483,7 +1483,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  	return nr_reclaimed;
>  }
>  
> -unsigned long reclaim_clean_pages_from_list(struct zone *zone,
> +unsigned int reclaim_clean_pages_from_list(struct zone *zone,
>  					    struct list_head *page_list)
>  {
>  	struct scan_control sc = {
> @@ -1492,7 +1492,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
>  		.may_unmap = 1,
>  	};
>  	struct reclaim_stat dummy_stat;
> -	unsigned long ret;
> +	unsigned int ret;
>  	struct page *page, *next;
>  	LIST_HEAD(clean_pages);
>  
> @@ -1900,7 +1900,7 @@ static int current_may_throttle(void)
>  {
>  	LIST_HEAD(page_list);
>  	unsigned long nr_scanned;
> -	unsigned long nr_reclaimed = 0;
> +	unsigned int nr_reclaimed = 0;
>  	unsigned long nr_taken;
>  	struct reclaim_stat stat;
>  	int file = is_file_lru(lru);
> @@ -2096,7 +2096,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
>  unsigned long reclaim_pages(struct list_head *page_list)
>  {
>  	int nid = NUMA_NO_NODE;
> -	unsigned long nr_reclaimed = 0;
> +	unsigned int nr_reclaimed = 0;
>  	LIST_HEAD(node_page_list);
>  	struct reclaim_stat dummy_stat;
>  	struct page *page;
> -- 
> 1.9.1
> 

-- 
Michal Hocko
SUSE Labs


