* [PATCHv4] mm: skip CMA pages when they are not available
From: zhaoyang.huang @ 2023-05-22  6:36 UTC
  To: Andrew Morton, Matthew Wilcox, Minchan Kim, Joonsoo Kim,
	linux-mm, linux-kernel, Zhaoyang Huang, ke.wang

From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

This patch fixes unproductive reclaiming of CMA pages by skipping them when
they are not available for the current context. It arises from the OOM issue
below, which was caused by a large proportion of MIGRATE_CMA pages among the
free pages: note that nearly every free block in the DMA32 dump carries the
(C) annotation, i.e. it lives in a MIGRATE_CMA pageblock.

[   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
[   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
[   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
...
[   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
[   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
[   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
v2: update commit message and fix build error when CONFIG_CMA is not set
v3,v4: update code and comments
---
 mm/vmscan.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd6637f..20facec 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2193,6 +2193,26 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 
 }
 
+#ifdef CONFIG_CMA
+/*
+ * It is a waste of effort to scan and reclaim CMA pages if they are not
+ * available for the current allocation context.
+ */
+static bool skip_cma(struct folio *folio, struct scan_control *sc)
+{
+	if (!current_is_kswapd() &&
+			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
+			get_pageblock_migratetype(&folio->page) == MIGRATE_CMA)
+		return true;
+	return false;
+}
+#else
+static bool skip_cma(struct folio *folio, struct scan_control *sc)
+{
+	return false;
+}
+#endif
+
 /*
  * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
  *
@@ -2239,7 +2259,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (folio_zonenum(folio) > sc->reclaim_idx) {
+		if (folio_zonenum(folio) > sc->reclaim_idx ||
+				skip_cma(folio, sc)) {
 			nr_skipped[folio_zonenum(folio)] += nr_pages;
 			move_to = &folios_skipped;
 			goto move;
-- 
1.9.1
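
A minimal, self-contained sketch of the check at the heart of skip_cma()
above. The constants and the simplified gfp_migratetype() below are
illustrative stand-ins, not the kernel's real definitions (those live in
include/linux/gfp.h); the point is only that an allocation may be served
from a MIGRATE_CMA pageblock if, and only if, its GFP flags mark it movable.

#include <stdio.h>

/* Illustrative stand-ins; not the kernel's real bit values. */
#define GFP_MOVABLE_BIT      0x08u
#define GFP_RECLAIMABLE_BIT  0x10u
#define GFP_KERNEL           0x00u             /* no mobility bits set */
#define GFP_HIGHUSER_MOVABLE GFP_MOVABLE_BIT   /* movable user memory */

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

/* Simplified model of gfp_migratetype(): derive an allocation's
 * migratetype from the mobility bits of its GFP flags. */
static enum migratetype gfp_migratetype(unsigned int gfp)
{
	if (gfp & GFP_MOVABLE_BIT)
		return MIGRATE_MOVABLE;
	if (gfp & GFP_RECLAIMABLE_BIT)
		return MIGRATE_RECLAIMABLE;
	return MIGRATE_UNMOVABLE;
}

int main(void)
{
	/* A GFP_KERNEL allocation is unmovable, so memory reclaimed from
	 * CMA pageblocks is unusable for it -- exactly the scanning that
	 * skip_cma() avoids. */
	printf("GFP_KERNEL may use CMA: %s\n",
	       gfp_migratetype(GFP_KERNEL) == MIGRATE_MOVABLE ? "yes" : "no");
	printf("GFP_HIGHUSER_MOVABLE may use CMA: %s\n",
	       gfp_migratetype(GFP_HIGHUSER_MOVABLE) == MIGRATE_MOVABLE
	       ? "yes" : "no");
	return 0;
}

Compiled as plain C, this prints "no" for GFP_KERNEL and "yes" for
GFP_HIGHUSER_MOVABLE, which is the distinction the patch relies on.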



* Re: [PATCHv4] mm: skip CMA pages when they are not available
From: Matthew Wilcox @ 2023-05-25 20:03 UTC
  To: zhaoyang.huang
  Cc: Andrew Morton, Minchan Kim, Joonsoo Kim, linux-mm, linux-kernel,
	Zhaoyang Huang, ke.wang

On Mon, May 22, 2023 at 02:36:03PM +0800, zhaoyang.huang wrote:
> +#ifdef CONFIG_CMA
> +/*
> + * It is a waste of effort to scan and reclaim CMA pages if they are not
> + * available for the current allocation context.
> + */
> +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> +{
> +	if (!current_is_kswapd() &&
> +			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
> +			get_pageblock_migratetype(&folio->page) == MIGRATE_CMA)
> +		return true;
> +	return false;
> +}
> +#else
> +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> +{
> +	return false;
> +}
> +#endif
> +
>  /*
>   * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
>   *
> @@ -2239,7 +2259,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
>  		nr_pages = folio_nr_pages(folio);
>  		total_scan += nr_pages;
>  
> -		if (folio_zonenum(folio) > sc->reclaim_idx) {
> +		if (folio_zonenum(folio) > sc->reclaim_idx ||
> +				skip_cma(folio, sc)) {
>  			nr_skipped[folio_zonenum(folio)] += nr_pages;
>  			move_to = &folios_skipped;
>  			goto move;

I have no idea if what this patch is trying to accomplish is correct,
but I no longer object to how it is doing it.


* Re: [PATCHv4] mm: skip CMA pages when they are not available
From: Zhaoyang Huang @ 2023-05-26  2:30 UTC
  To: Matthew Wilcox
  Cc: zhaoyang.huang, Andrew Morton, Minchan Kim, Joonsoo Kim,
	linux-mm, linux-kernel, ke.wang

On Fri, May 26, 2023 at 4:03 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> On Mon, May 22, 2023 at 02:36:03PM +0800, zhaoyang.huang wrote:
> > +#ifdef CONFIG_CMA
> > +/*
> > + * It is a waste of effort to scan and reclaim CMA pages if they are not
> > + * available for the current allocation context.
> > + */
> > +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> > +{
> > +     if (!current_is_kswapd() &&
> > +                     gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
> > +                     get_pageblock_migratetype(&folio->page) == MIGRATE_CMA)
> > +             return true;
> > +     return false;
> > +}
> > +#else
> > +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> > +{
> > +     return false;
> > +}
> > +#endif
> > +
> >  /*
> >   * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
> >   *
> > @@ -2239,7 +2259,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
> >               nr_pages = folio_nr_pages(folio);
> >               total_scan += nr_pages;
> >
> > -             if (folio_zonenum(folio) > sc->reclaim_idx) {
> > +             if (folio_zonenum(folio) > sc->reclaim_idx ||
> > +                             skip_cma(folio, sc)) {
> >                       nr_skipped[folio_zonenum(folio)] += nr_pages;
> >                       move_to = &folios_skipped;
> >                       goto move;
>
> I have no idea if what this patch is trying to accomplish is correct,
> but I no longer object to how it is doing it.
IMO, this is necessary, as there is a weird scenario where a GFP_KERNEL
allocation might get 32 MIGRATE_CMA pages via direct reclaim, which leads
to a low PSI_MEM/vmpressure value but still returns a NULL pointer.
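
A toy model of that failure mode, with invented names (nothing below is a
kernel API): direct reclaim judges progress by the number of pages it
reclaimed, so 32 freed CMA pages keep PSI_MEM/vmpressure looking low even
though an unmovable caller cannot use any of them.

#include <stdio.h>

/* All names below are invented for this sketch; none are kernel APIs. */
#define RECLAIM_BATCH 32	/* direct reclaim's batch (SWAP_CLUSTER_MAX) */

struct reclaim_outcome {
	int nr_reclaimed;	/* pages freed by direct reclaim */
	int nr_usable;		/* of those, pages an unmovable caller may use */
};

int main(void)
{
	/* Suppose every reclaimed page came from a MIGRATE_CMA pageblock. */
	struct reclaim_outcome o = { .nr_reclaimed = RECLAIM_BATCH,
				     .nr_usable = 0 };

	/* Progress -- and therefore PSI_MEM/vmpressure -- is judged on
	 * nr_reclaimed, so memory pressure looks low... */
	printf("reclaim reports progress: %s\n", o.nr_reclaimed ? "yes" : "no");

	/* ...yet the GFP_KERNEL retry finds nothing it is allowed to use
	 * and the allocation still returns NULL. */
	printf("allocation can succeed:  %s\n", o.nr_usable ? "yes" : "no");
	return 0;
}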


* Re: [PATCHv4] mm: skip CMA pages when they are not available
From: David Hildenbrand @ 2023-05-26 19:36 UTC
  To: zhaoyang.huang, Andrew Morton, Matthew Wilcox, Minchan Kim,
	Joonsoo Kim, linux-mm, linux-kernel, Zhaoyang Huang, ke.wang

On 22.05.23 08:36, zhaoyang.huang wrote:
> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> 
> This patch fixes unproductive reclaiming of CMA pages by skipping them when
> they are not available for the current context. It arises from the OOM issue
> below, which was caused by a large proportion of MIGRATE_CMA pages among the
> free pages.
> 
> [   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
> [   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
> [   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
> ...
> [   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
> [   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
> [   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0
> 
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> ---
> v2: update commit message and fix build error when CONFIG_CMA is not set
> v3,v4: update code and comments
> ---
>   mm/vmscan.c | 23 ++++++++++++++++++++++-
>   1 file changed, 22 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index bd6637f..20facec 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2193,6 +2193,26 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
>   
>   }
>   
> +#ifdef CONFIG_CMA
> +/*
> + * It is a waste of effort to scan and reclaim CMA pages if they are not
> + * available for the current allocation context.
> + */

/*
  * Only movable allocations may end up on MIGRATE_CMA pageblocks. If
  * we're not dealing with a movable allocation, it doesn't make sense to
  * reclaim from these pageblocks: the reclaimed memory is unusable for
  * this allocation.
  */

Did I get it right?

> +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> +{
> +	if (!current_is_kswapd() &&
> +			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
> +			get_pageblock_migratetype(&folio->page) == MIGRATE_CMA)
> +		return true;
> +	return false;

	return !current_is_kswapd() &&
	       gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
	       get_pageblock_migratetype(&folio->page) == MIGRATE_CMA;


-- 
Thanks,

David / dhildenb



* Re: [PATCHv4] mm: skip CMA pages when they are not available
From: Minchan Kim @ 2023-05-26 23:03 UTC
  To: zhaoyang.huang
  Cc: Andrew Morton, Matthew Wilcox, Joonsoo Kim, linux-mm,
	linux-kernel, Zhaoyang Huang, ke.wang

On Mon, May 22, 2023 at 02:36:03PM +0800, zhaoyang.huang wrote:
> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> 
> This patch fixes unproductive reclaiming of CMA pages by skipping them when
> they are not available for the current context. It arises from the OOM issue
> below, which was caused by a large proportion of MIGRATE_CMA pages among the
> free pages.
> 
> [   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
> [   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
> [   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
> ...
> [   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
> [   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
> [   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0
> 
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> ---
> v2: update commit message and fix build error when CONFIG_CMA is not set
> v3,v4: update code and comments
> ---
>  mm/vmscan.c | 23 ++++++++++++++++++++++-
>  1 file changed, 22 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index bd6637f..20facec 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2193,6 +2193,26 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
>  
>  }
>  
> +#ifdef CONFIG_CMA
> +/*
> + * It is a waste of effort to scan and reclaim CMA pages if they are not
> + * available for the current allocation context.
> + */
> +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> +{
> +	if (!current_is_kswapd() &&

The function is called by isolate_lru_folios, which is used by both background
and direct reclaim at the same time. The sc->reclaim_idx check below, which
filters unproductive reclaim out, applies to both cases, so why does the CMA
check consider only the direct reclaim path?


> +			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
> +			get_pageblock_migratetype(&folio->page) == MIGRATE_CMA)
> +		return true;
> +	return false;
> +}
> +#else
> +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> +{
> +	return false;
> +}
> +#endif
> +
>  /*
>   * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
>   *
> @@ -2239,7 +2259,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
>  		nr_pages = folio_nr_pages(folio);
>  		total_scan += nr_pages;
>  
> -		if (folio_zonenum(folio) > sc->reclaim_idx) {
> +		if (folio_zonenum(folio) > sc->reclaim_idx ||
> +				skip_cma(folio, sc)) {
>  			nr_skipped[folio_zonenum(folio)] += nr_pages;
>  			move_to = &folios_skipped;
>  			goto move;
> -- 
> 1.9.1
> 


* Re: [PATCHv4] mm: skip CMA pages when they are not available
From: Zhaoyang Huang @ 2023-05-29  1:02 UTC
  To: David Hildenbrand
  Cc: zhaoyang.huang, Andrew Morton, Matthew Wilcox, Minchan Kim,
	Joonsoo Kim, linux-mm, linux-kernel, ke.wang

On Sat, May 27, 2023 at 3:36 AM David Hildenbrand <david@redhat.com> wrote:
>
> On 22.05.23 08:36, zhaoyang.huang wrote:
> > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> >
> > This patch fixes unproductive reclaiming of CMA pages by skipping them when
> > they are not available for the current context. It arises from the OOM issue
> > below, which was caused by a large proportion of MIGRATE_CMA pages among the
> > free pages.
> >
> > [   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
> > [   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
> > [   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
> > ...
> > [   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
> > [   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
> > [   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0
> >
> > Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> > ---
> > v2: update commit message and fix build error when CONFIG_CMA is not set
> > v3,v4: update code and comments
> > ---
> >   mm/vmscan.c | 23 ++++++++++++++++++++++-
> >   1 file changed, 22 insertions(+), 1 deletion(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index bd6637f..20facec 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -2193,6 +2193,26 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
> >
> >   }
> >
> > +#ifdef CONFIG_CMA
> > +/*
> > + * It is a waste of effort to scan and reclaim CMA pages if they are not
> > + * available for the current allocation context.
> > + */
>
> /*
>   * Only movable allocations may end up on MIGRATE_CMA pageblocks. If
>   * we're not dealing with a movable allocation, it doesn't make sense to
>   * reclaim from these pageblocks: the reclaimed memory is unusable for
>   * this allocation.
>   */
>
> Did I get it right?
Yes, that's right.
>
> > +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> > +{
> > +     if (!current_is_kswapd() &&
> > +                     gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
> > +                     get_pageblock_migratetype(&folio->page) == MIGRATE_CMA)
> > +             return true;
> > +     return false;
>
>         return !current_is_kswapd() &&
>                gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
>                get_pageblock_migratetype(&folio->page) == MIGRATE_CMA;
ok, thanks
>
>
> --
> Thanks,
>
> David / dhildenb
>


* Re: [PATCHv4] mm: skip CMA pages when they are not available
From: Zhaoyang Huang @ 2023-05-29  1:11 UTC
  To: Minchan Kim
  Cc: zhaoyang.huang, Andrew Morton, Matthew Wilcox, Joonsoo Kim,
	linux-mm, linux-kernel, ke.wang

On Sat, May 27, 2023 at 7:03 AM Minchan Kim <minchan@kernel.org> wrote:
>
> On Mon, May 22, 2023 at 02:36:03PM +0800, zhaoyang.huang wrote:
> > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> >
> > This patch fixes unproductive reclaiming of CMA pages by skipping them when
> > they are not available for the current context. It arises from the OOM issue
> > below, which was caused by a large proportion of MIGRATE_CMA pages among the
> > free pages.
> >
> > [   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
> > [   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
> > [   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
> > ...
> > [   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
> > [   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
> > [   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0
> >
> > Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> > ---
> > v2: update commit message and fix build error when CONFIG_CMA is not set
> > v3,v4: update code and comments
> > ---
> >  mm/vmscan.c | 23 ++++++++++++++++++++++-
> >  1 file changed, 22 insertions(+), 1 deletion(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index bd6637f..20facec 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -2193,6 +2193,26 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
> >
> >  }
> >
> > +#ifdef CONFIG_CMA
> > +/*
> > + * It is a waste of effort to scan and reclaim CMA pages if they are not
> > + * available for the current allocation context.
> > + */
> > +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> > +{
> > +     if (!current_is_kswapd() &&
>
> The function is called by isolate_lru_folios, which is used by both background
> and direct reclaim at the same time. The sc->reclaim_idx check below, which
> filters unproductive reclaim out, applies to both cases, so why does the CMA
> check consider only the direct reclaim path?
Because kswapd's sc->gfp_mask is GFP_KERNEL, which cannot distinguish
this scenario (see the sketch after the quoted hunk below).
>
>
> > +                     gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
> > +                     get_pageblock_migratetype(&folio->page) == MIGRATE_CMA)
> > +             return true;
> > +     return false;
> > +}
> > +#else
> > +static bool skip_cma(struct folio *folio, struct scan_control *sc)
> > +{
> > +     return false;
> > +}
> > +#endif
> > +
> >  /*
> >   * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
> >   *
> > @@ -2239,7 +2259,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
> >               nr_pages = folio_nr_pages(folio);
> >               total_scan += nr_pages;
> >
> > -             if (folio_zonenum(folio) > sc->reclaim_idx) {
> > +             if (folio_zonenum(folio) > sc->reclaim_idx ||
> > +                             skip_cma(folio, sc)) {
> >                       nr_skipped[folio_zonenum(folio)] += nr_pages;
> >                       move_to = &folios_skipped;
> >                       goto move;
> > --
> > 1.9.1
> >
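
To make the kswapd asymmetry concrete, here is a hedged, userspace-only
sketch of the guard's effect; the constants and the simplified skip_cma()
are stand-ins, not kernel code. kswapd builds its scan_control with a fixed
GFP_KERNEL mask (see balance_pgdat() in mm/vmscan.c), so without the
!current_is_kswapd() test it would treat every reclaim as unmovable and
skip all CMA pages.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; not the kernel's real definitions. */
#define GFP_MOVABLE_BIT 0x08u
#define GFP_KERNEL      0x00u	/* kswapd's fixed reclaim mask: unmovable */

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_CMA };

/* Simplified skip_cma(): skip a CMA folio only when reclaiming on
 * behalf of a non-movable allocation, and never when running as
 * kswapd, whose GFP_KERNEL mask says nothing about who will consume
 * the freed pages. */
static bool skip_cma(bool is_kswapd, unsigned int gfp,
		     enum migratetype folio_mt)
{
	bool movable = gfp & GFP_MOVABLE_BIT;

	return !is_kswapd && !movable && folio_mt == MIGRATE_CMA;
}

int main(void)
{
	/* Direct reclaim for GFP_KERNEL: skipping CMA is productive. */
	printf("direct GFP_KERNEL skips CMA: %s\n",
	       skip_cma(false, GFP_KERNEL, MIGRATE_CMA) ? "yes" : "no");

	/* kswapd also reclaims under GFP_KERNEL; without the guard it
	 * would skip CMA too and starve movable allocations. */
	printf("kswapd skips CMA:            %s\n",
	       skip_cma(true, GFP_KERNEL, MIGRATE_CMA) ? "yes" : "no");
	return 0;
}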

