linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [Resend PATCHv2] mm: skip CMA pages when they are not available
@ 2023-05-15  9:38 zhaoyang.huang
  2023-05-19  8:41 ` Zhaoyang Huang
  0 siblings, 1 reply; 4+ messages in thread
From: zhaoyang.huang @ 2023-05-15  9:38 UTC (permalink / raw)
  To: Andrew Morton, Matthew Wilcox, Minchan Kim, Joonsoo Kim,
	linux-mm, linux-kernel, Zhaoyang Huang, ke.wang

From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

This patch fixes unproductive reclaiming of CMA pages by skipping them when they
are not available for the current context. It arises from the below OOM issue, which
was caused by a large proportion of MIGRATE_CMA pages among the free pages. There has
been a commit (168676649) to fix it by trying CMA pages first instead of falling back
in rmqueue. I would like to propose another one from the reclaiming perspective.

04166 < 4> [   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
0419C < 4> [   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
0419D < 4> [   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
	......
041EA < 4> [   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
041EB < 4> [   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
041EC < 4> [   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
v2: update commit message and fix build error when CONFIG_CMA is not set
---
---
 mm/vmscan.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd6637f..19fb445 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2225,10 +2225,16 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
 	unsigned long skipped = 0;
 	unsigned long scan, total_scan, nr_pages;
+	bool cma_cap = true;
+	struct page *page;
 	LIST_HEAD(folios_skipped);
 
 	total_scan = 0;
 	scan = 0;
+	if ((IS_ENABLED(CONFIG_CMA)) && !current_is_kswapd()
+		&& (gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE))
+		cma_cap = false;
+
 	while (scan < nr_to_scan && !list_empty(src)) {
 		struct list_head *move_to = src;
 		struct folio *folio;
@@ -2239,12 +2245,17 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (folio_zonenum(folio) > sc->reclaim_idx) {
+		page = &folio->page;
+
+		if ((folio_zonenum(folio) > sc->reclaim_idx)
+#ifdef CONFIG_CMA
+			|| (get_pageblock_migratetype(page) == MIGRATE_CMA && !cma_cap)
+#endif
+		) {
 			nr_skipped[folio_zonenum(folio)] += nr_pages;
 			move_to = &folios_skipped;
 			goto move;
 		}
-
 		/*
 		 * Do not count skipped folios because that makes the function
 		 * return with no isolated folios if the LRU mostly contains
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [Resend PATCHv2] mm: skip CMA pages when they are not available
  2023-05-15  9:38 [Resend PATCHv2] mm: skip CMA pages when they are not available zhaoyang.huang
@ 2023-05-19  8:41 ` Zhaoyang Huang
  2023-05-19 21:58   ` Andrew Morton
  0 siblings, 1 reply; 4+ messages in thread
From: Zhaoyang Huang @ 2023-05-19  8:41 UTC (permalink / raw)
  To: zhaoyang.huang
  Cc: Andrew Morton, Matthew Wilcox, Minchan Kim, Joonsoo Kim,
	linux-mm, linux-kernel, ke.wang

any comments?

On Mon, May 15, 2023 at 5:40 PM zhaoyang.huang
<zhaoyang.huang@unisoc.com> wrote:
>
> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
>
> This patch fixes unproductive reclaiming of CMA pages by skipping them when they
> are not available for the current context. It arises from the below OOM issue, which
> was caused by a large proportion of MIGRATE_CMA pages among the free pages. There has
> been a commit (168676649) to fix it by trying CMA pages first instead of falling back
> in rmqueue. I would like to propose another one from the reclaiming perspective.
>
> 04166 < 4> [   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
> 0419C < 4> [   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
> 0419D < 4> [   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
>         ......
> 041EA < 4> [   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
> 041EB < 4> [   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
> 041EC < 4> [   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0
>
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> ---
> v2: update commit message and fix build error when CONFIG_CMA is not set
> ---
> ---
>  mm/vmscan.c | 15 +++++++++++++--
>  1 file changed, 13 insertions(+), 2 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index bd6637f..19fb445 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2225,10 +2225,16 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
>         unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
>         unsigned long skipped = 0;
>         unsigned long scan, total_scan, nr_pages;
> +       bool cma_cap = true;
> +       struct page *page;
>         LIST_HEAD(folios_skipped);
>
>         total_scan = 0;
>         scan = 0;
> +       if ((IS_ENABLED(CONFIG_CMA)) && !current_is_kswapd()
> +               && (gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE))
> +               cma_cap = false;
> +
>         while (scan < nr_to_scan && !list_empty(src)) {
>                 struct list_head *move_to = src;
>                 struct folio *folio;
> @@ -2239,12 +2245,17 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
>                 nr_pages = folio_nr_pages(folio);
>                 total_scan += nr_pages;
>
> -               if (folio_zonenum(folio) > sc->reclaim_idx) {
> +               page = &folio->page;
> +
> +               if ((folio_zonenum(folio) > sc->reclaim_idx)
> +#ifdef CONFIG_CMA
> +                       || (get_pageblock_migratetype(page) == MIGRATE_CMA && !cma_cap)
> +#endif
> +               ) {
>                         nr_skipped[folio_zonenum(folio)] += nr_pages;
>                         move_to = &folios_skipped;
>                         goto move;
>                 }
> -
>                 /*
>                  * Do not count skipped folios because that makes the function
>                  * return with no isolated folios if the LRU mostly contains
> --
> 1.9.1
>

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [Resend PATCHv2] mm: skip CMA pages when they are not available
  2023-05-19  8:41 ` Zhaoyang Huang
@ 2023-05-19 21:58   ` Andrew Morton
  2023-05-22  2:06     ` Zhaoyang Huang
  0 siblings, 1 reply; 4+ messages in thread
From: Andrew Morton @ 2023-05-19 21:58 UTC (permalink / raw)
  To: Zhaoyang Huang
  Cc: zhaoyang.huang, Matthew Wilcox, Minchan Kim, Joonsoo Kim,
	linux-mm, linux-kernel, ke.wang

On Fri, 19 May 2023 16:41:41 +0800 Zhaoyang Huang <huangzhaoyang@gmail.com> wrote:

> any comments?

Have any of the regular CMA developers commented on a version of this?


I have a couple of little complaints:

> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index bd6637f..19fb445 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2225,10 +2225,16 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
>         unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
>         unsigned long skipped = 0;
>         unsigned long scan, total_scan, nr_pages;
> +       bool cma_cap = true;
> +       struct page *page;
>         LIST_HEAD(folios_skipped);
>
>         total_scan = 0;
>         scan = 0;
> +       if ((IS_ENABLED(CONFIG_CMA)) && !current_is_kswapd()
> +               && (gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE))
> +               cma_cap = false;
> +

A code comment above this alteration would be good.  Tell the reader
why we're doing this.


>         while (scan < nr_to_scan && !list_empty(src)) {
>                 struct list_head *move_to = src;
>                 struct folio *folio;
> @@ -2239,12 +2245,17 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
>                 nr_pages = folio_nr_pages(folio);
>                 total_scan += nr_pages;
>
> -               if (folio_zonenum(folio) > sc->reclaim_idx) {
> +               page = &folio->page;
> +
> +               if ((folio_zonenum(folio) > sc->reclaim_idx)
> +#ifdef CONFIG_CMA
> +                       || (get_pageblock_migratetype(page) == MIGRATE_CMA && !cma_cap)
> +#endif
> +               ) {
>                         nr_skipped[folio_zonenum(folio)] += nr_pages;
>                         move_to = &folios_skipped;
>                         goto move;
>                 }

That's pretty ugly.  Can we use IS_ENABLED(CONFIG_CMA) here to avoid
the ifdef?


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [Resend PATCHv2] mm: skip CMA pages when they are not available
  2023-05-19 21:58   ` Andrew Morton
@ 2023-05-22  2:06     ` Zhaoyang Huang
  0 siblings, 0 replies; 4+ messages in thread
From: Zhaoyang Huang @ 2023-05-22  2:06 UTC (permalink / raw)
  To: Andrew Morton
  Cc: zhaoyang.huang, Matthew Wilcox, Minchan Kim, Joonsoo Kim,
	linux-mm, linux-kernel, ke.wang

On Sat, May 20, 2023 at 5:58 AM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Fri, 19 May 2023 16:41:41 +0800 Zhaoyang Huang <huangzhaoyang@gmail.com> wrote:
>
> > any comments?
>
> Have any of the regular CMA developers commented on a version of this?
No comments until now. IMO, it mainly affects the reclaiming process.
>
>
> I have a couple of little complaints:
>
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index bd6637f..19fb445 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -2225,10 +2225,16 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
> >         unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
> >         unsigned long skipped = 0;
> >         unsigned long scan, total_scan, nr_pages;
> > +       bool cma_cap = true;
> > +       struct page *page;
> >         LIST_HEAD(folios_skipped);
> >
> >         total_scan = 0;
> >         scan = 0;
> > +       if ((IS_ENABLED(CONFIG_CMA)) && !current_is_kswapd()
> > +               && (gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE))
> > +               cma_cap = false;
> > +
>
> A code comment above this alteration would be good.  Tell the reader
> why we're doing this.
ok, will update
>
>
> >         while (scan < nr_to_scan && !list_empty(src)) {
> >                 struct list_head *move_to = src;
> >                 struct folio *folio;
> > @@ -2239,12 +2245,17 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
> >                 nr_pages = folio_nr_pages(folio);
> >                 total_scan += nr_pages;
> >
> > -               if (folio_zonenum(folio) > sc->reclaim_idx) {
> > +               page = &folio->page;
> > +
> > +               if ((folio_zonenum(folio) > sc->reclaim_idx)
> > +#ifdef CONFIG_CMA
> > +                       || (get_pageblock_migratetype(page) == MIGRATE_CMA && !cma_cap)
> > +#endif
> > +               ) {
> >                         nr_skipped[folio_zonenum(folio)] += nr_pages;
> >                         move_to = &folios_skipped;
> >                         goto move;
> >                 }
>
> That's pretty ugly.  Can we use IS_ENABLED(CONFIG_CMA) here to avoid
> the ifdef?
ok
>

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2023-05-22  2:07 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-05-15  9:38 [Resend PATCHv2] mm: skip CMA pages when they are not available zhaoyang.huang
2023-05-19  8:41 ` Zhaoyang Huang
2023-05-19 21:58   ` Andrew Morton
2023-05-22  2:06     ` Zhaoyang Huang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).