* [PATCHv3] mm: skip CMA pages when they are not available
From: zhaoyang.huang @ 2023-05-22  3:08 UTC
  To: Andrew Morton, Matthew Wilcox, Minchan Kim, Joonsoo Kim,
	linux-mm, linux-kernel, Zhaoyang Huang, ke.wang

From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

This patch fixes unproductive reclaiming of CMA pages by skipping them when
they are not available to the current allocation context. It arises from the
OOM issue below, which was caused by a large proportion of MIGRATE_CMA pages
among the free pages: the "(C)" annotations in the DMA32 line show that almost
all free memory sat in CMA pageblocks, which the failing GFP_NOIO (unmovable)
allocation could not use, so reclaiming more CMA pages could not help.

[   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
[   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
[   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
...
[   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
[   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
[   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
v2: update commit message and fix build error when CONFIG_CMA is not set
v3: update code and comments
---
 mm/vmscan.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd6637f..17cd246 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2192,7 +2192,24 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 	}
 
 }
-
+#ifdef CONFIG_CMA
+/*
+ * It is a waste of effort to scan and reclaim CMA pages if they are not
+ * available to the current allocation context.
+ */
+static bool skip_cma(struct page *page, struct scan_control *sc)
+{
+	if (!current_is_kswapd() && gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE
+		&& get_pageblock_migratetype(page) == MIGRATE_CMA)
+		return true;
+	return false;
+}
+#else
+static bool skip_cma(struct page *page, struct scan_control *sc)
+{
+	return false;
+}
+#endif
 /*
  * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
  *
@@ -2225,10 +2242,12 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
 	unsigned long skipped = 0;
 	unsigned long scan, total_scan, nr_pages;
+	struct page *page;
 	LIST_HEAD(folios_skipped);
 
 	total_scan = 0;
 	scan = 0;
+
 	while (scan < nr_to_scan && !list_empty(src)) {
 		struct list_head *move_to = src;
 		struct folio *folio;
@@ -2239,12 +2258,14 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (folio_zonenum(folio) > sc->reclaim_idx) {
+		page = &folio->page;
+
+		if (folio_zonenum(folio) > sc->reclaim_idx
+			|| skip_cma(page, sc)) {
 			nr_skipped[folio_zonenum(folio)] += nr_pages;
 			move_to = &folios_skipped;
 			goto move;
 		}
-
 		/*
 		 * Do not count skipped folios because that makes the function
 		 * return with no isolated folios if the LRU mostly contains
-- 
1.9.1



* Re: [PATCHv3] mm: skip CMA pages when they are not available
From: Matthew Wilcox @ 2023-05-22  4:07 UTC
  To: zhaoyang.huang
  Cc: Andrew Morton, Minchan Kim, Joonsoo Kim, linux-mm, linux-kernel,
	Zhaoyang Huang, ke.wang

On Mon, May 22, 2023 at 11:08:02AM +0800, zhaoyang.huang wrote:
> +static bool skip_cma(struct page *page, struct scan_control *sc)
> +{
> +	if (!current_is_kswapd() && gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE
> +		&& get_pageblock_migratetype(page) == MIGRATE_CMA)
> +		return true;

Putting the 'return' at the same level of indentation as the second half
of the conditional is wrong.  It confuses the reader.  Also, the &&
needs to go at the end of the line, not at the beginning (read the
codingstyle documentation!).  Also, there's no good reason to use such
long lines, i.e. do this instead:

	if (!current_is_kswapd() &&
			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
			get_pageblock_migratetype(page) == MIGRATE_CMA)
		return true;

if you prefer, this style of indent is also acceptable:

	if (!current_is_kswapd() &&
	    gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
	    get_pageblock_migratetype(page) == MIGRATE_CMA)
		return true;

> @@ -2225,10 +2242,12 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
>  	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
>  	unsigned long skipped = 0;
>  	unsigned long scan, total_scan, nr_pages;
> +	struct page *page;

No, don't do this.

>  	LIST_HEAD(folios_skipped);
>  
>  	total_scan = 0;
>  	scan = 0;
> +

Don't add this completely unrelated whitespace change either.

>  	while (scan < nr_to_scan && !list_empty(src)) {
>  		struct list_head *move_to = src;
>  		struct folio *folio;
> @@ -2239,12 +2258,14 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
>  		nr_pages = folio_nr_pages(folio);
>  		total_scan += nr_pages;
>  
> -		if (folio_zonenum(folio) > sc->reclaim_idx) {
> +		page = &folio->page;
> +
> +		if (folio_zonenum(folio) > sc->reclaim_idx
> +			|| skip_cma(page, sc)) {

Again, this is not where the || goes.

And skip_cma() should take a folio, not a page.  It's unreasonable
to ask you to convert get_pageblock_migratetype(),
get_pfnblock_flags_mask(), __get_pfnblock_flags_mask(), etc to
use a folio (... although someone looking for a project could do that
...).  Instead, you should do the folio->page conversion inside
skip_cma().
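A minimal sketch of what that could look like, assuming the same helpers
as the v3 patch above (a hypothetical illustration, not necessarily the
final merged code):

	/* CMA pages cannot satisfy this allocation; skip rather than reclaim. */
	static bool skip_cma(struct folio *folio, struct scan_control *sc)
	{
		return !current_is_kswapd() &&
		       gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
		       get_pageblock_migratetype(&folio->page) == MIGRATE_CMA;
	}

The caller in isolate_lru_folios() would then test skip_cma(folio, sc)
directly, the local struct page * would go away, and the CONFIG_CMA=n
stub would take a folio as well.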

>  			nr_skipped[folio_zonenum(folio)] += nr_pages;
>  			move_to = &folios_skipped;
>  			goto move;
>  		}
> -

No unnecessary whitespace changes.

