linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [RFC PATCH] mm, page_alloc: avoid page_to_pfn() in move_freepages()
@ 2019-11-27 10:28 Kefeng Wang
  2019-11-27 10:47 ` David Hildenbrand
  2019-11-27 11:47 ` Michal Hocko
  0 siblings, 2 replies; 12+ messages in thread
From: Kefeng Wang @ 2019-11-27 10:28 UTC (permalink / raw)
  To: linux-mm; +Cc: Kefeng Wang, Andrew Morton, Michal Hocko, Vlastimil Babka

The start_pfn and end_pfn are already available in move_freepages_block();
pfn_valid_within() should validate the pfn first, before touching the page,
or we might access an uninitialized page with CONFIG_HOLES_IN_ZONE configs.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---

Here is an oops in 4.4(arm64 enabled CONFIG_HOLES_IN_ZONE),

Unable to handle kernel NULL pointer dereference at virtual address 00000000
pgd = ffffff8008f7e000
[00000000] *pgd=0000000017ffe003, *pud=0000000017ffe003, *pmd=0000000000000000
Internal error: Oops: 96000007 [#1] SMP
CPU: 0 PID: 0 Comm: swapper/0 Tainted: G        W  O    4.4.185 #1

PC is at move_freepages+0x80/0x10c
LR is at move_freepages_block+0xd4/0xf4
pc : [<ffffff80083332e8>] lr : [<ffffff8008333448>] pstate: 80000085
[...]
[<ffffff80083332e8>] move_freepages+0x80/0x10c
[<ffffff8008333448>] move_freepages_block+0xd4/0xf4
[<ffffff8008335414>] __rmqueue+0x2bc/0x44c
[<ffffff800833580c>] get_page_from_freelist+0x268/0x600
[<ffffff8008335e84>] __alloc_pages_nodemask+0x184/0x88c
[<ffffff800837fae8>] new_slab+0xd0/0x494
[<ffffff8008381834>] ___slab_alloc.constprop.29+0x1c8/0x2e8
[<ffffff80083819a8>] __slab_alloc.constprop.28+0x54/0x84
[<ffffff8008381e68>] kmem_cache_alloc+0x64/0x198
[<ffffff80085b04e0>] __build_skb+0x44/0xa4
[<ffffff80085b06e4>] __netdev_alloc_skb+0xe4/0x134

 mm/page_alloc.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f391c0c4ed1d..59f2c2b860fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2246,19 +2246,21 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * boundary. If alignment is required, use move_freepages_block()
  */
 static int move_freepages(struct zone *zone,
-			  struct page *start_page, struct page *end_page,
+			  unsigned long start_pfn, unsigned long end_pfn,
 			  int migratetype, int *num_movable)
 {
 	struct page *page;
+	unsigned long pfn;
 	unsigned int order;
 	int pages_moved = 0;
 
-	for (page = start_page; page <= end_page;) {
-		if (!pfn_valid_within(page_to_pfn(page))) {
-			page++;
+	for (pfn = start_pfn; pfn <= end_pfn;) {
+		if (!pfn_valid_within(pfn)) {
+			pfn++;
 			continue;
 		}
 
+		page = pfn_to_page(pfn);
 		if (!PageBuddy(page)) {
 			/*
 			 * We assume that pages that could be isolated for
@@ -2268,8 +2270,7 @@ static int move_freepages(struct zone *zone,
 			if (num_movable &&
 					(PageLRU(page) || __PageMovable(page)))
 				(*num_movable)++;
-
-			page++;
+			pfn++;
 			continue;
 		}
 
@@ -2280,6 +2281,7 @@ static int move_freepages(struct zone *zone,
 		order = page_order(page);
 		move_to_free_area(page, &zone->free_area[order], migratetype);
 		page += 1 << order;
+		pfn += 1 << order;
 		pages_moved += 1 << order;
 	}
 
@@ -2289,25 +2291,22 @@ static int move_freepages(struct zone *zone,
 int move_freepages_block(struct zone *zone, struct page *page,
 				int migratetype, int *num_movable)
 {
-	unsigned long start_pfn, end_pfn;
-	struct page *start_page, *end_page;
+	unsigned long start_pfn, end_pfn, pfn;
 
 	if (num_movable)
 		*num_movable = 0;
 
-	start_pfn = page_to_pfn(page);
+	pfn = start_pfn = page_to_pfn(page);
 	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
-	start_page = pfn_to_page(start_pfn);
-	end_page = start_page + pageblock_nr_pages - 1;
 	end_pfn = start_pfn + pageblock_nr_pages - 1;
 
 	/* Do not cross zone boundaries */
 	if (!zone_spans_pfn(zone, start_pfn))
-		start_page = page;
+		start_pfn = pfn;
 	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
-	return move_freepages(zone, start_page, end_page, migratetype,
+	return move_freepages(zone, start_pfn, end_pfn, migratetype,
 								num_movable);
 }
 
-- 
2.20.1



^ permalink raw reply related	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2019-11-27 17:15 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-11-27 10:28 [RFC PATCH] mm, page_alloc: avoid page_to_pfn() in move_freepages() Kefeng Wang
2019-11-27 10:47 ` David Hildenbrand
2019-11-27 11:18   ` [PATCH] " Kefeng Wang
2019-11-27 17:06     ` David Hildenbrand
2019-11-27 11:21   ` [RFC PATCH] " Kefeng Wang
2019-11-27 11:47 ` Michal Hocko
2019-11-27 13:13   ` Kefeng Wang
2019-11-27 14:13     ` Michal Hocko
2019-11-27 14:28       ` Qian Cai
2019-11-27 14:39       ` Kefeng Wang
2019-11-27 15:09         ` Qian Cai
2019-11-27 17:15       ` David Hildenbrand

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).