* [PATCH v2 1/2] mm, page_alloc: don't convert pfn to idx when merging
@ 2016-12-16 12:00 ` Vlastimil Babka
  0 siblings, 0 replies; 8+ messages in thread
From: Vlastimil Babka @ 2016-12-16 12:00 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, Joonsoo Kim, Michal Hocko, Mel Gorman,
	Kirill A. Shutemov, Johannes Weiner, Vlastimil Babka

In __free_one_page() we do the buddy merging arithmetic on a "page/buddy
index", which is just the lower MAX_ORDER bits of the pfn. The only
operations we do that affect the higher bits are a bitwise AND and a
subtraction (in that order), and the final result is the same with the
higher bits left unmasked, as long as these bits are equal for both
buddies - which must be true by the definition of a buddy.

We can therefore use pfns directly instead of the "index" and skip the
zeroing of the bits above MAX_ORDER. This can help a bit by itself,
although the compiler might already be smart enough. It also helps the
next patch to avoid page_to_pfn() for memory hole checks.
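
To illustrate, a toy sketch (not part of the patch; the pfn values are
made up and MAX_ORDER == 11 is assumed, as on common configs):

	unsigned long pfn       = 0x12348;           /* order-3 aligned */
	unsigned long buddy_pfn = pfn ^ (1 << 3);    /* 0x12340 */

	/* old code: mask down to an index first */
	unsigned long page_idx  = pfn & ((1 << 11) - 1);          /* 0x348 */
	unsigned long buddy_idx = buddy_pfn & ((1 << 11) - 1);    /* 0x340 */
	long off_idx = (long)((buddy_idx & page_idx) - page_idx); /*   -8 */

	/* new code: operate on the pfns directly */
	long off_pfn = (long)((buddy_pfn & pfn) - pfn);           /*   -8 */

	/* off_idx == off_pfn: the AND preserves the (equal) high bits
	   and the subtraction then cancels them out. */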

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
---
Changes since v1:
o rebase to mmotm-2016-12-14-16-01
o fix mm/page_isolation.c
o don't rename 'pfn' to 'page_pfn'

 mm/internal.h       |  4 ++--
 mm/page_alloc.c     | 31 ++++++++++++++-----------------
 mm/page_isolation.c |  8 ++++----
 3 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 44d68895a9b9..ebb3cbd21937 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -131,9 +131,9 @@ struct alloc_context {
  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  */
 static inline unsigned long
-__find_buddy_index(unsigned long page_idx, unsigned int order)
+__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
 {
-	return page_idx ^ (1 << order);
+	return page_pfn ^ (1 << order);
 }
 
 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f6d5b73e1d7c..771fc8e18736 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -787,9 +787,8 @@ static inline void __free_one_page(struct page *page,
 		struct zone *zone, unsigned int order,
 		int migratetype)
 {
-	unsigned long page_idx;
-	unsigned long combined_idx;
-	unsigned long uninitialized_var(buddy_idx);
+	unsigned long combined_pfn;
+	unsigned long uninitialized_var(buddy_pfn);
 	struct page *buddy;
 	unsigned int max_order;
 
@@ -802,15 +801,13 @@ static inline void __free_one_page(struct page *page,
 	if (likely(!is_migrate_isolate(migratetype)))
 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
 
-	page_idx = pfn & ((1 << MAX_ORDER) - 1);
-
-	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
 continue_merging:
 	while (order < max_order - 1) {
-		buddy_idx = __find_buddy_index(page_idx, order);
-		buddy = page + (buddy_idx - page_idx);
+		buddy_pfn = __find_buddy_pfn(pfn, order);
+		buddy = page + (buddy_pfn - pfn);
 		if (!page_is_buddy(page, buddy, order))
 			goto done_merging;
 		/*
@@ -824,9 +821,9 @@ static inline void __free_one_page(struct page *page,
 			zone->free_area[order].nr_free--;
 			rmv_page_order(buddy);
 		}
-		combined_idx = buddy_idx & page_idx;
-		page = page + (combined_idx - page_idx);
-		page_idx = combined_idx;
+		combined_pfn = buddy_pfn & pfn;
+		page = page + (combined_pfn - pfn);
+		pfn = combined_pfn;
 		order++;
 	}
 	if (max_order < MAX_ORDER) {
@@ -841,8 +838,8 @@ static inline void __free_one_page(struct page *page,
 		if (unlikely(has_isolate_pageblock(zone))) {
 			int buddy_mt;
 
-			buddy_idx = __find_buddy_index(page_idx, order);
-			buddy = page + (buddy_idx - page_idx);
+			buddy_pfn = __find_buddy_pfn(pfn, order);
+			buddy = page + (buddy_pfn - pfn);
 			buddy_mt = get_pageblock_migratetype(buddy);
 
 			if (migratetype != buddy_mt
@@ -867,10 +864,10 @@ static inline void __free_one_page(struct page *page,
 	 */
 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
 		struct page *higher_page, *higher_buddy;
-		combined_idx = buddy_idx & page_idx;
-		higher_page = page + (combined_idx - page_idx);
-		buddy_idx = __find_buddy_index(combined_idx, order + 1);
-		higher_buddy = higher_page + (buddy_idx - combined_idx);
+		combined_pfn = buddy_pfn & pfn;
+		higher_page = page + (combined_pfn - pfn);
+		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
+		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
 			list_add_tail(&page->lru,
 				&zone->free_area[order].free_list[migratetype]);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index a5594bfcc5ed..dadb7e74d7d6 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -83,7 +83,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 	unsigned long flags, nr_pages;
 	bool isolated_page = false;
 	unsigned int order;
-	unsigned long page_idx, buddy_idx;
+	unsigned long pfn, buddy_pfn;
 	struct page *buddy;
 
 	zone = page_zone(page);
@@ -102,9 +102,9 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 	if (PageBuddy(page)) {
 		order = page_order(page);
 		if (order >= pageblock_order) {
-			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
-			buddy_idx = __find_buddy_index(page_idx, order);
-			buddy = page + (buddy_idx - page_idx);
+			pfn = page_to_pfn(page);
+			buddy_pfn = __find_buddy_pfn(pfn, order);
+			buddy = page + (buddy_pfn - pfn);
 
 			if (pfn_valid_within(page_to_pfn(buddy)) &&
 			    !is_migrate_isolate_page(buddy)) {
-- 
2.11.0

* [PATCH v2 2/2] mm, page_alloc: avoid page_to_pfn() when merging buddies
  2016-12-16 12:00 ` Vlastimil Babka
@ 2016-12-16 12:00   ` Vlastimil Babka
  -1 siblings, 0 replies; 8+ messages in thread
From: Vlastimil Babka @ 2016-12-16 12:00 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, Joonsoo Kim, Michal Hocko, Mel Gorman,
	Kirill A. Shutemov, Johannes Weiner, Vlastimil Babka

On architectures that allow memory holes, page_is_buddy() has to perform
page_to_pfn() to check for the memory hole. After the previous patch, we have
the pfn already available in __free_one_page(), which is the only caller of
page_is_buddy(), so move the check there and avoid page_to_pfn().
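
For context, pfn_valid_within() only does real work on architectures
that can have holes within a zone's mem_map; roughly (as defined in
include/linux/mmzone.h at the time - verify against your tree):

	#ifdef CONFIG_HOLES_IN_ZONE
	#define pfn_valid_within(pfn) pfn_valid(pfn)
	#else
	#define pfn_valid_within(pfn) (1)
	#endif

so on most configurations the check compiles away entirely; only
configs with CONFIG_HOLES_IN_ZONE (e.g. ia64) perform a real check.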

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
---
 mm/page_alloc.c     | 10 +++++-----
 mm/page_isolation.c |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 771fc8e18736..f546eeea24d6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -714,7 +714,7 @@ static inline void rmv_page_order(struct page *page)
 /*
  * This function checks whether a page is free && is the buddy
  * we can do coalesce a page and its buddy if
- * (a) the buddy is not in a hole &&
+ * (a) the buddy is not in a hole (check before calling!) &&
  * (b) the buddy is in the buddy system &&
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
@@ -729,9 +729,6 @@ static inline void rmv_page_order(struct page *page)
 static inline int page_is_buddy(struct page *page, struct page *buddy,
 							unsigned int order)
 {
-	if (!pfn_valid_within(page_to_pfn(buddy)))
-		return 0;
-
 	if (page_is_guard(buddy) && page_order(buddy) == order) {
 		if (page_zone_id(page) != page_zone_id(buddy))
 			return 0;
@@ -808,6 +805,9 @@ static inline void __free_one_page(struct page *page,
 	while (order < max_order - 1) {
 		buddy_pfn = __find_buddy_pfn(pfn, order);
 		buddy = page + (buddy_pfn - pfn);
+
+		if (!pfn_valid_within(buddy_pfn))
+			goto done_merging;
 		if (!page_is_buddy(page, buddy, order))
 			goto done_merging;
 		/*
@@ -862,7 +862,7 @@ static inline void __free_one_page(struct page *page,
 	 * so it's less likely to be used soon and more likely to be merged
 	 * as a higher order page
 	 */
-	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
+	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
 		struct page *higher_page, *higher_buddy;
 		combined_pfn = buddy_pfn & pfn;
 		higher_page = page + (combined_pfn - pfn);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index dadb7e74d7d6..f4e17a57926a 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -106,7 +106,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 			buddy_pfn = __find_buddy_pfn(pfn, order);
 			buddy = page + (buddy_pfn - pfn);
 
-			if (pfn_valid_within(page_to_pfn(buddy)) &&
+			if (pfn_valid_within(buddy_pfn) &&
 			    !is_migrate_isolate_page(buddy)) {
 				__isolate_free_page(page, order);
 				isolated_page = true;
-- 
2.11.0

* Re: [PATCH v2 2/2] mm, page_alloc: avoid page_to_pfn() when merging buddies
  2016-12-16 12:00   ` Vlastimil Babka
@ 2017-03-07 18:40     ` Tony Luck
  -1 siblings, 0 replies; 8+ messages in thread
From: Tony Luck @ 2017-03-07 18:40 UTC (permalink / raw)
  To: Vlastimil Babka
  Cc: Andrew Morton, linux-mm, Linux Kernel Mailing List, Joonsoo Kim,
	Michal Hocko, Mel Gorman, Kirill A. Shutemov, Johannes Weiner

On Fri, Dec 16, 2016 at 4:00 AM, Vlastimil Babka <vbabka@suse.cz> wrote:
> On architectures that allow memory holes, page_is_buddy() has to perform
> page_to_pfn() to check for the memory hole. After the previous patch, we have
> the pfn already available in __free_one_page(), which is the only caller of
> page_is_buddy(), so move the check there and avoid page_to_pfn().
>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Acked-by: Mel Gorman <mgorman@techsingularity.net>

git bisect says this patch is the cause of an ia64 crash early in boot.

Reverting 13ad59df67f19788f6c22985b1a33e466eceb643
from v4.11-rc1 makes it boot again.

The commit message talks about the "only caller" of page_is_buddy().
But grep shows two call sites:

mm/page_alloc.c:816:            if (!page_is_buddy(page, buddy, order))
mm/page_alloc.c:876:            if (page_is_buddy(higher_page, higher_buddy, order + 1)) {


The crash happens due to a bad pointer in free_one_page()

Initmem setup node 0 [mem 0x0000000001000000-0x00000004fbffffff]
Built 1 zonelists in Node order, mobility grouping on.  Total pages: 260199
Policy zone: Normal
Kernel command line: BOOT_IMAGE=scsi0:\efi\SuSE\l-bisect.gz console=tty1 console=uart,io,0x3f8 intel_iommu=off root=/dev/sda3
DMAR: IOMMU disabled
PID hash table entries: 4096 (order: -1, 32768 bytes)
Sorting __ex_table...
software IO TLB [mem 0x060b0000-0x0a0b0000] (64MB) mapped at [e0000000060b0000-e00000000a0affff]
Unable to handle kernel paging request at virtual address a07fffffffc80018
swapper[0]: Oops 11012296146944 [1]
Modules linked in:

CPU: 0 PID: 0 Comm: swapper Not tainted 4.10.0-bisect-06032-g13ad59d #12
task: a000000101300000 task.stack: a000000101300000
psr : 00001210084a2010 ifs : 8000000000000a98 ip  : [<a0000001001cc1e1>]    Not tainted (4.10.0-bisect-06032-g13ad59d)
ip is at free_one_page+0x561/0x740
unat: 0000000000000000 pfs : 0000000000000a98 rsc : 0000000000000003
rnat: 20c49ba5e353f7cf bsps: 04ba52743edc9494 pr  : 966696816682aa69
ldrs: 0000000000000000 ccv : 00000000000355b9 fpsr: 0009804c8a70433f
csd : 0930ffff00063000 ssd : 0930ffff00063000
b0  : a0000001001cc170 b6  : a0000001011a43c0 b7  : a000000100721ba0
f6  : 000000000000000000000 f7  : 1003e0044b82fa09b5a53
f8  : 1003e0000000000000000 f9  : 1003e00000000000016e8
f10 : 1003e0000000000000008 f11 : 1003e20c49ba5e353f7cf
r1  : a000000101a8ef60 r2  : 000000000000000e r3  : 0000000000000000
r8  : 0000000000000001 r9  : 0000000000000000 r10 : 0000000000230000
r11 : 000000000004fc00 r12 : a00000010130fdb0 r13 : a000000101300000
r14 : 0000000000000000 r15 : a07fffffffc80000 r16 : ffffffffffff0000
r17 : 0000000000000001 r18 : a07fffffffdd0037 r19 : 000000000000000c
r20 : 0000000000000001 r21 : a00000010130fdcc r22 : 0000000000044000
r23 : ffffffffffffff80 r24 : a07fffffffd60018 r25 : 0010000000000000
r26 : 0010000000000000 r27 : a07fffffffd60030 r28 : 0000000000000100
r29 : a0000001018b99c8 r30 : a0000001018b99c8 r31 : 000000000004fc00
unwind: cannot stack reg state!
unwind.desc_label_state(): out of memory
unwind: stack underflow!
unwind: failed to find state labeled 0x1

* Re: [PATCH v2 2/2] mm, page_alloc: avoid page_to_pfn() when merging buddies
  2017-03-07 18:40     ` Tony Luck
@ 2017-03-07 19:36       ` Tony Luck
  -1 siblings, 0 replies; 8+ messages in thread
From: Tony Luck @ 2017-03-07 19:36 UTC (permalink / raw)
  To: Vlastimil Babka
  Cc: Andrew Morton, linux-mm, Linux Kernel Mailing List, Joonsoo Kim,
	Michal Hocko, Mel Gorman, Kirill A. Shutemov, Johannes Weiner

On Tue, Mar 7, 2017 at 10:40 AM, Tony Luck <tony.luck@gmail.com> wrote:
> The commit messages talks about the "only caller" of page_is_buddy().
> But grep shows two call sites:
>
> mm/page_alloc.c:816:            if (!page_is_buddy(page, buddy, order))
> mm/page_alloc.c:876:            if (page_is_buddy(higher_page,

and it looks like the second one is the problem:

        if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
                struct page *higher_page, *higher_buddy;
                combined_pfn = buddy_pfn & pfn;
                higher_page = page + (combined_pfn - pfn);
                buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
                higher_buddy = higher_page + (buddy_pfn - combined_pfn);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

Although outer "if" checked for pfn_valid_within(buddy_pfn),
we actually pass "higher_buddy" to this call of page_is_buddy().
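
A minimal sketch of one possible fix (untested, against the code quoted
above) would be to validate the recomputed buddy_pfn before the second
page_is_buddy() call:

	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
	/* buddy_pfn was recomputed for order + 1, so re-check it
	 * before touching higher_buddy on hole-y architectures */
	if (pfn_valid_within(buddy_pfn) &&
	    page_is_buddy(higher_page, higher_buddy, order + 1)) {
		list_add_tail(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		goto out;
	}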

-Tony
