* [PATCH v2 1/3] mm: more intensive memory corruption debug
@ 2011-11-18 16:25 ` Stanislaw Gruszka
  0 siblings, 0 replies; 32+ messages in thread
From: Stanislaw Gruszka @ 2011-11-18 16:25 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, Mel Gorman, Andrea Arcangeli, Andrew Morton,
	Rafael J. Wysocki, Christoph Lameter, Stanislaw Gruszka

With CONFIG_DEBUG_PAGEALLOC configured, the CPU will generate an
exception on any access (read or write) to an unallocated page, which
allows catching code that corrupts memory. However, the kernel tries to
maximize memory usage, so there are usually few free pages in the
system and buggy code usually ends up corrupting some crucial data.

This patch changes the buddy allocator to keep more free/protected
pages and to interlace free/protected and allocated pages, to increase
the probability of catching a corruption.

When the kernel is compiled with CONFIG_DEBUG_PAGEALLOC,
debug_guardpage_minorder defines the minimum order used by the page
allocator to grant a request. The requested size will be returned with
the remaining pages used as guard pages.
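
For illustration (example mine, not part of the patch): booting with

	debug_guardpage_minorder=1

on the kernel command line makes expand() keep every order-0 buddy it
splits off as a guard page instead of returning it to the free lists.
An order-0 allocation then typically consumes an order-1 block: one
page is handed out while its buddy stays non-present, so allocated and
guard pages end up interleaved.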

v1 -> v2:
 - rename "corrupt" pages to guard pages
 - document the inability to debug memory corruption that happens at bus level
 - document the max-order limitation
 - add comments about adding/removing guard pages to/from the buddy allocator
 - do not change types in expand()

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
 Documentation/kernel-parameters.txt |   18 ++++++++
 include/linux/mm.h                  |   17 ++++++++
 include/linux/page-debug-flags.h    |    4 +-
 mm/Kconfig.debug                    |    5 ++
 mm/page_alloc.c                     |   75 ++++++++++++++++++++++++++++++++---
 5 files changed, 112 insertions(+), 7 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a0c5c5f..92e2076 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -623,6 +623,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	no_debug_objects
 			[KNL] Disable object debugging
 
+	debug_guardpage_minorder=
+			[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
+			parameter allows control order of pages that will be
+			intentionally kept free (and hence protected) by buddy
+			allocator. Bigger value increase probability of
+			catching random memory corruption, but reduce amount
+			of memory for normal system use. Maximum possible
+			value is MAX_ORDER/2. Setting this parameter to 1 or 2,
+			should be enough to identify most random memory
+			corruption problems caused by bugs in kernel/drivers
+			code when CPU write to (or read from) random memory
+			location. Note that there exist class of memory
+			corruptions problems caused by buggy H/W or F/W or by
+			drivers badly programing DMA (basically when memory is
+			written at bus level and CPU MMU is bypassed), which
+			are not detectable by CONFIG_DEBUG_PAGEALLOC, hence this
+			option would not help tracking down these problems too.
+
 	debugpat	[X86] Enable PAT debugging
 
 	decnet.addr=	[HW,NET]
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0a22db1..90c3f69 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1617,5 +1617,22 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern unsigned int _debug_guardpage_minorder;
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+	return _debug_guardpage_minorder;
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool page_is_guard(struct page *page) { return false; }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
index b0638fd..22691f6 100644
--- a/include/linux/page-debug-flags.h
+++ b/include/linux/page-debug-flags.h
@@ -13,6 +13,7 @@
 
 enum page_debug_flags {
 	PAGE_DEBUG_FLAG_POISON,		/* Page is poisoned */
+	PAGE_DEBUG_FLAG_GUARD,
 };
 
 /*
@@ -21,7 +22,8 @@ enum page_debug_flags {
  */
 
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
-#if !defined(CONFIG_PAGE_POISONING) \
+#if !defined(CONFIG_PAGE_POISONING) && \
+    !defined(CONFIG_PAGE_GUARD) \
 /* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
 #error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
 #endif
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 8b1a477..4b24432 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -4,6 +4,7 @@ config DEBUG_PAGEALLOC
 	depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
 	depends on !KMEMCHECK
 	select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
@@ -22,3 +23,7 @@ config WANT_PAGE_DEBUG_FLAGS
 config PAGE_POISONING
 	bool
 	select WANT_PAGE_DEBUG_FLAGS
+
+config PAGE_GUARD
+	bool
+	select WANT_PAGE_DEBUG_FLAGS
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9dd443d..16e4f8e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -403,6 +404,37 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+unsigned int _debug_guardpage_minorder;
+
+static int __init debug_guardpage_minorder_setup(char *buf)
+{
+	unsigned long res;
+
+	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
+		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
+		return 0;
+	}
+	_debug_guardpage_minorder = res;
+	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
+	return 0;
+}
+__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
+
+static inline void set_page_guard_flg(struct page *page)
+{
+	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+
+static inline void clear_page_guard_flg(struct page *page)
+{
+	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline void set_page_guard_flg(struct page *page) { }
+static inline void clear_page_guard_flg(struct page *page) { }
+#endif
+
 static inline void set_page_order(struct page *page, int order)
 {
 	set_page_private(page, order);
@@ -460,6 +492,11 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 	if (page_zone_id(page) != page_zone_id(buddy))
 		return 0;
 
+	if (page_is_guard(buddy) && page_order(buddy) == order) {
+		VM_BUG_ON(page_count(buddy) != 0);
+		return 1;
+	}
+
 	if (PageBuddy(buddy) && page_order(buddy) == order) {
 		VM_BUG_ON(page_count(buddy) != 0);
 		return 1;
@@ -516,11 +553,19 @@ static inline void __free_one_page(struct page *page,
 		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
 			break;
-
-		/* Our buddy is free, merge with it and move up one order. */
-		list_del(&buddy->lru);
-		zone->free_area[order].nr_free--;
-		rmv_page_order(buddy);
+		/*
+		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
+		 * merge with it and move up one order.
+		 */
+		if (page_is_guard(buddy)) {
+			clear_page_guard_flg(buddy);
+			set_page_private(page, 0);
+			__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+		} else {
+			list_del(&buddy->lru);
+			zone->free_area[order].nr_free--;
+			rmv_page_order(buddy);
+		}
 		combined_idx = buddy_idx & page_idx;
 		page = page + (combined_idx - page_idx);
 		page_idx = combined_idx;
@@ -746,6 +791,23 @@ static inline void expand(struct zone *zone, struct page *page,
 		high--;
 		size >>= 1;
 		VM_BUG_ON(bad_range(zone, &page[size]));
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+		if (high < debug_guardpage_minorder()) {
+			/*
+			 * Mark as guard pages (or page), that will allow to
+			 * merge back to allocator when buddy will be freed.
+			 * Corresponding page table entries will not be touched,
+			 * pages will stay not present in virtual address space
+			 */
+			INIT_LIST_HEAD(&page[size].lru);
+			set_page_guard_flg(&page[size]);
+			set_page_private(&page[size], high);
+			/* Guard pages are not available for any usage */
+			__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
+			continue;
+		}
+#endif
 		list_add(&page[size].lru, &area->free_list[migratetype]);
 		area->nr_free++;
 		set_page_order(&page[size], high);
@@ -1756,7 +1818,8 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
 
-	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
+	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
+	    debug_guardpage_minorder() > 0)
 		return;
 
 	/*
-- 
1.7.1


* [PATCH v2 2/3] PM / Hibernate : do not count debug pages as savable
  2011-11-18 16:25 ` Stanislaw Gruszka
@ 2011-11-18 16:25   ` Stanislaw Gruszka
  -1 siblings, 0 replies; 32+ messages in thread
From: Stanislaw Gruszka @ 2011-11-18 16:25 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, Mel Gorman, Andrea Arcangeli, Andrew Morton,
	Rafael J. Wysocki, Christoph Lameter, Stanislaw Gruszka

When debugging with CONFIG_DEBUG_PAGEALLOC and
debug_guardpage_minorder > 0, we have a lot of free pages that are not
marked as such. The snapshot code accounts them as savable, which
causes hibernate memory preallocation to fail.

It is pretty hard to make hibernate allocation succeed with
debug_guardpage_minorder=1. This change at least makes it possible when
the system has a relatively large amount of RAM.

v1 -> v2:
 - rename "corrupt" pages to guard pages

Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
 kernel/power/snapshot.c |    6 ++++++
 1 files changed, 6 insertions(+), 0 deletions(-)

diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index cbe2c14..1cf8890 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -858,6 +858,9 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 	    PageReserved(page))
 		return NULL;
 
+	if (page_is_guard(page))
+		return NULL;
+
 	return page;
 }
 
@@ -920,6 +923,9 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
 		return NULL;
 
+	if (page_is_guard(page))
+		return NULL;
+
 	return page;
 }
 
-- 
1.7.1


* [PATCH v3 3/3] slub: min order when debug_guardpage_minorder > 0
  2011-11-18 16:25 ` Stanislaw Gruszka
@ 2011-11-18 16:25   ` Stanislaw Gruszka
  -1 siblings, 0 replies; 32+ messages in thread
From: Stanislaw Gruszka @ 2011-11-18 16:25 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, Mel Gorman, Andrea Arcangeli, Andrew Morton,
	Rafael J. Wysocki, Christoph Lameter, Stanislaw Gruszka

Disable SLUB debug facilities and allocate slabs at the minimal order
when debug_guardpage_minorder > 0, to increase the probability of
catching random memory corruption via a CPU exception.
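
As a rough illustration (my arithmetic, assuming 4 KiB pages; not from
the patch): with slub_max_order forced to 0, a cache of 256-byte
objects is laid out in order-0 slabs,

	4096 / 256 = 16 objects per slab

instead of packing more objects into a higher-order slab. Smaller slabs
mean more slab boundaries, so more objects sit next to the non-present
guard pages interleaved by the buddy allocator.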

v1 -> v2:
  - use slub_max_order to minimize the slub order

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
 mm/slub.c |    3 +++
 1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 7d2a996..a66be56 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3645,6 +3645,9 @@ void __init kmem_cache_init(void)
 	struct kmem_cache *temp_kmem_cache_node;
 	unsigned long kmalloc_size;
 
+	if (debug_guardpage_minorder())
+		slub_max_order = 0;
+
 	kmem_size = offsetof(struct kmem_cache, node) +
 				nr_node_ids * sizeof(struct kmem_cache_node *);
 
-- 
1.7.1


* Re: [PATCH v3 3/3] slub: min order when debug_guardpage_minorder > 0
  2011-11-18 16:25   ` Stanislaw Gruszka
@ 2011-11-21 17:15     ` Christoph Lameter
  -1 siblings, 0 replies; 32+ messages in thread
From: Christoph Lameter @ 2011-11-21 17:15 UTC (permalink / raw)
  To: Stanislaw Gruszka
  Cc: linux-mm, linux-kernel, Mel Gorman, Andrea Arcangeli,
	Andrew Morton, Rafael J. Wysocki

On Fri, 18 Nov 2011, Stanislaw Gruszka wrote:

> Disable slub debug facilities and allocate slabs at minimal order when
> debug_guardpage_minorder > 0 to increase probability to catch random
> memory corruption by cpu exception.

Acked-by: Christoph Lameter <cl@linux.com>

* Re: [PATCH v2 1/3] mm: more intensive memory corruption debug
  2011-11-18 16:25 ` Stanislaw Gruszka
@ 2011-11-22 21:56   ` Andrew Morton
  -1 siblings, 0 replies; 32+ messages in thread
From: Andrew Morton @ 2011-11-22 21:56 UTC (permalink / raw)
  To: Stanislaw Gruszka
  Cc: linux-mm, linux-kernel, Mel Gorman, Andrea Arcangeli,
	Rafael J. Wysocki, Christoph Lameter

On Fri, 18 Nov 2011 17:25:05 +0100
Stanislaw Gruszka <sgruszka@redhat.com> wrote:

> With CONFIG_DEBUG_PAGEALLOC configured, cpu will generate exception on
> access (read,write) to not allocated page, what allow to catch code
> which corrupt memory. However kernel is trying to maximalise memory
> usage, hence there is usually not much free pages in the system and
> buggy code usually corrupt some crucial data.
> 
> This patch change buddy allocator to keep more free/protected pages
> and interlace free/protected and allocated pages to increase probability
> of catch a corruption.
> 
> When kernel is compiled with CONFIG_DEBUG_PAGEALLOC,
> debug_guardpage_minorder defines the minimum order used by the page
> allocator to grant a request. The requested size will be returned with
> the remaining pages used as guard pages.
> 

I added this:

  The default value of debug_guardpage_minorder is zero: no change
  from current behaviour.

correct?

>
> ...
>
> +static int __init debug_guardpage_minorder_setup(char *buf)
> +{
> +	unsigned long res;
> +
> +	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
> +		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
> +		return 0;
> +	}
> +	_debug_guardpage_minorder = res;
> +	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
> +	return 0;
> +}
> +__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
> +
> +static inline void set_page_guard_flg(struct page *page)

"flag" not "flg", please ;)

> +{
> +	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
> +}
> +
> +static inline void clear_page_guard_flg(struct page *page)
> +{
> +	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
> +}

Why is it safe to use the non-atomic bitops here?

Please verify that CONFIG_WANT_PAGE_DEBUG_FLAGS is always reliably
enabled when this feature is turned on.

>
> ...
>


Some changes I made - please review.


 Documentation/kernel-parameters.txt |   31 +++++++++++++-------------
 mm/page_alloc.c                     |   12 +++++-----
 2 files changed, 22 insertions(+), 21 deletions(-)

diff -puN Documentation/kernel-parameters.txt~mm-more-intensive-memory-corruption-debug-fix Documentation/kernel-parameters.txt
--- a/Documentation/kernel-parameters.txt~mm-more-intensive-memory-corruption-debug-fix
+++ a/Documentation/kernel-parameters.txt
@@ -625,21 +625,22 @@ bytes respectively. Such letter suffixes
 
 	debug_guardpage_minorder=
 			[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
-			parameter allows control order of pages that will be
-			intentionally kept free (and hence protected) by buddy
-			allocator. Bigger value increase probability of
-			catching random memory corruption, but reduce amount
-			of memory for normal system use. Maximum possible
-			value is MAX_ORDER/2. Setting this parameter to 1 or 2,
-			should be enough to identify most random memory
-			corruption problems caused by bugs in kernel/drivers
-			code when CPU write to (or read from) random memory
-			location. Note that there exist class of memory
-			corruptions problems caused by buggy H/W or F/W or by
-			drivers badly programing DMA (basically when memory is
-			written at bus level and CPU MMU is bypassed), which
-			are not detectable by CONFIG_DEBUG_PAGEALLOC, hence this
-			option would not help tracking down these problems too.
+			parameter allows control of the order of pages that will
+			be intentionally kept free (and hence protected) by the
+			buddy allocator. Bigger value increase the probability
+			of catching random memory corruption, but reduce the
+			amount of memory for normal system use. The maximum
+			possible value is MAX_ORDER/2.  Setting this parameter
+			to 1 or 2 should be enough to identify most random
+			memory corruption problems caused by bugs in kernel or
+			driver code when a CPU writes to (or reads from) a
+			random memory location. Note that there exists a class
+			of memory corruptions problems caused by buggy H/W or
+			F/W or by drivers badly programing DMA (basically when
+			memory is written at bus level and the CPU MMU is
+			bypassed) which are not detectable by
+			CONFIG_DEBUG_PAGEALLOC, hence this option will not help
+			tracking down these problems.
 
 	debugpat	[X86] Enable PAT debugging
 
diff -puN mm/page_alloc.c~mm-more-intensive-memory-corruption-debug-fix mm/page_alloc.c
--- a/mm/page_alloc.c~mm-more-intensive-memory-corruption-debug-fix
+++ a/mm/page_alloc.c
@@ -441,18 +441,18 @@ static int __init debug_guardpage_minord
 }
 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 
-static inline void set_page_guard_flg(struct page *page)
+static inline void set_page_guard_flag(struct page *page)
 {
 	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
 }
 
-static inline void clear_page_guard_flg(struct page *page)
+static inline void clear_page_guard_flag(struct page *page)
 {
 	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
 }
 #else
-static inline void set_page_guard_flg(struct page *page) { }
-static inline void clear_page_guard_flg(struct page *page) { }
+static inline void set_page_guard_flag(struct page *page) { }
+static inline void clear_page_guard_flag(struct page *page) { }
 #endif
 
 static inline void set_page_order(struct page *page, int order)
@@ -578,7 +578,7 @@ static inline void __free_one_page(struc
 		 * merge with it and move up one order.
 		 */
 		if (page_is_guard(buddy)) {
-			clear_page_guard_flg(buddy);
+			clear_page_guard_flag(buddy);
 			set_page_private(page, 0);
 			__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 		} else {
@@ -821,7 +821,7 @@ static inline void expand(struct zone *z
 			 * pages will stay not present in virtual address space
 			 */
 			INIT_LIST_HEAD(&page[size].lru);
-			set_page_guard_flg(&page[size]);
+			set_page_guard_flag(&page[size]);
 			set_page_private(&page[size], high);
 			/* Guard pages are not available for any usage */
 			__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
_


* Re: [PATCH v2 1/3] mm: more intensive memory corruption debug
  2011-11-22 21:56   ` Andrew Morton
@ 2011-11-23 13:25     ` Stanislaw Gruszka
  -1 siblings, 0 replies; 32+ messages in thread
From: Stanislaw Gruszka @ 2011-11-23 13:25 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, Mel Gorman, Andrea Arcangeli,
	Rafael J. Wysocki, Christoph Lameter

On Tue, Nov 22, 2011 at 01:56:08PM -0800, Andrew Morton wrote:
> On Fri, 18 Nov 2011 17:25:05 +0100
> Stanislaw Gruszka <sgruszka@redhat.com> wrote:
> I added this:
> 
>   The default value of debug_guardpage_minorder is zero: no change
>   from current behaviour.
> 
> correct?
Yes.

> > +static inline void clear_page_guard_flg(struct page *page)
> > +{
> > +	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
> > +}
> 
> Why is it safe to use the non-atomic bitops here.
Clearing/setting the flag is done only in __free_one_page()/expand(),
so the operations are protected by zone->lock.
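
(For reference, a minimal sketch of the locking context as I read
mm/page_alloc.c, with the call chains abbreviated:

	/* free path: the merge loop, which may clear the guard flag
	 * on a buddy, runs with the lock held */
	spin_lock(&zone->lock);
	__free_one_page(page, zone, order, migratetype);
	spin_unlock(&zone->lock);

	/* alloc path: __rmqueue() -> expand(), which may set the
	 * guard flag, runs under the same zone->lock */

so the two non-atomic bitops are serialized against each other.)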

> Please verify that CONFIG_WANT_PAGE_DEBUG_FLAGS is always reliably
> enabled when this feature is turned on.
The change in mm/Kconfig.debug ensures that CONFIG_WANT_PAGE_DEBUG_FLAGS
is set whenever CONFIG_DEBUG_PAGEALLOC is: DEBUG_PAGEALLOC now selects
either PAGE_POISONING or PAGE_GUARD, and both of those select
WANT_PAGE_DEBUG_FLAGS.

> Some changes I made - please review.
Looks good, thanks Andrew!

Stanislaw

* Re: [PATCH v3 3/3] slub: min order when debug_guardpage_minorder > 0
  2011-11-18 16:25   ` Stanislaw Gruszka
@ 2011-12-07 22:07     ` David Rientjes
  -1 siblings, 0 replies; 32+ messages in thread
From: David Rientjes @ 2011-12-07 22:07 UTC (permalink / raw)
  To: Stanislaw Gruszka
  Cc: linux-mm, linux-kernel, Mel Gorman, Andrea Arcangeli,
	Andrew Morton, Rafael J. Wysocki, Christoph Lameter

On Fri, 18 Nov 2011, Stanislaw Gruszka wrote:

> diff --git a/mm/slub.c b/mm/slub.c
> index 7d2a996..a66be56 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3645,6 +3645,9 @@ void __init kmem_cache_init(void)
>  	struct kmem_cache *temp_kmem_cache_node;
>  	unsigned long kmalloc_size;
>  
> +	if (debug_guardpage_minorder())
> +		slub_max_order = 0;
> +
>  	kmem_size = offsetof(struct kmem_cache, node) +
>  				nr_node_ids * sizeof(struct kmem_cache_node *);
> 

I'd recommend at least printing a warning about why slub_max_order was
reduced, because users may otherwise wonder why they can no longer
change any cache's order with /sys/kernel/slab/cache/order.
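
(A hypothetical sketch of such a warning; the message text is mine, not
David's:

	if (debug_guardpage_minorder()) {
		slub_max_order = 0;
		printk(KERN_INFO "SLUB: slub_max_order forced to 0 "
		       "because debug_guardpage_minorder > 0\n");
	}

so the capped order would show up in dmesg next to SLUB's usual
boot-time output.)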

* Re: [PATCH v3 3/3] slub: min order when debug_guardpage_minorder > 0
  2011-12-07 22:07     ` David Rientjes
@ 2011-12-08  7:33       ` Stanislaw Gruszka
  -1 siblings, 0 replies; 32+ messages in thread
From: Stanislaw Gruszka @ 2011-12-08  7:33 UTC (permalink / raw)
  To: David Rientjes
  Cc: linux-mm, linux-kernel, Mel Gorman, Andrea Arcangeli,
	Andrew Morton, Rafael J. Wysocki, Christoph Lameter

On Wed, Dec 07, 2011 at 02:07:55PM -0800, David Rientjes wrote:
> On Fri, 18 Nov 2011, Stanislaw Gruszka wrote:
> 
> > diff --git a/mm/slub.c b/mm/slub.c
> > index 7d2a996..a66be56 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -3645,6 +3645,9 @@ void __init kmem_cache_init(void)
> >  	struct kmem_cache *temp_kmem_cache_node;
> >  	unsigned long kmalloc_size;
> >  
> > +	if (debug_guardpage_minorder())
> > +		slub_max_order = 0;
> > +
> >  	kmem_size = offsetof(struct kmem_cache, node) +
> >  				nr_node_ids * sizeof(struct kmem_cache_node *);
> > 
> 
> I'd recommend at least printing a warning about why slub_max_order was 
> reduced because users may be concerned why they can't now change any 
> cache's order with /sys/kernel/slab/cache/order.

It only happens with the debug_guardpage_minorder=N parameter, so
perhaps I'll just document that in kernel-parameters.txt.

Thanks
Stanislaw

* Re: [PATCH v3 3/3] slub: min order when debug_guardpage_minorder > 0
  2011-12-08  7:33       ` Stanislaw Gruszka
@ 2011-12-08 21:06         ` David Rientjes
  -1 siblings, 0 replies; 32+ messages in thread
From: David Rientjes @ 2011-12-08 21:06 UTC (permalink / raw)
  To: Stanislaw Gruszka
  Cc: linux-mm, linux-kernel, Mel Gorman, Andrea Arcangeli,
	Andrew Morton, Rafael J. Wysocki, Christoph Lameter

On Thu, 8 Dec 2011, Stanislaw Gruszka wrote:

> > > diff --git a/mm/slub.c b/mm/slub.c
> > > index 7d2a996..a66be56 100644
> > > --- a/mm/slub.c
> > > +++ b/mm/slub.c
> > > @@ -3645,6 +3645,9 @@ void __init kmem_cache_init(void)
> > >  	struct kmem_cache *temp_kmem_cache_node;
> > >  	unsigned long kmalloc_size;
> > >  
> > > +	if (debug_guardpage_minorder())
> > > +		slub_max_order = 0;
> > > +
> > >  	kmem_size = offsetof(struct kmem_cache, node) +
> > >  				nr_node_ids * sizeof(struct kmem_cache_node *);
> > > 
> > 
> > I'd recommend at least printing a warning about why slub_max_order was 
> > reduced because users may be concerned why they can't now change any 
> > cache's order with /sys/kernel/slab/cache/order.
> 
> It's only happen with debug_guardpage_minorder=N parameter, so
> perhaps I'll just document that in kernel-parameters.txt
> 

SLUB will output a line in dmesg that specifies the possible orders,
so it would be helpful to also note that those can change because of
debug_guardpage_minorder, in both Documentation/vm/slub.txt and the
"order" file entry in Documentation/ABI/testing/sysfs-kernel-slab.

Thanks!

* [PATCH -mm] slub: document setting min order with debug_guardpage_minorder > 0
  2011-12-08 21:06         ` David Rientjes
@ 2011-12-12 14:59           ` Stanislaw Gruszka
  -1 siblings, 0 replies; 32+ messages in thread
From: Stanislaw Gruszka @ 2011-12-12 14:59 UTC (permalink / raw)
  To: David Rientjes
  Cc: linux-mm, linux-kernel, Mel Gorman, Andrea Arcangeli,
	Andrew Morton, Rafael J. Wysocki, Christoph Lameter

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
English is hard (definitely harder than the C language :-), so please
correct me if I wrote something wrong.

 Documentation/ABI/testing/sysfs-kernel-slab |    4 +++-
 Documentation/vm/slub.txt                   |    4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index 8b093f8..d84ca80 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -345,7 +345,9 @@ Description:
 		allocated.  It is writable and can be changed to increase the
 		number of objects per slab.  If a slab cannot be allocated
 		because of fragmentation, SLUB will retry with the minimum order
-		possible depending on its characteristics.
+		possible depending on its characteristics. 
+		When debug_guardpage_minorder > 0 parameter is specified, the
+		minimum possible order is used and cannot be changed.
 
 What:		/sys/kernel/slab/cache/order_fallback
 Date:		April 2008
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index f464f47..dbf02ad 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -131,7 +131,9 @@ slub_min_objects.
 slub_max_order specified the order at which slub_min_objects should no
 longer be checked. This is useful to avoid SLUB trying to generate
 super large order pages to fit slub_min_objects of a slab cache with
-large object sizes into one high order page.
+large object sizes into one high order page. Setting parameter
+debug_guardpage_minorder > 0 forces setting slub_max_order to 0, what
+cause minimum possible order of slabs allocation.
 
 SLUB Debug output
 -----------------
-- 
1.7.1


* Re: [PATCH -mm] slub: document setting min order with debug_guardpage_minorder > 0
  2011-12-12 14:59           ` Stanislaw Gruszka
@ 2011-12-12 23:21             ` Rafael J. Wysocki
  -1 siblings, 0 replies; 32+ messages in thread
From: Rafael J. Wysocki @ 2011-12-12 23:21 UTC (permalink / raw)
  To: Stanislaw Gruszka
  Cc: David Rientjes, linux-mm, linux-kernel, Mel Gorman,
	Andrea Arcangeli, Andrew Morton, Christoph Lameter

On Monday, December 12, 2011, Stanislaw Gruszka wrote:
> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
> ---
> English is hard (definitely harder than C language :-), so please correct
> me, if I wrote something wrong.
> 
>  Documentation/ABI/testing/sysfs-kernel-slab |    4 +++-
>  Documentation/vm/slub.txt                   |    4 +++-
>  2 files changed, 6 insertions(+), 2 deletions(-)
> 
> diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
> index 8b093f8..d84ca80 100644
> --- a/Documentation/ABI/testing/sysfs-kernel-slab
> +++ b/Documentation/ABI/testing/sysfs-kernel-slab
> @@ -345,7 +345,9 @@ Description:
>  		allocated.  It is writable and can be changed to increase the
>  		number of objects per slab.  If a slab cannot be allocated
>  		because of fragmentation, SLUB will retry with the minimum order
> -		possible depending on its characteristics.
> +		possible depending on its characteristics. 

Added trailing whitespace (please remove).

> +		When debug_guardpage_minorder > 0 parameter is specified, the
> +		minimum possible order is used and cannot be changed.

Well, I'm not sure what you wanted to say, actually?  How does one change
debug_guardpage_minorder (or specify it), for example?  Is it a kernel
command-line switch?

Also I'm not sure what "cannot be changed" is supposed to mean.  Does it
mean that /sys/cache/slab/cache/order has no effect in that case?

>  
>  What:		/sys/kernel/slab/cache/order_fallback
>  Date:		April 2008
> diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
> index f464f47..dbf02ad 100644
> --- a/Documentation/vm/slub.txt
> +++ b/Documentation/vm/slub.txt
> @@ -131,7 +131,9 @@ slub_min_objects.
>  slub_max_order specified the order at which slub_min_objects should no
>  longer be checked. This is useful to avoid SLUB trying to generate
>  super large order pages to fit slub_min_objects of a slab cache with
> -large object sizes into one high order page.
> +large object sizes into one high order page. Setting parameter
> +debug_guardpage_minorder > 0 forces setting slub_max_order to 0, what
> +cause minimum possible order of slabs allocation.
>  
>  SLUB Debug output
>  -----------------
> 

Rafael

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH -mm] slub: document setting min order with debug_guardpage_minorder > 0
  2011-12-12 23:21             ` Rafael J. Wysocki
@ 2011-12-14  0:41               ` David Rientjes
  -1 siblings, 0 replies; 32+ messages in thread
From: David Rientjes @ 2011-12-14  0:41 UTC (permalink / raw)
  To: Rafael J. Wysocki
  Cc: Stanislaw Gruszka, linux-mm, linux-kernel, Mel Gorman,
	Andrea Arcangeli, Andrew Morton, Christoph Lameter

On Tue, 13 Dec 2011, Rafael J. Wysocki wrote:

> > diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
> > index 8b093f8..d84ca80 100644
> > --- a/Documentation/ABI/testing/sysfs-kernel-slab
> > +++ b/Documentation/ABI/testing/sysfs-kernel-slab
> > @@ -345,7 +345,9 @@ Description:
> >  		allocated.  It is writable and can be changed to increase the
> >  		number of objects per slab.  If a slab cannot be allocated
> >  		because of fragmentation, SLUB will retry with the minimum order
> > -		possible depending on its characteristics.
> > +		possible depending on its characteristics. 
> 
> Added trailing whitespace (please remove).
> 
> > +		When debug_guardpage_minorder > 0 parameter is specified, the
> > +		minimum possible order is used and cannot be changed.
> 
> Well, I'm not sure what you wanted to say, actually?  How does one change
> debug_guardpage_minorder (or specify it), for example?  Is it a kernel
> command-line switch?
> 

Yeah, we'll need a reference to Documentation/kernel-parameters.txt.

> Also I'm not sure what "cannot be changed" is supposed to mean.  Does it
> mean that /sys/cache/slab/cache/order has no effect in that case?
> 

Good point, we should say that "this tunable" cannot be used to change the 
order at runtime if debug_guardpage_minorder is used on the command line.

Stanislaw, one more revision?

^ permalink raw reply	[flat|nested] 32+ messages in thread
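
For reference: debug_guardpage_minorder is indeed a kernel command-line
switch, documented in Documentation/kernel-parameters.txt and parsed at
boot by an early handler in mm/page_alloc.c.  A sketch of such a
handler, following the shape of patch 1/3 (the variable is the one
declared extern in include/linux/mm.h; the message strings are
illustrative):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>	/* MAX_ORDER */

unsigned int _debug_guardpage_minorder;	/* backs the extern in linux/mm.h */

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	/* Reject garbage and values above the documented MAX_ORDER/2 cap. */
	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);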

* Re: [PATCH -mm] slub: document setting min order with debug_guardpage_minorder > 0
  2011-12-14  0:41               ` David Rientjes
@ 2011-12-16 13:21                 ` Stanislaw Gruszka
  -1 siblings, 0 replies; 32+ messages in thread
From: Stanislaw Gruszka @ 2011-12-16 13:21 UTC (permalink / raw)
  To: David Rientjes
  Cc: Rafael J. Wysocki, linux-mm, linux-kernel, Mel Gorman,
	Andrea Arcangeli, Andrew Morton, Christoph Lameter

On Tue, Dec 13, 2011 at 04:41:34PM -0800, David Rientjes wrote:
> > > +		When debug_guardpage_minorder > 0 parameter is specified, the
> > > +		minimum possible order is used and cannot be changed.
> > 
> > Well, I'm not sure what you wanted to say, actually?  How does one change
> > debug_guardpage_minorder (or specify it), for example?  Is it a kernel
> > command-line switch?
> > 
> 
> Yeah, we'll need a reference to Documentation/kernel-parameters.txt.
> 
> > Also I'm not sure what "cannot be changed" is supposed to mean.  Does it
> > mean that /sys/cache/slab/cache/order has no effect in that case?
> > 
> 
> Good point, we should say that "this tunable" cannot be used to change the 
> order at runtime if debug_guardpage_minorder is used on the command line.
> 
> Stanislaw, one more revision?

Ehh, I silently hoped that someone else with better English skills could
fix it ;-)

As Andrew has already applied my patch (and fixed the whitespace), I'll
post the incremental patch in the next e-mail.

Thanks
Stanislaw

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH -mm] slub: debug_guardpage_minorder documentation tweak
  2011-12-16 13:21                 ` Stanislaw Gruszka
@ 2011-12-16 13:23                   ` Stanislaw Gruszka
  -1 siblings, 0 replies; 32+ messages in thread
From: Stanislaw Gruszka @ 2011-12-16 13:23 UTC (permalink / raw)
  To: David Rientjes, Andrew Morton
  Cc: Rafael J. Wysocki, linux-mm, linux-kernel, Mel Gorman,
	Andrea Arcangeli, Christoph Lameter

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
 Documentation/ABI/testing/sysfs-kernel-slab |    6 ++++--
 Documentation/vm/slub.txt                   |    7 ++++---
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index bfd1d9f..91bd6ca 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -346,8 +346,10 @@ Description:
 		number of objects per slab.  If a slab cannot be allocated
 		because of fragmentation, SLUB will retry with the minimum order
 		possible depending on its characteristics.
-		When debug_guardpage_minorder > 0 parameter is specified, the
-		minimum possible order is used and cannot be changed.
+		When the debug_guardpage_minorder=N (N > 0) parameter is
+		specified (see Documentation/kernel-parameters.txt), the
+		minimum possible order is used and this sysfs entry cannot
+		be used to change the order at run time.
 
 What:		/sys/kernel/slab/cache/order_fallback
 Date:		April 2008
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index dbf02ad..1514d9f 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -131,9 +131,10 @@ slub_min_objects.
 slub_max_order specified the order at which slub_min_objects should no
 longer be checked. This is useful to avoid SLUB trying to generate
 super large order pages to fit slub_min_objects of a slab cache with
-large object sizes into one high order page. Setting parameter
-debug_guardpage_minorder > 0 forces setting slub_max_order to 0, what
-cause minimum possible order of slabs allocation.
+large object sizes into one high order page. Setting the command
+line parameter debug_guardpage_minorder=N (N > 0) forces
+slub_max_order to 0, so that slabs are allocated with the minimum
+possible order.
 
 SLUB Debug output
 -----------------
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 32+ messages in thread
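
The "cannot be used to change the order at run time" wording follows
from SLUB's sysfs store path: writes to /sys/kernel/slab/<cache>/order
are validated against [slub_min_order, slub_max_order], and with
slub_max_order forced to 0 any higher order is rejected.  A sketch
modelled on mm/slub.c's order_store(); treat the details as
illustrative rather than the exact mainline code:

/* Fragment in the style of mm/slub.c; relies on its internal helpers. */
static ssize_t order_store(struct kmem_cache *s,
			   const char *buf, size_t length)
{
	unsigned long order;
	int err;

	err = kstrtoul(buf, 10, &order);
	if (err)
		return err;

	/*
	 * With debug_guardpage_minorder > 0, slub_max_order is 0, so
	 * any attempt to raise the order fails here with -EINVAL.
	 */
	if (order > slub_max_order || order < slub_min_order)
		return -EINVAL;

	calculate_sizes(s, order);	/* rebuild the cache layout */
	return length;
}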

* Re: [PATCH -mm] slub: debug_guardpage_minorder documentation tweak
  2011-12-16 13:23                   ` Stanislaw Gruszka
@ 2011-12-16 21:16                     ` David Rientjes
  -1 siblings, 0 replies; 32+ messages in thread
From: David Rientjes @ 2011-12-16 21:16 UTC (permalink / raw)
  To: Stanislaw Gruszka
  Cc: Andrew Morton, Rafael J. Wysocki, linux-mm, linux-kernel,
	Mel Gorman, Andrea Arcangeli, Christoph Lameter

On Fri, 16 Dec 2011, Stanislaw Gruszka wrote:

> Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>

Acked-by: David Rientjes <rientjes@google.com>

Andrew, this should be folded into 
slub-document-setting-min-order-with-debug_guardpage_minorder-0.patch 
which should be folded into 
slub-min-order-when-debug_guardpage_minorder-0.patch

Thanks!

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH -mm] slub: debug_guardpage_minorder documentation tweak
  2011-12-16 21:16                     ` David Rientjes
@ 2012-01-06 17:52                       ` David Rientjes
  -1 siblings, 0 replies; 32+ messages in thread
From: David Rientjes @ 2012-01-06 17:52 UTC (permalink / raw)
  To: Stanislaw Gruszka, Andrew Morton
  Cc: Rafael J. Wysocki, linux-mm, linux-kernel, Mel Gorman,
	Andrea Arcangeli, Christoph Lameter

On Fri, 16 Dec 2011, David Rientjes wrote:

> > Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
> 
> Acked-by: David Rientjes <rientjes@google.com>
> 
> Andrew, this should be folded into 
> slub-document-setting-min-order-with-debug_guardpage_minorder-0.patch 
> which should be folded into 
> slub-min-order-when-debug_guardpage_minorder-0.patch
> 

Andrew, this documentation improvement is still missing from linux-next.

^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2012-01-06 17:53 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-11-18 16:25 [PATCH v2 1/3] mm: more intensive memory corruption debug Stanislaw Gruszka
2011-11-18 16:25 ` [PATCH v2 2/3] PM / Hibernate : do not count debug pages as savable Stanislaw Gruszka
2011-11-18 16:25 ` [PATCH v3 3/3] slub: min order when debug_guardpage_minorder > 0 Stanislaw Gruszka
2011-11-21 17:15   ` Christoph Lameter
2011-12-07 22:07   ` David Rientjes
2011-12-08  7:33     ` Stanislaw Gruszka
2011-12-08 21:06       ` David Rientjes
2011-12-12 14:59         ` [PATCH -mm] slub: document setting min order with " Stanislaw Gruszka
2011-12-12 23:21           ` Rafael J. Wysocki
2011-12-14  0:41             ` David Rientjes
2011-12-16 13:21               ` Stanislaw Gruszka
2011-12-16 13:23                 ` [PATCH -mm] slub: debug_guardpage_minorder documentation tweak Stanislaw Gruszka
2011-12-16 21:16                   ` David Rientjes
2012-01-06 17:52                     ` David Rientjes
2011-11-22 21:56 ` [PATCH v2 1/3] mm: more intensive memory corruption debug Andrew Morton
2011-11-23 13:25   ` Stanislaw Gruszka
