linux-kernel.vger.kernel.org archive mirror
* [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
@ 2012-09-11  0:41 Minchan Kim
  2012-09-11  8:59 ` Mel Gorman
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Minchan Kim @ 2012-09-11  0:41 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Kyungmin Park, Minchan Kim,
	Marek Szyprowski, Michal Nazarewicz, Rik van Riel, Mel Gorman

This patch drops clean cache pages instead of migrating them during
alloc_contig_range() to minimise allocation latency by reducing the
amount of migration that is necessary. It's useful for CMA because the
latency of migration matters more than evicting the background
processes' working set. In addition, because pages are reclaimed, fewer
free pages are needed as migration targets, so it avoids reclaiming
memory just to get free pages, which is a contributory factor to
increased latency.

* from v1
  * drop migrate_mode_t
  * add reclaim_clean_pages_from_list instead of MIGRATE_DISCARD support - Mel

I measured the elapsed time of __alloc_contig_migrate_range(), which
migrates 10M in a 40M movable zone on a QEMU machine.

Before - 146ms, After - 7ms

Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
Andrew, this patch is based on mmotm-2012-09-06-16-46 with
mm-support-migrate_discard.patch dropped from the -mm tree.

 mm/internal.h   |    3 ++-
 mm/page_alloc.c |    2 ++
 mm/vmscan.c     |   43 +++++++++++++++++++++++++++++++++++++------
 3 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index bbd7b34..8312d4f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -356,5 +356,6 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
-
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list);
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 941b6ac..48b63d9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5705,6 +5705,8 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 			break;
 		}
 
+		reclaim_clean_pages_from_list(cc.zone, &cc.migratepages);
+
 		ret = migrate_pages(&cc.migratepages,
 				    __alloc_contig_migrate_alloc,
 				    0, false, MIGRATE_SYNC);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d16bf5a..f8f56f8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
+				      enum ttu_flags ttu_flags,
 				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_writeback)
+				      unsigned long *ret_nr_writeback,
+				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -689,10 +691,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 	mem_cgroup_uncharge_start();
 	while (!list_empty(page_list)) {
-		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
+		enum page_references references = PAGEREF_RECLAIM;
 
 		cond_resched();
 
@@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			wait_on_page_writeback(page);
 		}
 
-		references = page_check_references(page, sc);
+		if (!force_reclaim)
+			references = page_check_references(page, sc);
+
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
@@ -788,7 +792,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page, TTU_UNMAP)) {
+			switch (try_to_unmap(page, ttu_flags)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -960,6 +964,33 @@ keep:
 	return nr_reclaimed;
 }
 
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_unmap = 1,
+	};
+	unsigned long ret, dummy1, dummy2;
+	struct page *page, *next;
+	LIST_HEAD(clean_pages);
+
+	list_for_each_entry_safe(page, next, page_list, lru) {
+		if (page_is_file_cache(page) && !PageDirty(page)) {
+			ClearPageActive(page);
+			list_move(&page->lru, &clean_pages);
+		}
+	}
+
+	ret = shrink_page_list(&clean_pages, zone, &sc,
+				TTU_UNMAP|TTU_IGNORE_ACCESS,
+				&dummy1, &dummy2, true);
+	list_splice(&clean_pages, page_list);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+	return ret;
+}
+
 /*
  * Attempt to remove the specified page from its LRU.  Only take this page
  * if it is of the appropriate PageActive status.  Pages which are being
@@ -1278,8 +1309,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (nr_taken == 0)
 		return 0;
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
-						&nr_dirty, &nr_writeback);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
+					&nr_dirty, &nr_writeback, false);
 
 	spin_lock_irq(&zone->lru_lock);
 
-- 
1.7.9.5



* Re: [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
  2012-09-11  0:41 [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration Minchan Kim
@ 2012-09-11  8:59 ` Mel Gorman
  2012-09-11 11:15 ` Michal Nazarewicz
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 9+ messages in thread
From: Mel Gorman @ 2012-09-11  8:59 UTC (permalink / raw)
  To: Minchan Kim
  Cc: Andrew Morton, linux-kernel, linux-mm, Kyungmin Park,
	Marek Szyprowski, Michal Nazarewicz, Rik van Riel

On Tue, Sep 11, 2012 at 09:41:52AM +0900, Minchan Kim wrote:
> This patch drops clean cache pages instead of migrating them during
> alloc_contig_range() to minimise allocation latency by reducing the
> amount of migration that is necessary. It's useful for CMA because the
> latency of migration matters more than evicting the background
> processes' working set. In addition, because pages are reclaimed, fewer
> free pages are needed as migration targets, so it avoids reclaiming
> memory just to get free pages, which is a contributory factor to
> increased latency.
> 
> * from v1
>   * drop migrate_mode_t
>   * add reclaim_clean_pages_from_list instead of MIGRATE_DISCARD support - Mel
> 
> I measured the elapsed time of __alloc_contig_migrate_range(), which
> migrates 10M in a 40M movable zone on a QEMU machine.
> 
> Before - 146ms, After - 7ms
> 
> Cc: Marek Szyprowski <m.szyprowski@samsung.com>
> Cc: Michal Nazarewicz <mina86@mina86.com>
> Cc: Rik van Riel <riel@redhat.com>
> Signed-off-by: Mel Gorman <mgorman@suse.de>
> Signed-off-by: Minchan Kim <minchan@kernel.org>

My Signed-off-by is already on this, but in earlier versions I was still
asking for changes. This time the patch looks good to me, so even though
it is a bit redundant:

Reviewed-by: Mel Gorman <mgorman@suse.de>

Thanks Minchan.

-- 
Mel Gorman
SUSE Labs


* Re: [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
  2012-09-11  0:41 [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration Minchan Kim
  2012-09-11  8:59 ` Mel Gorman
@ 2012-09-11 11:15 ` Michal Nazarewicz
  2012-09-11 11:23   ` Kyungmin Park
  2012-09-12 20:07 ` Andrew Morton
  2012-09-13 19:17 ` Geert Uytterhoeven
  3 siblings, 1 reply; 9+ messages in thread
From: Michal Nazarewicz @ 2012-09-11 11:15 UTC (permalink / raw)
  To: Minchan Kim, Andrew Morton
  Cc: linux-kernel, linux-mm, Kyungmin Park, Minchan Kim,
	Marek Szyprowski, Rik van Riel, Mel Gorman


On Tue, Sep 11 2012, Minchan Kim wrote:
> This patch drops clean cache pages instead of migrating them during
> alloc_contig_range() to minimise allocation latency by reducing the
> amount of migration that is necessary. It's useful for CMA because the
> latency of migration matters more than evicting the background
> processes' working set. In addition, because pages are reclaimed, fewer
> free pages are needed as migration targets, so it avoids reclaiming
> memory just to get free pages, which is a contributory factor to
> increased latency.
>
> * from v1
>   * drop migrate_mode_t
>   * add reclaim_clean_pages_from_list instead of MIGRATE_DISCARD support - Mel
>
> I measured the elapsed time of __alloc_contig_migrate_range(), which
> migrates 10M in a 40M movable zone on a QEMU machine.
>
> Before - 146ms, After - 7ms
>
> Cc: Marek Szyprowski <m.szyprowski@samsung.com>
> Cc: Michal Nazarewicz <mina86@mina86.com>

Acked-by: Michal Nazarewicz <mina86@mina86.com>

Thanks!

-- 
Best regards,                                         _     _
.o. | Liege of Serenely Enlightened Majesty of      o' \,=./ `o
..o | Computer Science,  Michał “mina86” Nazarewicz    (o o)
ooo +----<email/xmpp: mpn@google.com>--------------ooO--(_)--Ooo--



* Re: [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
  2012-09-11 11:15 ` Michal Nazarewicz
@ 2012-09-11 11:23   ` Kyungmin Park
  0 siblings, 0 replies; 9+ messages in thread
From: Kyungmin Park @ 2012-09-11 11:23 UTC (permalink / raw)
  To: Michal Nazarewicz
  Cc: Minchan Kim, Andrew Morton, linux-kernel, linux-mm,
	Marek Szyprowski, Rik van Riel, Mel Gorman

On 9/11/12, Michal Nazarewicz <mina86@mina86.com> wrote:
>
> On Tue, Sep 11 2012, Minchan Kim wrote:
>> This patch drops clean cache pages instead of migrating them during
>> alloc_contig_range() to minimise allocation latency by reducing the
>> amount of migration that is necessary. It's useful for CMA because the
>> latency of migration matters more than evicting the background
>> processes' working set. In addition, because pages are reclaimed, fewer
>> free pages are needed as migration targets, so it avoids reclaiming
>> memory just to get free pages, which is a contributory factor to
>> increased latency.
>>
>> * from v1
>>   * drop migrate_mode_t
>>   * add reclaim_clean_pages_from_list instead of MIGRATE_DISCARD support - Mel
>>
>> I measured the elapsed time of __alloc_contig_migrate_range(), which
>> migrates 10M in a 40M movable zone on a QEMU machine.
>>
>> Before - 146ms, After - 7ms
>>
>> Cc: Marek Szyprowski <m.szyprowski@samsung.com>
>> Cc: Michal Nazarewicz <mina86@mina86.com>
>
> Acked-by: Michal Nazarewicz <mina86@mina86.com>
Tested-by: Kyungmin Park <kyungmin.park@samsung.com>
>
> Thanks!
>
> --
> Best regards,                                         _     _
> .o. | Liege of Serenely Enlightened Majesty of      o' \,=./ `o
> ..o | Computer Science,  Michał “mina86” Nazarewicz    (o o)
> ooo +----<email/xmpp: mpn@google.com>--------------ooO--(_)--Ooo--


* Re: [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
  2012-09-11  0:41 [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration Minchan Kim
  2012-09-11  8:59 ` Mel Gorman
  2012-09-11 11:15 ` Michal Nazarewicz
@ 2012-09-12 20:07 ` Andrew Morton
  2012-09-12 23:58   ` Minchan Kim
  2012-09-13 19:17 ` Geert Uytterhoeven
  3 siblings, 1 reply; 9+ messages in thread
From: Andrew Morton @ 2012-09-12 20:07 UTC (permalink / raw)
  To: Minchan Kim
  Cc: linux-kernel, linux-mm, Kyungmin Park, Marek Szyprowski,
	Michal Nazarewicz, Rik van Riel, Mel Gorman

On Tue, 11 Sep 2012 09:41:52 +0900
Minchan Kim <minchan@kernel.org> wrote:

> This patch drops clean cache pages instead of migrating them during
> alloc_contig_range() to minimise allocation latency by reducing the
> amount of migration that is necessary. It's useful for CMA because the
> latency of migration matters more than evicting the background
> processes' working set. In addition, because pages are reclaimed, fewer
> free pages are needed as migration targets, so it avoids reclaiming
> memory just to get free pages, which is a contributory factor to
> increased latency.
> 
> * from v1
>   * drop migrate_mode_t
>   * add reclaim_clean_pages_from_list instead of MIGRATE_DISCARD support - Mel
> 
> I measured the elapsed time of __alloc_contig_migrate_range(), which
> migrates 10M in a 40M movable zone on a QEMU machine.
> 
> Before - 146ms, After - 7ms
> 
> ...
>
> @@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  			wait_on_page_writeback(page);
>  		}
>  
> -		references = page_check_references(page, sc);
> +		if (!force_reclaim)
> +			references = page_check_references(page, sc);

grumble.  Could we please document `enum page_references' and
page_check_references()?

And the `force_reclaim' arg could do with some documentation.  It only
forces reclaim under certain circumstances.  They should be described,
and a reason should be provided.

Why didn't this patch use PAGEREF_RECLAIM_CLEAN?  It is possible for
someone to dirty one of these pages after we tested its cleanness, in
which case we'll go off and write it out even though we never meant to
reclaim it?
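
(For context: the enum in question has four states, and the check that
makes PAGEREF_RECLAIM_CLEAN matter is shrink_page_list()'s dirty-page
handling. A condensed sketch of the mm/vmscan.c logic of this period
follows; the annotations are added here and are not in the source:)

	enum page_references {
		PAGEREF_RECLAIM,	/* reclaim; write back if dirty */
		PAGEREF_RECLAIM_CLEAN,	/* reclaim only if no writeback needed */
		PAGEREF_KEEP,		/* leave on the inactive list */
		PAGEREF_ACTIVATE,	/* promote to the active list */
	};

	/* later, in shrink_page_list(): */
	if (PageDirty(page)) {
		/* ... kswapd/priority checks elided ... */
		if (references == PAGEREF_RECLAIM_CLEAN)
			goto keep_locked;	/* skip rather than page out */
		/* otherwise fall through towards pageout() */
	}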

>
> ...
>


* Re: [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
  2012-09-12 20:07 ` Andrew Morton
@ 2012-09-12 23:58   ` Minchan Kim
  0 siblings, 0 replies; 9+ messages in thread
From: Minchan Kim @ 2012-09-12 23:58 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-kernel, linux-mm, Kyungmin Park, Marek Szyprowski,
	Michal Nazarewicz, Rik van Riel, Mel Gorman

On Wed, Sep 12, 2012 at 01:07:32PM -0700, Andrew Morton wrote:
> On Tue, 11 Sep 2012 09:41:52 +0900
> Minchan Kim <minchan@kernel.org> wrote:
> 
> > This patch drops clean cache pages instead of migrating them during
> > alloc_contig_range() to minimise allocation latency by reducing the
> > amount of migration that is necessary. It's useful for CMA because the
> > latency of migration matters more than evicting the background
> > processes' working set. In addition, because pages are reclaimed, fewer
> > free pages are needed as migration targets, so it avoids reclaiming
> > memory just to get free pages, which is a contributory factor to
> > increased latency.
> > 
> > * from v1
> >   * drop migrate_mode_t
> >   * add reclaim_clean_pages_from_list instead of MIGRATE_DISCARD support - Mel
> > 
> > I measured the elapsed time of __alloc_contig_migrate_range(), which
> > migrates 10M in a 40M movable zone on a QEMU machine.
> > 
> > Before - 146ms, After - 7ms
> > 
> > ...
> >
> > @@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
> >  			wait_on_page_writeback(page);
> >  		}
> >  
> > -		references = page_check_references(page, sc);
> > +		if (!force_reclaim)
> > +			references = page_check_references(page, sc);
> 
> grumble.  Could we please document `enum page_references' and
> page_check_references()?
> 
> And the `force_reclaim' arg could do with some documentation.  It only
> forces reclaim under certain circumstances.  They should be described,
> and a reson should be provided.

I will give it a shot by another patch.

> 
> Why didn't this patch use PAGEREF_RECLAIM_CLEAN?  It is possible for
> someone to dirty one of these pages after we tested its cleanness and
> we'll then go off and write it out, but we won't be reclaiming it?

Absolutely.
Thanks Andrew!

Here it goes.

====== 8< ======

From 90022feb9ecf8e9a4efba7cbf49d7cead777020f Mon Sep 17 00:00:00 2001
From: Minchan Kim <minchan@kernel.org>
Date: Thu, 13 Sep 2012 08:45:58 +0900
Subject: [PATCH] mm: cma: reclaim only clean pages

It is possible for pages to become dirty after the check in
reclaim_clean_pages_from_list(), in which case we end up paging them
out, which is never what we want when the goal is to reduce latency.

This patch fixes it.

Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 mm/vmscan.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index f8f56f8..1ee4b69 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -694,7 +694,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
-		enum page_references references = PAGEREF_RECLAIM;
+		enum page_references references = PAGEREF_RECLAIM_CLEAN;
 
 		cond_resched();
 
-- 
1.7.9.5

-- 
Kind regards,
Minchan Kim


* Re: [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
  2012-09-11  0:41 [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration Minchan Kim
                   ` (2 preceding siblings ...)
  2012-09-12 20:07 ` Andrew Morton
@ 2012-09-13 19:17 ` Geert Uytterhoeven
  2012-09-13 22:19   ` Andrew Morton
  3 siblings, 1 reply; 9+ messages in thread
From: Geert Uytterhoeven @ 2012-09-13 19:17 UTC (permalink / raw)
  To: Minchan Kim
  Cc: Andrew Morton, linux-kernel, linux-mm, Kyungmin Park,
	Marek Szyprowski, Michal Nazarewicz, Rik van Riel, Mel Gorman,
	Linux-Next

On Tue, Sep 11, 2012 at 2:41 AM, Minchan Kim <minchan@kernel.org> wrote:
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
>  static unsigned long shrink_page_list(struct list_head *page_list,
>                                       struct zone *zone,
>                                       struct scan_control *sc,
> +                                     enum ttu_flags ttu_flags,

"enum ttu_flags" is defined on CONFIG_MMU=y only, causing on nommu:

mm/vmscan.c:677:26: error: parameter 4 ('ttu_flags') has incomplete type
mm/vmscan.c:987:5: error: 'TTU_UNMAP' undeclared (first use in this function)
mm/vmscan.c:987:15: error: 'TTU_IGNORE_ACCESS' undeclared (first use
in this function)
mm/vmscan.c:1312:56: error: 'TTU_UNMAP' undeclared (first use in this function)

E.g.
http://kisskb.ellerman.id.au/kisskb/buildresult/7191694/ (h8300-defconfig)
http://kisskb.ellerman.id.au/kisskb/buildresult/7191858/ (sh-allnoconfig)
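
(The underlying layout: in include/linux/rmap.h of this period the enum
is declared inside the CONFIG_MMU block, roughly as sketched below, so
nommu builds see neither the type nor the TTU_* values; the elided
context is marked and approximate:)

	#ifdef CONFIG_MMU
	/* ... */
	enum ttu_flags {
		TTU_UNMAP = 0,
		/* ... */
	};
	int try_to_unmap(struct page *, enum ttu_flags flags);
	#else
	/* nommu: try_to_unmap() is a stub macro; the enum is never
	 * declared, hence the incomplete type and undeclared TTU_*
	 * symbols in the errors above */
	#endif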

Gr{oetje,eeting}s,

                        Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds


* Re: [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
  2012-09-13 19:17 ` Geert Uytterhoeven
@ 2012-09-13 22:19   ` Andrew Morton
  2012-09-14  0:10     ` Minchan Kim
  0 siblings, 1 reply; 9+ messages in thread
From: Andrew Morton @ 2012-09-13 22:19 UTC (permalink / raw)
  To: Geert Uytterhoeven
  Cc: Minchan Kim, linux-kernel, linux-mm, Kyungmin Park,
	Marek Szyprowski, Michal Nazarewicz, Rik van Riel, Mel Gorman,
	Linux-Next

On Thu, 13 Sep 2012 21:17:19 +0200
Geert Uytterhoeven <geert@linux-m68k.org> wrote:

> On Tue, Sep 11, 2012 at 2:41 AM, Minchan Kim <minchan@kernel.org> wrote:
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
> >  static unsigned long shrink_page_list(struct list_head *page_list,
> >                                       struct zone *zone,
> >                                       struct scan_control *sc,
> > +                                     enum ttu_flags ttu_flags,
> 
> "enum ttu_flags" is defined on CONFIG_MMU=y only, causing on nommu:
> 
> mm/vmscan.c:677:26: error: parameter 4 ('ttu_flags') has incomplete type
> mm/vmscan.c:987:5: error: 'TTU_UNMAP' undeclared (first use in this function)
> mm/vmscan.c:987:15: error: 'TTU_IGNORE_ACCESS' undeclared (first use
> in this function)
> mm/vmscan.c:1312:56: error: 'TTU_UNMAP' undeclared (first use in this function)
> 
> E.g.
> http://kisskb.ellerman.id.au/kisskb/buildresult/7191694/ (h8300-defconfig)
> http://kisskb.ellerman.id.au/kisskb/buildresult/7191858/ (sh-allnoconfig)

hm, OK, the means by which current mainline avoids build errors is
either clever or lucky.

			switch (try_to_unmap(page, TTU_UNMAP)) {

gets preprocessed into

			switch (2) {

so the compiler never gets to see the TTU_ symbol at all, because it
happens to be inside the try_to_unmap() call.
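
(Concretely, the nommu stub is a macro and SWAP_FAIL is 2, so the
TTU_UNMAP argument is discarded at preprocessing time; a condensed
sketch of the rmap.h definitions involved:)

	#define SWAP_SUCCESS	0
	#define SWAP_AGAIN	1
	#define SWAP_FAIL	2

	/* nommu stub: the flags argument is never evaluated */
	#define try_to_unmap(page, refs) SWAP_FAIL

	/* so mm/vmscan.c's
	 *	switch (try_to_unmap(page, TTU_UNMAP)) {
	 * preprocesses to
	 *	switch (2) {
	 */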


I guess we can just make ttu_flags visible to NOMMU:


--- a/include/linux/rmap.h~mm-cma-discard-clean-pages-during-contiguous-allocation-instead-of-migration-fix-fix
+++ a/include/linux/rmap.h
@@ -71,6 +71,17 @@ struct anon_vma_chain {
 #endif
 };
 
+enum ttu_flags {
+	TTU_UNMAP = 0,			/* unmap mode */
+	TTU_MIGRATION = 1,		/* migration mode */
+	TTU_MUNLOCK = 2,		/* munlock mode */
+	TTU_ACTION_MASK = 0xff,
+
+	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
+	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
+	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+
 #ifdef CONFIG_MMU
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
@@ -164,16 +175,6 @@ int page_referenced(struct page *, int i
 int page_referenced_one(struct page *, struct vm_area_struct *,
 	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
-enum ttu_flags {
-	TTU_UNMAP = 0,			/* unmap mode */
-	TTU_MIGRATION = 1,		/* migration mode */
-	TTU_MUNLOCK = 2,		/* munlock mode */
-	TTU_ACTION_MASK = 0xff,
-
-	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
-	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
-	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
-};
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
_



* Re: [PATCH] mm: cma: Discard clean pages during contiguous allocation instead of migration
  2012-09-13 22:19   ` Andrew Morton
@ 2012-09-14  0:10     ` Minchan Kim
  0 siblings, 0 replies; 9+ messages in thread
From: Minchan Kim @ 2012-09-14  0:10 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Geert Uytterhoeven, linux-kernel, linux-mm, Kyungmin Park,
	Marek Szyprowski, Michal Nazarewicz, Rik van Riel, Mel Gorman,
	Linux-Next

On Thu, Sep 13, 2012 at 03:19:22PM -0700, Andrew Morton wrote:
> On Thu, 13 Sep 2012 21:17:19 +0200
> Geert Uytterhoeven <geert@linux-m68k.org> wrote:
> 
> > On Tue, Sep 11, 2012 at 2:41 AM, Minchan Kim <minchan@kernel.org> wrote:
> > > --- a/mm/vmscan.c
> > > +++ b/mm/vmscan.c
> > > @@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
> > >  static unsigned long shrink_page_list(struct list_head *page_list,
> > >                                       struct zone *zone,
> > >                                       struct scan_control *sc,
> > > +                                     enum ttu_flags ttu_flags,
> > 
> > "enum ttu_flags" is defined on CONFIG_MMU=y only, causing on nommu:
> > 
> > mm/vmscan.c:677:26: error: parameter 4 ('ttu_flags') has incomplete type
> > mm/vmscan.c:987:5: error: 'TTU_UNMAP' undeclared (first use in this function)
> > mm/vmscan.c:987:15: error: 'TTU_IGNORE_ACCESS' undeclared (first use
> > in this function)
> > mm/vmscan.c:1312:56: error: 'TTU_UNMAP' undeclared (first use in this function)
> > 
> > E.g.
> > http://kisskb.ellerman.id.au/kisskb/buildresult/7191694/ (h8300-defconfig)
> > http://kisskb.ellerman.id.au/kisskb/buildresult/7191858/ (sh-allnoconfig)
> 
> hm, OK, the means by which current mainline avoids build errors is
> either clever or lucky.
> 
> 			switch (try_to_unmap(page, TTU_UNMAP)) {
> 
> gets preprocessed into
> 
> 			switch (2) {
> 
> so the compiler never gets to see the TTU_ symbol at all, because it
> happens to be inside the try_to_unmap() call.
> 
> 
> I guess we can just make ttu_flags visible to NOMMU:

I agree.

Geert, Andrew
Thanks for the reporting and quick fix!

-- 
Kind regards,
Minchan Kim


