From: Michal Hocko <mhocko@suse.cz> To: Johannes Weiner <jweiner@redhat.com> Cc: Andrew Morton <akpm@linux-foundation.org>, KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>, Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>, Balbir Singh <bsingharora@gmail.com>, Ying Han <yinghan@google.com>, Greg Thelen <gthelen@google.com>, Michel Lespinasse <walken@google.com>, Rik van Riel <riel@redhat.com>, Minchan Kim <minchan.kim@gmail.com>, Christoph Hellwig <hch@infradead.org>, linux-mm@kvack.org, linux-kernel@vger.kernel.org Subject: Re: [patch 11/11] mm: memcg: remove unused node/section info from pc->flags Date: Wed, 21 Sep 2011 17:32:49 +0200 [thread overview] Message-ID: <20110921153249.GJ8501@tiehlicka.suse.cz> (raw) In-Reply-To: <1315825048-3437-12-git-send-email-jweiner@redhat.com> On Mon 12-09-11 12:57:28, Johannes Weiner wrote: > To find the page corresponding to a certain page_cgroup, the pc->flags > encoded the node or section ID with the base array to compare the pc > pointer to. > > Now that the per-memory cgroup LRU lists link page descriptors > directly, there is no longer any code that knows the page_cgroup but > not the page. > > Signed-off-by: Johannes Weiner <jweiner@redhat.com> Nice. 
Reviewed-by: Michal Hocko <mhocko@suse.cz> > --- > include/linux/page_cgroup.h | 33 ------------------------ > mm/page_cgroup.c | 58 ++++++------------------------------------- > 2 files changed, 8 insertions(+), 83 deletions(-) > > diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h > index 5bae753..aaa60da 100644 > --- a/include/linux/page_cgroup.h > +++ b/include/linux/page_cgroup.h > @@ -121,39 +121,6 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc, > local_irq_restore(*flags); > } > > -#ifdef CONFIG_SPARSEMEM > -#define PCG_ARRAYID_WIDTH SECTIONS_SHIFT > -#else > -#define PCG_ARRAYID_WIDTH NODES_SHIFT > -#endif > - > -#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS) > -#error Not enough space left in pc->flags to store page_cgroup array IDs > -#endif > - > -/* pc->flags: ARRAY-ID | FLAGS */ > - > -#define PCG_ARRAYID_MASK ((1UL << PCG_ARRAYID_WIDTH) - 1) > - > -#define PCG_ARRAYID_OFFSET (BITS_PER_LONG - PCG_ARRAYID_WIDTH) > -/* > - * Zero the shift count for non-existent fields, to prevent compiler > - * warnings and ensure references are optimized away. 
> - */ > -#define PCG_ARRAYID_SHIFT (PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0)) > - > -static inline void set_page_cgroup_array_id(struct page_cgroup *pc, > - unsigned long id) > -{ > - pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT); > - pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT; > -} > - > -static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc) > -{ > - return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK; > -} > - > #else /* CONFIG_CGROUP_MEM_RES_CTLR */ > struct page_cgroup; > > diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c > index 256dee8..2601a65 100644 > --- a/mm/page_cgroup.c > +++ b/mm/page_cgroup.c > @@ -11,12 +11,6 @@ > #include <linux/swapops.h> > #include <linux/kmemleak.h> > > -static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id) > -{ > - pc->flags = 0; > - set_page_cgroup_array_id(pc, id); > - pc->mem_cgroup = NULL; > -} > static unsigned long total_usage; > > #if !defined(CONFIG_SPARSEMEM) > @@ -41,24 +35,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) > return base + offset; > } > > -struct page *lookup_cgroup_page(struct page_cgroup *pc) > -{ > - unsigned long pfn; > - struct page *page; > - pg_data_t *pgdat; > - > - pgdat = NODE_DATA(page_cgroup_array_id(pc)); > - pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn; > - page = pfn_to_page(pfn); > - VM_BUG_ON(pc != lookup_page_cgroup(page)); > - return page; > -} > - > static int __init alloc_node_page_cgroup(int nid) > { > - struct page_cgroup *base, *pc; > + struct page_cgroup *base; > unsigned long table_size; > - unsigned long start_pfn, nr_pages, index; > + unsigned long nr_pages; > > start_pfn = NODE_DATA(nid)->node_start_pfn; > nr_pages = NODE_DATA(nid)->node_spanned_pages; > @@ -72,10 +53,6 @@ static int __init alloc_node_page_cgroup(int nid) > table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); > if (!base) > return -ENOMEM; > - for (index = 0; index < nr_pages; index++) { > - pc = base + 
index; > - init_page_cgroup(pc, nid); > - } > NODE_DATA(nid)->node_page_cgroup = base; > total_usage += table_size; > return 0; > @@ -116,31 +93,19 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) > return section->page_cgroup + pfn; > } > > -struct page *lookup_cgroup_page(struct page_cgroup *pc) > -{ > - struct mem_section *section; > - struct page *page; > - unsigned long nr; > - > - nr = page_cgroup_array_id(pc); > - section = __nr_to_section(nr); > - page = pfn_to_page(pc - section->page_cgroup); > - VM_BUG_ON(pc != lookup_page_cgroup(page)); > - return page; > -} > - > static void *__meminit alloc_page_cgroup(size_t size, int nid) > { > void *addr = NULL; > > - addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN); > + addr = alloc_pages_exact_nid(nid, size, > + GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN); > if (addr) > return addr; > > if (node_state(nid, N_HIGH_MEMORY)) > - addr = vmalloc_node(size, nid); > + addr = vzalloc_node(size, nid); > else > - addr = vmalloc(size); > + addr = vzalloc(size); > > return addr; > } > @@ -163,14 +128,11 @@ static void free_page_cgroup(void *addr) > > static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) > { > - struct page_cgroup *base, *pc; > struct mem_section *section; > + struct page_cgroup *base; > unsigned long table_size; > - unsigned long nr; > - int index; > > - nr = pfn_to_section_nr(pfn); > - section = __nr_to_section(nr); > + section = __pfn_to_section(pfn); > > if (section->page_cgroup) > return 0; > @@ -190,10 +152,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) > return -ENOMEM; > } > > - for (index = 0; index < PAGES_PER_SECTION; index++) { > - pc = base + index; > - init_page_cgroup(pc, nr); > - } > /* > * The passed "pfn" may not be aligned to SECTION. For the calculation > * we need to apply a mask. > -- > 1.7.6 > > -- > To unsubscribe, send a message with 'unsubscribe linux-mm' in > the body to majordomo@kvack.org. 
For more info on Linux MM, > see: http://www.linux-mm.org/ . > Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/ > Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> -- Michal Hocko SUSE Labs SUSE LINUX s.r.o. Lihovarska 1060/12 190 00 Praha 9 Czech Republic
WARNING: multiple messages have this Message-ID (diff)
From: Michal Hocko <mhocko@suse.cz> To: Johannes Weiner <jweiner@redhat.com> Cc: Andrew Morton <akpm@linux-foundation.org>, KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>, Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>, Balbir Singh <bsingharora@gmail.com>, Ying Han <yinghan@google.com>, Greg Thelen <gthelen@google.com>, Michel Lespinasse <walken@google.com>, Rik van Riel <riel@redhat.com>, Minchan Kim <minchan.kim@gmail.com>, Christoph Hellwig <hch@infradead.org>, linux-mm@kvack.org, linux-kernel@vger.kernel.org Subject: Re: [patch 11/11] mm: memcg: remove unused node/section info from pc->flags Date: Wed, 21 Sep 2011 17:32:49 +0200 [thread overview] Message-ID: <20110921153249.GJ8501@tiehlicka.suse.cz> (raw) In-Reply-To: <1315825048-3437-12-git-send-email-jweiner@redhat.com> On Mon 12-09-11 12:57:28, Johannes Weiner wrote: > To find the page corresponding to a certain page_cgroup, the pc->flags > encoded the node or section ID with the base array to compare the pc > pointer to. > > Now that the per-memory cgroup LRU lists link page descriptors > directly, there is no longer any code that knows the page_cgroup but > not the page. > > Signed-off-by: Johannes Weiner <jweiner@redhat.com> Nice. 
Reviewed-by: Michal Hocko <mhocko@suse.cz> > --- > include/linux/page_cgroup.h | 33 ------------------------ > mm/page_cgroup.c | 58 ++++++------------------------------------- > 2 files changed, 8 insertions(+), 83 deletions(-) > > diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h > index 5bae753..aaa60da 100644 > --- a/include/linux/page_cgroup.h > +++ b/include/linux/page_cgroup.h > @@ -121,39 +121,6 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc, > local_irq_restore(*flags); > } > > -#ifdef CONFIG_SPARSEMEM > -#define PCG_ARRAYID_WIDTH SECTIONS_SHIFT > -#else > -#define PCG_ARRAYID_WIDTH NODES_SHIFT > -#endif > - > -#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS) > -#error Not enough space left in pc->flags to store page_cgroup array IDs > -#endif > - > -/* pc->flags: ARRAY-ID | FLAGS */ > - > -#define PCG_ARRAYID_MASK ((1UL << PCG_ARRAYID_WIDTH) - 1) > - > -#define PCG_ARRAYID_OFFSET (BITS_PER_LONG - PCG_ARRAYID_WIDTH) > -/* > - * Zero the shift count for non-existent fields, to prevent compiler > - * warnings and ensure references are optimized away. 
> - */ > -#define PCG_ARRAYID_SHIFT (PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0)) > - > -static inline void set_page_cgroup_array_id(struct page_cgroup *pc, > - unsigned long id) > -{ > - pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT); > - pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT; > -} > - > -static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc) > -{ > - return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK; > -} > - > #else /* CONFIG_CGROUP_MEM_RES_CTLR */ > struct page_cgroup; > > diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c > index 256dee8..2601a65 100644 > --- a/mm/page_cgroup.c > +++ b/mm/page_cgroup.c > @@ -11,12 +11,6 @@ > #include <linux/swapops.h> > #include <linux/kmemleak.h> > > -static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id) > -{ > - pc->flags = 0; > - set_page_cgroup_array_id(pc, id); > - pc->mem_cgroup = NULL; > -} > static unsigned long total_usage; > > #if !defined(CONFIG_SPARSEMEM) > @@ -41,24 +35,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) > return base + offset; > } > > -struct page *lookup_cgroup_page(struct page_cgroup *pc) > -{ > - unsigned long pfn; > - struct page *page; > - pg_data_t *pgdat; > - > - pgdat = NODE_DATA(page_cgroup_array_id(pc)); > - pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn; > - page = pfn_to_page(pfn); > - VM_BUG_ON(pc != lookup_page_cgroup(page)); > - return page; > -} > - > static int __init alloc_node_page_cgroup(int nid) > { > - struct page_cgroup *base, *pc; > + struct page_cgroup *base; > unsigned long table_size; > - unsigned long start_pfn, nr_pages, index; > + unsigned long nr_pages; > > start_pfn = NODE_DATA(nid)->node_start_pfn; > nr_pages = NODE_DATA(nid)->node_spanned_pages; > @@ -72,10 +53,6 @@ static int __init alloc_node_page_cgroup(int nid) > table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); > if (!base) > return -ENOMEM; > - for (index = 0; index < nr_pages; index++) { > - pc = base + 
index; > - init_page_cgroup(pc, nid); > - } > NODE_DATA(nid)->node_page_cgroup = base; > total_usage += table_size; > return 0; > @@ -116,31 +93,19 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) > return section->page_cgroup + pfn; > } > > -struct page *lookup_cgroup_page(struct page_cgroup *pc) > -{ > - struct mem_section *section; > - struct page *page; > - unsigned long nr; > - > - nr = page_cgroup_array_id(pc); > - section = __nr_to_section(nr); > - page = pfn_to_page(pc - section->page_cgroup); > - VM_BUG_ON(pc != lookup_page_cgroup(page)); > - return page; > -} > - > static void *__meminit alloc_page_cgroup(size_t size, int nid) > { > void *addr = NULL; > > - addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN); > + addr = alloc_pages_exact_nid(nid, size, > + GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN); > if (addr) > return addr; > > if (node_state(nid, N_HIGH_MEMORY)) > - addr = vmalloc_node(size, nid); > + addr = vzalloc_node(size, nid); > else > - addr = vmalloc(size); > + addr = vzalloc(size); > > return addr; > } > @@ -163,14 +128,11 @@ static void free_page_cgroup(void *addr) > > static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) > { > - struct page_cgroup *base, *pc; > struct mem_section *section; > + struct page_cgroup *base; > unsigned long table_size; > - unsigned long nr; > - int index; > > - nr = pfn_to_section_nr(pfn); > - section = __nr_to_section(nr); > + section = __pfn_to_section(pfn); > > if (section->page_cgroup) > return 0; > @@ -190,10 +152,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) > return -ENOMEM; > } > > - for (index = 0; index < PAGES_PER_SECTION; index++) { > - pc = base + index; > - init_page_cgroup(pc, nr); > - } > /* > * The passed "pfn" may not be aligned to SECTION. For the calculation > * we need to apply a mask. > -- > 1.7.6 > > -- > To unsubscribe, send a message with 'unsubscribe linux-mm' in > the body to majordomo@kvack.org. 
For more info on Linux MM, > see: http://www.linux-mm.org/ . > Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/ > Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a> -- Michal Hocko SUSE Labs SUSE LINUX s.r.o. Lihovarska 1060/12 190 00 Praha 9 Czech Republic -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/ Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2011-09-21 15:32 UTC|newest] Thread overview: 130+ messages / expand[flat|nested] mbox.gz Atom feed top 2011-09-12 10:57 [patch 0/11] mm: memcg naturalization -rc3 Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-12 10:57 ` [patch 01/11] mm: memcg: consolidate hierarchy iteration primitives Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-12 22:37 ` Kirill A. Shutemov 2011-09-12 22:37 ` Kirill A. Shutemov 2011-09-13 5:40 ` Johannes Weiner 2011-09-13 5:40 ` Johannes Weiner 2011-09-19 13:06 ` Michal Hocko 2011-09-19 13:06 ` Michal Hocko 2011-09-13 10:06 ` KAMEZAWA Hiroyuki 2011-09-13 10:06 ` KAMEZAWA Hiroyuki 2011-09-19 12:53 ` Michal Hocko 2011-09-19 12:53 ` Michal Hocko 2011-09-20 8:45 ` Johannes Weiner 2011-09-20 8:45 ` Johannes Weiner 2011-09-20 8:53 ` Michal Hocko 2011-09-20 8:53 ` Michal Hocko 2011-09-12 10:57 ` [patch 02/11] mm: vmscan: distinguish global reclaim from global LRU scanning Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-12 23:02 ` Kirill A. Shutemov 2011-09-12 23:02 ` Kirill A. 
Shutemov 2011-09-13 5:48 ` Johannes Weiner 2011-09-13 5:48 ` Johannes Weiner 2011-09-13 10:07 ` KAMEZAWA Hiroyuki 2011-09-13 10:07 ` KAMEZAWA Hiroyuki 2011-09-19 13:23 ` Michal Hocko 2011-09-19 13:23 ` Michal Hocko 2011-09-19 13:46 ` Michal Hocko 2011-09-19 13:46 ` Michal Hocko 2011-09-20 8:52 ` Johannes Weiner 2011-09-20 8:52 ` Johannes Weiner 2011-09-12 10:57 ` [patch 03/11] mm: vmscan: distinguish between memcg triggering reclaim and memcg being scanned Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:23 ` KAMEZAWA Hiroyuki 2011-09-13 10:23 ` KAMEZAWA Hiroyuki 2011-09-19 14:29 ` Michal Hocko 2011-09-19 14:29 ` Michal Hocko 2011-09-20 8:58 ` Johannes Weiner 2011-09-20 8:58 ` Johannes Weiner 2011-09-20 9:17 ` Michal Hocko 2011-09-20 9:17 ` Michal Hocko 2011-09-29 7:55 ` Johannes Weiner 2011-09-29 7:55 ` Johannes Weiner 2011-09-12 10:57 ` [patch 04/11] mm: memcg: per-priority per-zone hierarchy scan generations Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:27 ` KAMEZAWA Hiroyuki 2011-09-13 10:27 ` KAMEZAWA Hiroyuki 2011-09-13 11:03 ` Johannes Weiner 2011-09-13 11:03 ` Johannes Weiner 2011-09-14 0:55 ` KAMEZAWA Hiroyuki 2011-09-14 0:55 ` KAMEZAWA Hiroyuki 2011-09-14 5:56 ` Johannes Weiner 2011-09-14 5:56 ` Johannes Weiner 2011-09-14 7:40 ` KAMEZAWA Hiroyuki 2011-09-14 7:40 ` KAMEZAWA Hiroyuki 2011-09-20 8:15 ` Michal Hocko 2011-09-20 8:15 ` Michal Hocko 2011-09-20 8:45 ` Michal Hocko 2011-09-20 8:45 ` Michal Hocko 2011-09-20 9:10 ` Johannes Weiner 2011-09-20 9:10 ` Johannes Weiner 2011-09-20 12:37 ` Michal Hocko 2011-09-20 12:37 ` Michal Hocko 2011-09-12 10:57 ` [patch 05/11] mm: move memcg hierarchy reclaim to generic reclaim code Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:31 ` KAMEZAWA Hiroyuki 2011-09-13 10:31 ` KAMEZAWA Hiroyuki 2011-09-20 13:09 ` Michal Hocko 2011-09-20 13:09 ` Michal Hocko 2011-09-20 13:29 ` Johannes Weiner 2011-09-20 13:29 ` Johannes Weiner 2011-09-20 14:08 ` Michal Hocko 2011-09-20 
14:08 ` Michal Hocko 2011-09-12 10:57 ` [patch 06/11] mm: memcg: remove optimization of keeping the root_mem_cgroup LRU lists empty Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:34 ` KAMEZAWA Hiroyuki 2011-09-13 10:34 ` KAMEZAWA Hiroyuki 2011-09-20 15:02 ` Michal Hocko 2011-09-20 15:02 ` Michal Hocko 2011-09-29 9:20 ` Johannes Weiner 2011-09-29 9:20 ` Johannes Weiner 2011-09-29 9:49 ` Michal Hocko 2011-09-29 9:49 ` Michal Hocko 2011-09-12 10:57 ` [patch 07/11] mm: vmscan: convert unevictable page rescue scanner to per-memcg LRU lists Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:37 ` KAMEZAWA Hiroyuki 2011-09-13 10:37 ` KAMEZAWA Hiroyuki 2011-09-21 12:33 ` Michal Hocko 2011-09-21 12:33 ` Michal Hocko 2011-09-21 13:47 ` Johannes Weiner 2011-09-21 13:47 ` Johannes Weiner 2011-09-21 14:08 ` Michal Hocko 2011-09-21 14:08 ` Michal Hocko 2011-09-12 10:57 ` [patch 08/11] mm: vmscan: convert global reclaim " Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:41 ` KAMEZAWA Hiroyuki 2011-09-13 10:41 ` KAMEZAWA Hiroyuki 2011-09-21 13:10 ` Michal Hocko 2011-09-21 13:10 ` Michal Hocko 2011-09-21 13:51 ` Johannes Weiner 2011-09-21 13:51 ` Johannes Weiner 2011-09-21 13:57 ` Michal Hocko 2011-09-21 13:57 ` Michal Hocko 2011-09-12 10:57 ` [patch 09/11] mm: collect LRU list heads into struct lruvec Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:43 ` KAMEZAWA Hiroyuki 2011-09-13 10:43 ` KAMEZAWA Hiroyuki 2011-09-21 13:43 ` Michal Hocko 2011-09-21 13:43 ` Michal Hocko 2011-09-21 15:15 ` Michal Hocko 2011-09-21 15:15 ` Michal Hocko 2011-09-12 10:57 ` [patch 10/11] mm: make per-memcg LRU lists exclusive Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:47 ` KAMEZAWA Hiroyuki 2011-09-13 10:47 ` KAMEZAWA Hiroyuki 2011-09-21 15:24 ` Michal Hocko 2011-09-21 15:24 ` Michal Hocko 2011-09-21 15:47 ` Johannes Weiner 2011-09-21 15:47 ` Johannes Weiner 2011-09-21 16:05 ` Michal Hocko 2011-09-21 16:05 ` Michal 
Hocko 2011-09-12 10:57 ` [patch 11/11] mm: memcg: remove unused node/section info from pc->flags Johannes Weiner 2011-09-12 10:57 ` Johannes Weiner 2011-09-13 10:50 ` KAMEZAWA Hiroyuki 2011-09-13 10:50 ` KAMEZAWA Hiroyuki 2011-09-21 15:32 ` Michal Hocko [this message] 2011-09-21 15:32 ` Michal Hocko 2011-09-13 20:35 ` [patch 0/11] mm: memcg naturalization -rc3 Kirill A. Shutemov 2011-09-13 20:35 ` Kirill A. Shutemov
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20110921153249.GJ8501@tiehlicka.suse.cz \ --to=mhocko@suse.cz \ --cc=akpm@linux-foundation.org \ --cc=bsingharora@gmail.com \ --cc=gthelen@google.com \ --cc=hch@infradead.org \ --cc=jweiner@redhat.com \ --cc=kamezawa.hiroyu@jp.fujitsu.com \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=minchan.kim@gmail.com \ --cc=nishimura@mxp.nes.nec.co.jp \ --cc=riel@redhat.com \ --cc=walken@google.com \ --cc=yinghan@google.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.