From: Glauber Costa <glommer@openvz.org> To: <linux-mm@kvack.org> Cc: Andrew Morton <akpm@linux-foundation.org>, Mel Gorman <mgorman@suse.de>, <cgroups@vger.kernel.org>, <kamezawa.hiroyu@jp.fujitsu.com>, Johannes Weiner <hannes@cmpxchg.org>, Michal Hocko <mhocko@suse.cz>, hughd@google.com, Greg Thelen <gthelen@google.com>, <linux-fsdevel@vger.kernel.org>, Dave Chinner <dchinner@redhat.com>, Glauber Costa <glommer@openvz.org> Subject: [PATCH v5 11/31] list_lru: per-node list infrastructure Date: Thu, 9 May 2013 10:06:28 +0400 [thread overview] Message-ID: <1368079608-5611-12-git-send-email-glommer@openvz.org> (raw) In-Reply-To: <1368079608-5611-1-git-send-email-glommer@openvz.org> From: Dave Chinner <dchinner@redhat.com> Now that we have an LRU list API, we can start to enhance the implementation. This splits the single LRU list into per-node lists and locks to enhance scalability. Items are placed on lists according to the node the memory belongs to. To make scanning the lists efficient, also track whether the per-node lists have entries in them in a active nodemask. [ glommer: fixed warnings ] Signed-off-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Glauber Costa <glommer@openvz.org> Reviewed-by: Greg Thelen <gthelen@google.com> --- include/linux/list_lru.h | 14 ++-- lib/list_lru.c | 162 +++++++++++++++++++++++++++++++++++------------ 2 files changed, 130 insertions(+), 46 deletions(-) diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index d77ddaa..262d9d8 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -8,6 +8,7 @@ #define _LRU_LIST_H #include <linux/list.h> +#include <linux/nodemask.h> enum lru_status { LRU_REMOVED, /* item removed from list */ @@ -17,20 +18,21 @@ enum lru_status { internally, but has to return locked. 
*/ }; -struct list_lru { +struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; +} ____cacheline_aligned_in_smp; + +struct list_lru { + struct list_lru_node node[MAX_NUMNODES]; + nodemask_t active_nodes; }; int list_lru_init(struct list_lru *lru); int list_lru_add(struct list_lru *lru, struct list_head *item); int list_lru_del(struct list_lru *lru, struct list_head *item); - -static inline unsigned long list_lru_count(struct list_lru *lru) -{ - return lru->nr_items; -} +unsigned long list_lru_count(struct list_lru *lru); typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg); diff --git a/lib/list_lru.c b/lib/list_lru.c index 219d79f..6a2ad81 100644 --- a/lib/list_lru.c +++ b/lib/list_lru.c @@ -6,6 +6,7 @@ */ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mm.h> #include <linux/list_lru.h> int @@ -13,14 +14,19 @@ list_lru_add( struct list_lru *lru, struct list_head *item) { - spin_lock(&lru->lock); + int nid = page_to_nid(virt_to_page(item)); + struct list_lru_node *nlru = &lru->node[nid]; + + spin_lock(&nlru->lock); + BUG_ON(nlru->nr_items < 0); if (list_empty(item)) { - list_add_tail(item, &lru->list); - lru->nr_items++; - spin_unlock(&lru->lock); + list_add_tail(item, &nlru->list); + if (nlru->nr_items++ == 0) + node_set(nid, lru->active_nodes); + spin_unlock(&nlru->lock); return 1; } - spin_unlock(&lru->lock); + spin_unlock(&nlru->lock); return 0; } EXPORT_SYMBOL_GPL(list_lru_add); @@ -30,44 +36,73 @@ list_lru_del( struct list_lru *lru, struct list_head *item) { - spin_lock(&lru->lock); + int nid = page_to_nid(virt_to_page(item)); + struct list_lru_node *nlru = &lru->node[nid]; + + spin_lock(&nlru->lock); if (!list_empty(item)) { list_del_init(item); - lru->nr_items--; - spin_unlock(&lru->lock); + if (--nlru->nr_items == 0) + node_clear(nid, lru->active_nodes); + BUG_ON(nlru->nr_items < 0); + spin_unlock(&nlru->lock); return 1; } - spin_unlock(&lru->lock); + 
spin_unlock(&nlru->lock); return 0; } EXPORT_SYMBOL_GPL(list_lru_del); unsigned long -list_lru_walk( - struct list_lru *lru, - list_lru_walk_cb isolate, - void *cb_arg, - long nr_to_walk) +list_lru_count( + struct list_lru *lru) { + long count = 0; + int nid; + + for_each_node_mask(nid, lru->active_nodes) { + struct list_lru_node *nlru = &lru->node[nid]; + + spin_lock(&nlru->lock); + BUG_ON(nlru->nr_items < 0); + count += nlru->nr_items; + spin_unlock(&nlru->lock); + } + + return count; +} +EXPORT_SYMBOL_GPL(list_lru_count); + +static unsigned long +list_lru_walk_node( + struct list_lru *lru, + int nid, + list_lru_walk_cb isolate, + void *cb_arg, + long *nr_to_walk) +{ + struct list_lru_node *nlru = &lru->node[nid]; struct list_head *item, *n; - unsigned long removed = 0; + unsigned long isolated = 0; - spin_lock(&lru->lock); + spin_lock(&nlru->lock); restart: - list_for_each_safe(item, n, &lru->list) { + list_for_each_safe(item, n, &nlru->list) { enum lru_status ret; - if (nr_to_walk-- < 0) + if ((*nr_to_walk)-- < 0) break; - ret = isolate(item, &lru->lock, cb_arg); + ret = isolate(item, &nlru->lock, cb_arg); switch (ret) { case LRU_REMOVED: - lru->nr_items--; - removed++; + if (--nlru->nr_items == 0) + node_clear(nid, lru->active_nodes); + BUG_ON(nlru->nr_items < 0); + isolated++; break; case LRU_ROTATE: - list_move_tail(item, &lru->list); + list_move_tail(item, &nlru->list); break; case LRU_SKIP: break; @@ -77,42 +112,89 @@ restart: BUG(); } } - spin_unlock(&lru->lock); - return removed; + spin_unlock(&nlru->lock); + return isolated; } -EXPORT_SYMBOL_GPL(list_lru_walk); unsigned long -list_lru_dispose_all( - struct list_lru *lru, - list_lru_dispose_cb dispose) +list_lru_walk( + struct list_lru *lru, + list_lru_walk_cb isolate, + void *cb_arg, + long nr_to_walk) { - unsigned long disposed = 0; + long isolated = 0; + int nid; + + for_each_node_mask(nid, lru->active_nodes) { + isolated += list_lru_walk_node(lru, nid, isolate, + cb_arg, &nr_to_walk); + if 
(nr_to_walk <= 0) + break; + } + return isolated; +} +EXPORT_SYMBOL_GPL(list_lru_walk); + +static unsigned long +list_lru_dispose_all_node( + struct list_lru *lru, + int nid, + list_lru_dispose_cb dispose) +{ + struct list_lru_node *nlru = &lru->node[nid]; LIST_HEAD(dispose_list); + unsigned long disposed = 0; - spin_lock(&lru->lock); - while (!list_empty(&lru->list)) { - list_splice_init(&lru->list, &dispose_list); - disposed += lru->nr_items; - lru->nr_items = 0; - spin_unlock(&lru->lock); + spin_lock(&nlru->lock); + while (!list_empty(&nlru->list)) { + list_splice_init(&nlru->list, &dispose_list); + disposed += nlru->nr_items; + nlru->nr_items = 0; + node_clear(nid, lru->active_nodes); + spin_unlock(&nlru->lock); dispose(&dispose_list); - spin_lock(&lru->lock); + spin_lock(&nlru->lock); } - spin_unlock(&lru->lock); + spin_unlock(&nlru->lock); return disposed; } +unsigned long +list_lru_dispose_all( + struct list_lru *lru, + list_lru_dispose_cb dispose) +{ + unsigned long disposed; + unsigned long total = 0; + int nid; + + do { + disposed = 0; + for_each_node_mask(nid, lru->active_nodes) { + disposed += list_lru_dispose_all_node(lru, nid, + dispose); + } + total += disposed; + } while (disposed != 0); + + return total; +} + int list_lru_init( struct list_lru *lru) { - spin_lock_init(&lru->lock); - INIT_LIST_HEAD(&lru->list); - lru->nr_items = 0; + int i; + nodes_clear(lru->active_nodes); + for (i = 0; i < MAX_NUMNODES; i++) { + spin_lock_init(&lru->node[i].lock); + INIT_LIST_HEAD(&lru->node[i].list); + lru->node[i].nr_items = 0; + } return 0; } EXPORT_SYMBOL_GPL(list_lru_init); -- 1.8.1.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
WARNING: multiple messages have this Message-ID (diff)
From: Glauber Costa <glommer@openvz.org> To: linux-mm@kvack.org Cc: Andrew Morton <akpm@linux-foundation.org>, Mel Gorman <mgorman@suse.de>, cgroups@vger.kernel.org, kamezawa.hiroyu@jp.fujitsu.com, Johannes Weiner <hannes@cmpxchg.org>, Michal Hocko <mhocko@suse.cz>, hughd@google.com, Greg Thelen <gthelen@google.com>, linux-fsdevel@vger.kernel.org, Dave Chinner <dchinner@redhat.com>, Glauber Costa <glommer@openvz.org> Subject: [PATCH v5 11/31] list_lru: per-node list infrastructure Date: Thu, 9 May 2013 10:06:28 +0400 [thread overview] Message-ID: <1368079608-5611-12-git-send-email-glommer@openvz.org> (raw) In-Reply-To: <1368079608-5611-1-git-send-email-glommer@openvz.org> From: Dave Chinner <dchinner@redhat.com> Now that we have an LRU list API, we can start to enhance the implementation. This splits the single LRU list into per-node lists and locks to enhance scalability. Items are placed on lists according to the node the memory belongs to. To make scanning the lists efficient, also track whether the per-node lists have entries in them in a active nodemask. [ glommer: fixed warnings ] Signed-off-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Glauber Costa <glommer@openvz.org> Reviewed-by: Greg Thelen <gthelen@google.com> --- include/linux/list_lru.h | 14 ++-- lib/list_lru.c | 162 +++++++++++++++++++++++++++++++++++------------ 2 files changed, 130 insertions(+), 46 deletions(-) diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index d77ddaa..262d9d8 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -8,6 +8,7 @@ #define _LRU_LIST_H #include <linux/list.h> +#include <linux/nodemask.h> enum lru_status { LRU_REMOVED, /* item removed from list */ @@ -17,20 +18,21 @@ enum lru_status { internally, but has to return locked. 
*/ }; -struct list_lru { +struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; +} ____cacheline_aligned_in_smp; + +struct list_lru { + struct list_lru_node node[MAX_NUMNODES]; + nodemask_t active_nodes; }; int list_lru_init(struct list_lru *lru); int list_lru_add(struct list_lru *lru, struct list_head *item); int list_lru_del(struct list_lru *lru, struct list_head *item); - -static inline unsigned long list_lru_count(struct list_lru *lru) -{ - return lru->nr_items; -} +unsigned long list_lru_count(struct list_lru *lru); typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg); diff --git a/lib/list_lru.c b/lib/list_lru.c index 219d79f..6a2ad81 100644 --- a/lib/list_lru.c +++ b/lib/list_lru.c @@ -6,6 +6,7 @@ */ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mm.h> #include <linux/list_lru.h> int @@ -13,14 +14,19 @@ list_lru_add( struct list_lru *lru, struct list_head *item) { - spin_lock(&lru->lock); + int nid = page_to_nid(virt_to_page(item)); + struct list_lru_node *nlru = &lru->node[nid]; + + spin_lock(&nlru->lock); + BUG_ON(nlru->nr_items < 0); if (list_empty(item)) { - list_add_tail(item, &lru->list); - lru->nr_items++; - spin_unlock(&lru->lock); + list_add_tail(item, &nlru->list); + if (nlru->nr_items++ == 0) + node_set(nid, lru->active_nodes); + spin_unlock(&nlru->lock); return 1; } - spin_unlock(&lru->lock); + spin_unlock(&nlru->lock); return 0; } EXPORT_SYMBOL_GPL(list_lru_add); @@ -30,44 +36,73 @@ list_lru_del( struct list_lru *lru, struct list_head *item) { - spin_lock(&lru->lock); + int nid = page_to_nid(virt_to_page(item)); + struct list_lru_node *nlru = &lru->node[nid]; + + spin_lock(&nlru->lock); if (!list_empty(item)) { list_del_init(item); - lru->nr_items--; - spin_unlock(&lru->lock); + if (--nlru->nr_items == 0) + node_clear(nid, lru->active_nodes); + BUG_ON(nlru->nr_items < 0); + spin_unlock(&nlru->lock); return 1; } - spin_unlock(&lru->lock); + 
spin_unlock(&nlru->lock); return 0; } EXPORT_SYMBOL_GPL(list_lru_del); unsigned long -list_lru_walk( - struct list_lru *lru, - list_lru_walk_cb isolate, - void *cb_arg, - long nr_to_walk) +list_lru_count( + struct list_lru *lru) { + long count = 0; + int nid; + + for_each_node_mask(nid, lru->active_nodes) { + struct list_lru_node *nlru = &lru->node[nid]; + + spin_lock(&nlru->lock); + BUG_ON(nlru->nr_items < 0); + count += nlru->nr_items; + spin_unlock(&nlru->lock); + } + + return count; +} +EXPORT_SYMBOL_GPL(list_lru_count); + +static unsigned long +list_lru_walk_node( + struct list_lru *lru, + int nid, + list_lru_walk_cb isolate, + void *cb_arg, + long *nr_to_walk) +{ + struct list_lru_node *nlru = &lru->node[nid]; struct list_head *item, *n; - unsigned long removed = 0; + unsigned long isolated = 0; - spin_lock(&lru->lock); + spin_lock(&nlru->lock); restart: - list_for_each_safe(item, n, &lru->list) { + list_for_each_safe(item, n, &nlru->list) { enum lru_status ret; - if (nr_to_walk-- < 0) + if ((*nr_to_walk)-- < 0) break; - ret = isolate(item, &lru->lock, cb_arg); + ret = isolate(item, &nlru->lock, cb_arg); switch (ret) { case LRU_REMOVED: - lru->nr_items--; - removed++; + if (--nlru->nr_items == 0) + node_clear(nid, lru->active_nodes); + BUG_ON(nlru->nr_items < 0); + isolated++; break; case LRU_ROTATE: - list_move_tail(item, &lru->list); + list_move_tail(item, &nlru->list); break; case LRU_SKIP: break; @@ -77,42 +112,89 @@ restart: BUG(); } } - spin_unlock(&lru->lock); - return removed; + spin_unlock(&nlru->lock); + return isolated; } -EXPORT_SYMBOL_GPL(list_lru_walk); unsigned long -list_lru_dispose_all( - struct list_lru *lru, - list_lru_dispose_cb dispose) +list_lru_walk( + struct list_lru *lru, + list_lru_walk_cb isolate, + void *cb_arg, + long nr_to_walk) { - unsigned long disposed = 0; + long isolated = 0; + int nid; + + for_each_node_mask(nid, lru->active_nodes) { + isolated += list_lru_walk_node(lru, nid, isolate, + cb_arg, &nr_to_walk); + if 
(nr_to_walk <= 0) + break; + } + return isolated; +} +EXPORT_SYMBOL_GPL(list_lru_walk); + +static unsigned long +list_lru_dispose_all_node( + struct list_lru *lru, + int nid, + list_lru_dispose_cb dispose) +{ + struct list_lru_node *nlru = &lru->node[nid]; LIST_HEAD(dispose_list); + unsigned long disposed = 0; - spin_lock(&lru->lock); - while (!list_empty(&lru->list)) { - list_splice_init(&lru->list, &dispose_list); - disposed += lru->nr_items; - lru->nr_items = 0; - spin_unlock(&lru->lock); + spin_lock(&nlru->lock); + while (!list_empty(&nlru->list)) { + list_splice_init(&nlru->list, &dispose_list); + disposed += nlru->nr_items; + nlru->nr_items = 0; + node_clear(nid, lru->active_nodes); + spin_unlock(&nlru->lock); dispose(&dispose_list); - spin_lock(&lru->lock); + spin_lock(&nlru->lock); } - spin_unlock(&lru->lock); + spin_unlock(&nlru->lock); return disposed; } +unsigned long +list_lru_dispose_all( + struct list_lru *lru, + list_lru_dispose_cb dispose) +{ + unsigned long disposed; + unsigned long total = 0; + int nid; + + do { + disposed = 0; + for_each_node_mask(nid, lru->active_nodes) { + disposed += list_lru_dispose_all_node(lru, nid, + dispose); + } + total += disposed; + } while (disposed != 0); + + return total; +} + int list_lru_init( struct list_lru *lru) { - spin_lock_init(&lru->lock); - INIT_LIST_HEAD(&lru->list); - lru->nr_items = 0; + int i; + nodes_clear(lru->active_nodes); + for (i = 0; i < MAX_NUMNODES; i++) { + spin_lock_init(&lru->node[i].lock); + INIT_LIST_HEAD(&lru->node[i].list); + lru->node[i].nr_items = 0; + } return 0; } EXPORT_SYMBOL_GPL(list_lru_init); -- 1.8.1.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2013-05-09 6:06 UTC|newest] Thread overview: 137+ messages / expand[flat|nested] mbox.gz Atom feed top 2013-05-09 6:06 [PATCH v5 00/31] kmemcg shrinkers Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 01/31] super: fix calculation of shrinkable objects for small numbers Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 02/31] vmscan: take at least one pass with shrinkers Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 11:12 ` Mel Gorman 2013-05-09 11:12 ` Mel Gorman [not found] ` <20130509111226.GR11497-l3A5Bk7waGM@public.gmane.org> 2013-05-09 11:28 ` Glauber Costa 2013-05-09 11:28 ` Glauber Costa 2013-05-09 11:28 ` Glauber Costa [not found] ` <518B884C.9090704-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org> 2013-05-09 11:35 ` Glauber Costa 2013-05-09 11:35 ` Glauber Costa 2013-05-09 11:35 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 03/31] dcache: convert dentry_stat.nr_unused to per-cpu counters Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 04/31] dentry: move to per-sb LRU locks Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa [not found] ` <1368079608-5611-5-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org> 2013-05-10 5:29 ` Dave Chinner 2013-05-10 5:29 ` Dave Chinner 2013-05-10 8:16 ` Dave Chinner 2013-05-09 6:06 ` [PATCH v5 05/31] dcache: remove dentries from LRU before putting on dispose list Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 06/31] mm: new shrinker API Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 13:30 ` Mel Gorman [not found] ` <1368079608-5611-1-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org> 2013-05-09 6:06 ` [PATCH v5 07/31] shrinker: convert superblock shrinkers to new API Glauber Costa 2013-05-09 6:06 
` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 13:33 ` Mel Gorman 2013-05-09 6:06 ` [PATCH v5 08/31] list: add a new LRU list type Glauber Costa 2013-05-09 6:06 ` Glauber Costa [not found] ` <1368079608-5611-9-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org> 2013-05-09 13:37 ` Mel Gorman 2013-05-09 13:37 ` Mel Gorman [not found] ` <20130509133742.GW11497-l3A5Bk7waGM@public.gmane.org> 2013-05-09 21:02 ` Glauber Costa 2013-05-09 21:02 ` Glauber Costa 2013-05-09 21:02 ` Glauber Costa 2013-05-10 9:21 ` Mel Gorman 2013-05-10 9:56 ` Glauber Costa 2013-05-10 9:56 ` Glauber Costa [not found] ` <518CC44D.1020409-bzQdu9zFT3WakBO8gow8eQ@public.gmane.org> 2013-05-10 10:01 ` Mel Gorman 2013-05-10 10:01 ` Mel Gorman 2013-05-09 6:06 ` [PATCH v5 09/31] inode: convert inode lru list to generic lru list code Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 10/31] dcache: convert to use new lru list infrastructure Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa [this message] 2013-05-09 6:06 ` [PATCH v5 11/31] list_lru: per-node " Glauber Costa 2013-05-09 13:42 ` Mel Gorman [not found] ` <20130509134246.GX11497-l3A5Bk7waGM@public.gmane.org> 2013-05-09 21:05 ` Glauber Costa 2013-05-09 21:05 ` Glauber Costa 2013-05-09 21:05 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 12/31] shrinker: add node awareness Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 13/31] fs: convert inode and dentry shrinking to be node aware Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 14/31] xfs: convert buftarg LRU to generic code Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa [not found] ` <1368079608-5611-15-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org> 2013-05-09 13:43 ` Mel Gorman 2013-05-09 13:43 ` Mel Gorman 2013-05-09 6:06 ` [PATCH v5 15/31] xfs: convert dquot cache lru to list_lru Glauber 
Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 16/31] fs: convert fs shrinkers to new scan/count API Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 17/31] drivers: convert shrinkers to new count/scan API Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 13:52 ` Mel Gorman 2013-05-09 13:52 ` Mel Gorman [not found] ` <20130509135209.GZ11497-l3A5Bk7waGM@public.gmane.org> 2013-05-09 21:19 ` Glauber Costa 2013-05-09 21:19 ` Glauber Costa 2013-05-09 21:19 ` Glauber Costa 2013-05-10 9:00 ` Mel Gorman 2013-05-09 6:06 ` [PATCH v5 18/31] shrinker: convert remaining shrinkers to " Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 19/31] hugepage: convert huge zero page shrinker to new shrinker API Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa [not found] ` <1368079608-5611-20-git-send-email-glommer-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org> 2013-05-10 1:24 ` Kirill A. Shutemov 2013-05-10 1:24 ` Kirill A. 
Shutemov 2013-05-09 6:06 ` [PATCH v5 20/31] shrinker: Kill old ->shrink API Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 13:53 ` Mel Gorman 2013-05-09 6:06 ` [PATCH v5 21/31] vmscan: also shrink slab in memcg pressure Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 22/31] memcg,list_lru: duplicate LRUs upon kmemcg creation Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 23/31] lru: add an element to a memcg list Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 24/31] list_lru: per-memcg walks Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 25/31] memcg: per-memcg kmem shrinking Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 26/31] memcg: scan cache objects hierarchically Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 27/31] super: targeted memcg reclaim Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 28/31] memcg: move initialization to memcg creation Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 29/31] vmpressure: in-kernel notifications Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 30/31] memcg: reap dead memcgs upon global memory pressure Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` [PATCH v5 31/31] memcg: debugging facility to access dangling memcgs Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 6:06 ` Glauber Costa 2013-05-09 10:55 ` [PATCH v5 00/31] kmemcg shrinkers Mel Gorman [not found] ` <20130509105519.GQ11497-l3A5Bk7waGM@public.gmane.org> 2013-05-09 11:34 ` Glauber 
Costa 2013-05-09 11:34 ` Glauber Costa 2013-05-09 11:34 ` Glauber Costa 2013-05-09 13:18 ` Dave Chinner 2013-05-09 14:03 ` Mel Gorman [not found] ` <20130509140311.GB11497-l3A5Bk7waGM@public.gmane.org> 2013-05-09 21:24 ` Glauber Costa 2013-05-09 21:24 ` Glauber Costa 2013-05-09 21:24 ` Glauber Costa -- strict thread matches above, loose matches on Subject: below -- 2013-05-08 20:22 Glauber Costa 2013-05-08 20:22 ` [PATCH v5 11/31] list_lru: per-node list infrastructure Glauber Costa 2013-05-08 20:22 ` Glauber Costa
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=1368079608-5611-12-git-send-email-glommer@openvz.org \ --to=glommer@openvz.org \ --cc=akpm@linux-foundation.org \ --cc=cgroups@vger.kernel.org \ --cc=dchinner@redhat.com \ --cc=gthelen@google.com \ --cc=hannes@cmpxchg.org \ --cc=hughd@google.com \ --cc=kamezawa.hiroyu@jp.fujitsu.com \ --cc=linux-fsdevel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=mgorman@suse.de \ --cc=mhocko@suse.cz \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes; see the mirroring instructions for how to clone and mirror all data and code used by this external index.