From: Suleiman Souhlal <suleiman@google.com>
To: Glauber Costa <glommer@parallels.com>
Cc: cgroups@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, devel@openvz.org,
	kamezawa.hiroyu@jp.fujitsu.com, Michal Hocko <mhocko@suse.cz>,
	Johannes Weiner <hannes@cmpxchg.org>,
	fweisbec@gmail.com, Greg Thelen <gthelen@google.com>,
	Christoph Lameter <cl@linux.com>,
	Pekka Enberg <penberg@cs.helsinki.fi>
Subject: Re: [PATCH 19/23] slab: per-memcg accounting of slab caches
Date: Mon, 30 Apr 2012 14:25:47 -0700
Message-ID: <CABCjUKCX6MvOaS5s_n6tYcmfyDCgW60aXTG8ZbznmZOAfS=joA@mail.gmail.com>
In-Reply-To: <1335138820-26590-8-git-send-email-glommer@parallels.com>

On Sun, Apr 22, 2012 at 4:53 PM, Glauber Costa <glommer@parallels.com> wrote:
> This patch charges allocation of a slab object to a particular
> memcg.
>
> The cache is selected with mem_cgroup_get_kmem_cache(),
> which is the biggest overhead we pay here, because
> it happens on every allocation. However, other than forcing
> a function call, this function is not very expensive, and
> tries to return as soon as we realize we are not a memcg cache.
>
> The charge/uncharge functions are heavier, but are only called
> for new page allocations.
>
> Code is heavily inspired by Suleiman's, with adaptations to
> the patchset and minor simplifications by me.
>
> Signed-off-by: Glauber Costa <glommer@parallels.com>
> CC: Christoph Lameter <cl@linux.com>
> CC: Pekka Enberg <penberg@cs.helsinki.fi>
> CC: Michal Hocko <mhocko@suse.cz>
> CC: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
> CC: Johannes Weiner <hannes@cmpxchg.org>
> CC: Suleiman Souhlal <suleiman@google.com>
> ---
>  include/linux/slab_def.h |   66 ++++++++++++++++++++++++++++-
>  mm/slab.c                |  105 ++++++++++++++++++++++++++++++++++++++++++----
>  2 files changed, 162 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
> index 54d25d7..c4f7e45 100644
> --- a/include/linux/slab_def.h
> +++ b/include/linux/slab_def.h
> @@ -51,7 +51,7 @@ struct kmem_cache {
>        void (*ctor)(void *obj);
>
>  /* 4) cache creation/removal */
> -       const char *name;
> +       char *name;
>        struct list_head next;
>
>  /* 5) statistics */
> @@ -219,4 +219,68 @@ found:
>
>  #endif /* CONFIG_NUMA */
>
> +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
> +
> +void kmem_cache_drop_ref(struct kmem_cache *cachep);
> +
> +static inline void
> +kmem_cache_get_ref(struct kmem_cache *cachep)
> +{
> +       if (cachep->memcg_params.id == -1 &&
> +           unlikely(!atomic_add_unless(&cachep->memcg_params.refcnt, 1, 0)))
> +               BUG();
> +}
> +
> +static inline void
> +mem_cgroup_put_kmem_cache(struct kmem_cache *cachep)
> +{
> +       rcu_read_unlock();
> +}
> +
> +static inline void
> +mem_cgroup_kmem_cache_prepare_sleep(struct kmem_cache *cachep)
> +{
> +       /*
> +        * Make sure the cache doesn't get freed while we have interrupts
> +        * enabled.
> +        */
> +       kmem_cache_get_ref(cachep);
> +       rcu_read_unlock();
> +}
> +
> +static inline void
> +mem_cgroup_kmem_cache_finish_sleep(struct kmem_cache *cachep)
> +{
> +       rcu_read_lock();
> +       kmem_cache_drop_ref(cachep);
> +}
> +
> +#else /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
> +
> +static inline void
> +kmem_cache_get_ref(struct kmem_cache *cachep)
> +{
> +}
> +
> +static inline void
> +kmem_cache_drop_ref(struct kmem_cache *cachep)
> +{
> +}
> +
> +static inline void
> +mem_cgroup_put_kmem_cache(struct kmem_cache *cachep)
> +{
> +}
> +
> +static inline void
> +mem_cgroup_kmem_cache_prepare_sleep(struct kmem_cache *cachep)
> +{
> +}
> +
> +static inline void
> +mem_cgroup_kmem_cache_finish_sleep(struct kmem_cache *cachep)
> +{
> +}
> +#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
> +
>  #endif /* _LINUX_SLAB_DEF_H */
> diff --git a/mm/slab.c b/mm/slab.c
> index 13948c3..ac0916b 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -1818,20 +1818,28 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
>        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
>                flags |= __GFP_RECLAIMABLE;
>
> +       nr_pages = (1 << cachep->gfporder);
> +       if (!mem_cgroup_charge_slab(cachep, flags, nr_pages * PAGE_SIZE))
> +               return NULL;
> +
>        page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
>        if (!page) {
>                if (!(flags & __GFP_NOWARN) && printk_ratelimit())
>                        slab_out_of_memory(cachep, flags, nodeid);
> +
> +               mem_cgroup_uncharge_slab(cachep, nr_pages * PAGE_SIZE);
>                return NULL;
>        }
>
> -       nr_pages = (1 << cachep->gfporder);
>        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
>                add_zone_page_state(page_zone(page),
>                        NR_SLAB_RECLAIMABLE, nr_pages);
>        else
>                add_zone_page_state(page_zone(page),
>                        NR_SLAB_UNRECLAIMABLE, nr_pages);
> +
> +       kmem_cache_get_ref(cachep);
> +
>        for (i = 0; i < nr_pages; i++)
>                __SetPageSlab(page + i);
>
> @@ -1864,6 +1872,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
>        else
>                sub_zone_page_state(page_zone(page),
>                                NR_SLAB_UNRECLAIMABLE, nr_freed);
> +       mem_cgroup_uncharge_slab(cachep, i * PAGE_SIZE);
> +       kmem_cache_drop_ref(cachep);
>        while (i--) {
>                BUG_ON(!PageSlab(page));
>                __ClearPageSlab(page);
> @@ -2823,12 +2833,28 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
>        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
>                rcu_barrier();
>
> +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
> +       /* Not a memcg cache */
> +       if (cachep->memcg_params.id != -1) {
> +               mem_cgroup_release_cache(cachep);
> +               mem_cgroup_flush_cache_create_queue();
> +       }
> +#endif
>        __kmem_cache_destroy(cachep);
>        mutex_unlock(&cache_chain_mutex);
>        put_online_cpus();
>  }
>  EXPORT_SYMBOL(kmem_cache_destroy);
>
> +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
> +void kmem_cache_drop_ref(struct kmem_cache *cachep)
> +{
> +       if (cachep->memcg_params.id == -1 &&
> +           unlikely(atomic_dec_and_test(&cachep->memcg_params.refcnt)))
> +               mem_cgroup_destroy_cache(cachep);
> +}
> +#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
> +
>  /*
>  * Get the memory for a slab management obj.
>  * For a slab cache when the slab descriptor is off-slab, slab descriptors
> @@ -3028,8 +3054,10 @@ static int cache_grow(struct kmem_cache *cachep,
>
>        offset *= cachep->colour_off;
>
> -       if (local_flags & __GFP_WAIT)
> +       if (local_flags & __GFP_WAIT) {
>                local_irq_enable();
> +               mem_cgroup_kmem_cache_prepare_sleep(cachep);
> +       }
>
>        /*
>         * The test for missing atomic flag is performed here, rather than
> @@ -3058,8 +3086,10 @@ static int cache_grow(struct kmem_cache *cachep,
>
>        cache_init_objs(cachep, slabp);
>
> -       if (local_flags & __GFP_WAIT)
> +       if (local_flags & __GFP_WAIT) {
>                local_irq_disable();
> +               mem_cgroup_kmem_cache_finish_sleep(cachep);
> +       }
>        check_irq_off();
>        spin_lock(&l3->list_lock);
>
> @@ -3072,8 +3102,10 @@ static int cache_grow(struct kmem_cache *cachep,
>  opps1:
>        kmem_freepages(cachep, objp);
>  failed:
> -       if (local_flags & __GFP_WAIT)
> +       if (local_flags & __GFP_WAIT) {
>                local_irq_disable();
> +               mem_cgroup_kmem_cache_finish_sleep(cachep);
> +       }
>        return 0;
>  }
>
> @@ -3834,11 +3866,15 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
>  */
>  void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
>  {
> -       void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
> +       void *ret;
> +
> +       rcu_read_lock();
> +       cachep = mem_cgroup_get_kmem_cache(cachep, flags);
> +       rcu_read_unlock();

Don't we need to check in_interrupt(), current, and __GFP_NOFAIL every
time we call mem_cgroup_get_kmem_cache()?

I would personally prefer if those checks were put inside
mem_cgroup_get_kmem_cache() instead of having to repeat them in
every caller.
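
Something along these lines, maybe (a completely untested sketch;
__mem_cgroup_get_kmem_cache() is just a made-up name here for the
slow path that does the actual per-memcg lookup):

static inline struct kmem_cache *
mem_cgroup_get_kmem_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	/*
	 * Short-circuit the contexts we can't or shouldn't account
	 * (interrupt context, no task, __GFP_NOFAIL allocations), so
	 * that individual callers don't have to repeat these checks.
	 */
	if (in_interrupt() || !current || (gfp & __GFP_NOFAIL))
		return cachep;

	return __mem_cgroup_get_kmem_cache(cachep, gfp);
}

That way kmem_cache_alloc() and friends could just call it
unconditionally under rcu_read_lock(), as in the hunk above.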

-- Suleiman
