From: Roman Gushchin <guro@fb.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@kernel.org>, <linux-mm@kvack.org>,
<kernel-team@fb.com>, <linux-kernel@vger.kernel.org>,
Roman Gushchin <guro@fb.com>
Subject: [PATCH v3 07/19] mm: memcg/slab: allocate obj_cgroups for non-root slab pages
Date: Wed, 22 Apr 2020 13:46:56 -0700 [thread overview]
Message-ID: <20200422204708.2176080-8-guro@fb.com> (raw)
In-Reply-To: <20200422204708.2176080-1-guro@fb.com>
Allocate and release the memory needed to store obj_cgroup pointers for
each non-root slab page. Reuse the page->mem_cgroup pointer to store a
pointer to the allocated space.

To distinguish obj_cgroups pointers from memcg pointers in cases where
it's not obvious which one is in use (as in page_cgroup_ino()),
let's always set the lowest bit in the obj_cgroup case.
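
As an illustration of the scheme, here is a minimal standalone sketch
(not the exact helpers added below; the tag/untag names are made up
for this example):

/*
 * Sketch of the lowest-bit tagging trick. The vectors come from
 * kcalloc(), so real pointers are at least word-aligned and bit 0 is
 * always clear; it can therefore be borrowed as a type tag.
 */
static inline struct obj_cgroup **obj_cgroups_tag(struct obj_cgroup **vec)
{
        return (struct obj_cgroup **)((unsigned long)vec | 0x1UL);
}

static inline struct obj_cgroup **obj_cgroups_untag(struct obj_cgroup **ptr)
{
        return (struct obj_cgroup **)((unsigned long)ptr & ~0x1UL);
}

/* True if the union currently holds a tagged obj_cgroups vector. */
static inline bool page_uses_obj_cgroups(struct page *page)
{
        return (unsigned long)page->mem_cgroup & 0x1UL;
}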
Signed-off-by: Roman Gushchin <guro@fb.com>
---
 include/linux/mm_types.h |  5 ++++-
 include/linux/slab_def.h |  5 +++++
 include/linux/slub_def.h |  2 ++
 mm/memcontrol.c          | 17 +++++++++++---
 mm/slab.c                |  3 ++-
 mm/slab.h                | 48 ++++++++++++++++++++++++++++++++++++++++
 mm/slub.c                |  5 +++++
 7 files changed, 80 insertions(+), 5 deletions(-)
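
As a usage note for reviewers (commentary, not part of the commit): a
later patch in the series ("mm: memcg/slab: save obj_cgroup for
non-root slab objects") indexes this vector per object. Roughly, with
a hypothetical helper name:

/*
 * Illustration only: map an object to its obj_cgroup slot via the
 * per-page vector introduced here. obj_cgroup_of() is a made-up name.
 */
static inline struct obj_cgroup *obj_cgroup_of(struct kmem_cache *s,
                                               struct page *page, void *obj)
{
        return page_obj_cgroups(page)[obj_to_index(s, page, obj)];
}

The space overhead is objs_per_slab(s) * sizeof(struct obj_cgroup *)
bytes per non-root slab page.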
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4aba6c0c2ba8..0ad7e700f26d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -198,7 +198,10 @@ struct page {
 	atomic_t _refcount;
 
 #ifdef CONFIG_MEMCG
-	struct mem_cgroup *mem_cgroup;
+	union {
+		struct mem_cgroup *mem_cgroup;
+		struct obj_cgroup **obj_cgroups;
+	};
 #endif
 
 	/*
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index abc7de77b988..967a9a525eab 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -114,4 +114,9 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
+static inline int objs_per_slab(const struct kmem_cache *cache)
+{
+	return cache->num;
+}
+
 #endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 200ea292f250..cbda7d55796a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -191,4 +191,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
				 cache->reciprocal_size);
 }
 
+extern int objs_per_slab(struct kmem_cache *cache);
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7f87a0eeafec..63826e460b3f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -549,10 +549,21 @@ ino_t page_cgroup_ino(struct page *page)
 	unsigned long ino = 0;
 
 	rcu_read_lock();
-	if (PageSlab(page) && !PageTail(page))
+	if (PageSlab(page) && !PageTail(page)) {
 		memcg = memcg_from_slab_page(page);
-	else
-		memcg = READ_ONCE(page->mem_cgroup);
+	} else {
+		memcg = page->mem_cgroup;
+
+		/*
+		 * The lowest bit set means that memcg isn't a valid
+		 * memcg pointer, but an obj_cgroups pointer.
+		 * In this case the page is shared and doesn't belong
+		 * to any specific memory cgroup.
+		 */
+		if ((unsigned long) memcg & 0x1UL)
+			memcg = NULL;
+	}
+
 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 		memcg = parent_mem_cgroup(memcg);
 	if (memcg)
diff --git a/mm/slab.c b/mm/slab.c
index 9350062ffc1a..f2d67984595b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1370,7 +1370,8 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
+	if (charge_slab_page(page, flags, cachep->gfporder, cachep,
+			     cachep->num)) {
 		__free_pages(page, cachep->gfporder);
 		return NULL;
 	}
diff --git a/mm/slab.h b/mm/slab.h
index 8a574d9361c1..44def57f050e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -319,6 +319,18 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 	return s->memcg_params.root_cache;
 }
 
+static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
+{
+	/*
+	 * page->mem_cgroup and page->obj_cgroups share the same space.
+	 * To distinguish between them in case we don't know for sure
+	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
+	 * always set the lowest bit of obj_cgroups.
+	 */
+	return (struct obj_cgroup **)
+		((unsigned long)page->obj_cgroups & ~0x1UL);
+}
+
 /*
  * Expects a pointer to a slab page. Please note, that PageSlab() check
  * isn't sufficient, as it returns true also for tail compound slab pages,
+
/*
* Expects a pointer to a slab page. Please note, that PageSlab() check
* isn't sufficient, as it returns true also for tail compound slab pages,
@@ -406,6 +418,25 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
 }
 
+static inline int memcg_alloc_page_obj_cgroups(struct page *page, gfp_t gfp,
+					       unsigned int objects)
+{
+	void *vec;
+
+	vec = kcalloc(objects, sizeof(struct obj_cgroup *), gfp);
+	if (!vec)
+		return -ENOMEM;
+
+	page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
+	return 0;
+}
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+	kfree(page_obj_cgroups(page));
+	page->obj_cgroups = NULL;
+}
+
 extern void slab_init_memcg_params(struct kmem_cache *);
 extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
+
extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
@@ -455,6 +486,16 @@ static inline void memcg_uncharge_slab(struct page *page, int order,
 {
 }
 
+static inline int memcg_alloc_page_obj_cgroups(struct page *page, gfp_t gfp,
+					       unsigned int objects)
+{
+	return 0;
+}
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+}
+
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
@@ -481,12 +522,18 @@ static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
 {
+	int ret;
+
 	if (is_root_cache(s)) {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 				    PAGE_SIZE << order);
 		return 0;
 	}
 
+	ret = memcg_alloc_page_obj_cgroups(page, gfp, objs_per_slab(s));
+	if (ret)
+		return ret;
+
 	return memcg_charge_slab(page, gfp, order, s);
 }
@@ -499,6 +546,7 @@ static __always_inline void uncharge_slab_page(struct page *page, int order,
 		return;
 	}
 
+	memcg_free_page_obj_cgroups(page);
 	memcg_uncharge_slab(page, order, s);
 }
diff --git a/mm/slub.c b/mm/slub.c
index 8d16babe1829..68c2c45dfac1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5992,4 +5992,9 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 {
 	return -EIO;
 }
+
+int objs_per_slab(struct kmem_cache *cache)
+{
+	return oo_objects(cache->oo);
+}
 #endif /* CONFIG_SLUB_DEBUG */
--
2.25.3