From: Roman Gushchin <guro@fb.com>
To: <linux-mm@kvack.org>, Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@kernel.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Shakeel Butt <shakeelb@google.com>,
Vladimir Davydov <vdavydov.dev@gmail.com>,
<linux-kernel@vger.kernel.org>, <kernel-team@fb.com>,
Bharata B Rao <bharata@linux.ibm.com>,
Yafang Shao <laoar.shao@gmail.com>, Roman Gushchin <guro@fb.com>
Subject: [PATCH v2 16/28] mm: memcg/slab: allocate obj_cgroups for non-root slab pages
Date: Mon, 27 Jan 2020 09:34:41 -0800 [thread overview]
Message-ID: <20200127173453.2089565-17-guro@fb.com> (raw)
In-Reply-To: <20200127173453.2089565-1-guro@fb.com>
Allocate and release memory to store obj_cgroup pointers for each
non-root slab page. Reuse page->mem_cgroup pointer to store a pointer
to the allocated space.
To distinguish between obj_cgroups and memcg pointers in cases
where it's not obvious which one is used (as in page_cgroup_ino()),
let's always set the lowest bit in the obj_cgroup case.
Signed-off-by: Roman Gushchin <guro@fb.com>
---
include/linux/mm.h | 25 ++++++++++++++++++--
include/linux/mm_types.h | 5 +++-
mm/memcontrol.c | 5 ++--
mm/slab.c | 3 ++-
mm/slab.h | 51 +++++++++++++++++++++++++++++++++++++++-
mm/slub.c | 2 +-
6 files changed, 83 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 080f8ac8bfb7..65224becc4ca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1264,12 +1264,33 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
- return page->mem_cgroup;
+ struct mem_cgroup *memcg = page->mem_cgroup;
+
+ /*
+ * The lowest bit set means that memcg isn't a valid memcg pointer,
+ * but an obj_cgroups pointer. In this case the page is shared and
+ * isn't charged to any specific memory cgroup. Return NULL.
+ */
+ if ((unsigned long) memcg & 0x1UL)
+ memcg = NULL;
+
+ return memcg;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
+ struct mem_cgroup *memcg = READ_ONCE(page->mem_cgroup);
+
WARN_ON_ONCE(!rcu_read_lock_held());
- return READ_ONCE(page->mem_cgroup);
+
+ /*
+ * The lowest bit set means that memcg isn't a valid memcg pointer,
+ * but an obj_cgroups pointer. In this case the page is shared and
+ * isn't charged to any specific memory cgroup. Return NULL.
+ */
+ if ((unsigned long) memcg & 0x1UL)
+ memcg = NULL;
+
+ return memcg;
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 270aa8fd2800..5102f00f3336 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -198,7 +198,10 @@ struct page {
atomic_t _refcount;
#ifdef CONFIG_MEMCG
- struct mem_cgroup *mem_cgroup;
+ union {
+ struct mem_cgroup *mem_cgroup;
+ struct obj_cgroup **obj_cgroups;
+ };
#endif
/*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9aa37bc61db5..94337ab1ebe9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -545,7 +545,8 @@ ino_t page_cgroup_ino(struct page *page)
if (PageSlab(page) && !PageTail(page))
memcg = memcg_from_slab_page(page);
else
- memcg = READ_ONCE(page->mem_cgroup);
+ memcg = page_memcg_rcu(page);
+
while (memcg && !(memcg->css.flags & CSS_ONLINE))
memcg = parent_mem_cgroup(memcg);
if (memcg)
@@ -2783,7 +2784,7 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
return memcg_from_slab_page(page);
/* All other pages use page->mem_cgroup */
- return page->mem_cgroup;
+ return page_memcg(page);
}
static int memcg_alloc_cache_id(void)
diff --git a/mm/slab.c b/mm/slab.c
index a89633603b2d..22e161b57367 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1370,7 +1370,8 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
return NULL;
}
- if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
+ if (charge_slab_page(page, flags, cachep->gfporder, cachep,
+ cachep->num)) {
__free_pages(page, cachep->gfporder);
return NULL;
}
diff --git a/mm/slab.h b/mm/slab.h
index 7925f7005161..8ee8c3a250ac 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -319,6 +319,18 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
return s->memcg_params.root_cache;
}
+static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
+{
+ /*
+ * page->mem_cgroup and page->obj_cgroups are sharing the same
+ * space. To distinguish between them in case we don't know for sure
+ * that the page is a slab page (e.g. page_cgroup_ino()), let's
+ * always set the lowest bit of obj_cgroups.
+ */
+ return (struct obj_cgroup **)
+ ((unsigned long)page->obj_cgroups & ~0x1UL);
+}
+
/*
* Expects a pointer to a slab page. Please note, that PageSlab() check
* isn't sufficient, as it returns true also for tail compound slab pages,
@@ -406,6 +418,25 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
}
+static inline int memcg_alloc_page_obj_cgroups(struct page *page, gfp_t gfp,
+ unsigned int objects)
+{
+ void *vec;
+
+ vec = kcalloc(objects, sizeof(struct obj_cgroup *), gfp);
+ if (!vec)
+ return -ENOMEM;
+
+ page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
+ return 0;
+}
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+ kfree(page_obj_cgroups(page));
+ page->obj_cgroups = NULL;
+}
+
extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
@@ -455,6 +486,16 @@ static inline void memcg_uncharge_slab(struct page *page, int order,
{
}
+static inline int memcg_alloc_page_obj_cgroups(struct page *page, gfp_t gfp,
+ unsigned int objects)
+{
+ return 0;
+}
+
+static inline void memcg_free_page_obj_cgroups(struct page *page)
+{
+}
+
static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
@@ -479,14 +520,21 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
static __always_inline int charge_slab_page(struct page *page,
gfp_t gfp, int order,
- struct kmem_cache *s)
+ struct kmem_cache *s,
+ unsigned int objects)
{
+ int ret;
+
if (is_root_cache(s)) {
mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
PAGE_SIZE << order);
return 0;
}
+ ret = memcg_alloc_page_obj_cgroups(page, gfp, objects);
+ if (ret)
+ return ret;
+
return memcg_charge_slab(page, gfp, order, s);
}
@@ -499,6 +547,7 @@ static __always_inline void uncharge_slab_page(struct page *page, int order,
return;
}
+ memcg_free_page_obj_cgroups(page);
memcg_uncharge_slab(page, order, s);
}
diff --git a/mm/slub.c b/mm/slub.c
index ed6aea234400..165e43076c8b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1516,7 +1516,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
else
page = __alloc_pages_node(node, flags, order);
- if (page && charge_slab_page(page, flags, order, s)) {
+ if (page && charge_slab_page(page, flags, order, s, oo_objects(oo))) {
__free_pages(page, order);
page = NULL;
}
--
2.24.1
next prev parent reply other threads:[~2020-01-27 17:43 UTC|newest]
Thread overview: 84+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-01-27 17:34 [PATCH v2 00/28] The new cgroup slab memory controller Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 01/28] mm: kmem: cleanup (__)memcg_kmem_charge_memcg() arguments Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 02/28] mm: kmem: cleanup memcg_kmem_uncharge_memcg() arguments Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 03/28] mm: kmem: rename memcg_kmem_(un)charge() into memcg_kmem_(un)charge_page() Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 04/28] mm: kmem: switch to nr_pages in (__)memcg_kmem_charge_memcg() Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 05/28] mm: memcg/slab: cache page number in memcg_(un)charge_slab() Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 06/28] mm: kmem: rename (__)memcg_kmem_(un)charge_memcg() to __memcg_kmem_(un)charge() Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 07/28] mm: memcg/slab: introduce mem_cgroup_from_obj() Roman Gushchin
2020-02-03 16:05 ` Johannes Weiner
2020-01-27 17:34 ` [PATCH v2 08/28] mm: fork: fix kernel_stack memcg stats for various stack implementations Roman Gushchin
2020-02-03 16:12 ` Johannes Weiner
2020-01-27 17:34 ` [PATCH v2 09/28] mm: memcg/slab: rename __mod_lruvec_slab_state() into __mod_lruvec_obj_state() Roman Gushchin
2020-02-03 16:13 ` Johannes Weiner
2020-01-27 17:34 ` [PATCH v2 10/28] mm: memcg: introduce mod_lruvec_memcg_state() Roman Gushchin
2020-02-03 17:39 ` Johannes Weiner
2020-01-27 17:34 ` [PATCH v2 11/28] mm: slub: implement SLUB version of obj_to_index() Roman Gushchin
2020-02-03 17:44 ` Johannes Weiner
2020-01-27 17:34 ` [PATCH v2 12/28] mm: vmstat: use s32 for vm_node_stat_diff in struct per_cpu_nodestat Roman Gushchin
2020-02-03 17:58 ` Johannes Weiner
2020-02-03 18:25 ` Roman Gushchin
2020-02-03 20:34 ` Johannes Weiner
2020-02-03 22:28 ` Roman Gushchin
2020-02-03 22:39 ` Johannes Weiner
2020-02-04 1:44 ` Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 13/28] mm: vmstat: convert slab vmstat counter to bytes Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 14/28] mm: memcontrol: decouple reference counting from page accounting Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 15/28] mm: memcg/slab: obj_cgroup API Roman Gushchin
2020-02-03 19:31 ` Johannes Weiner
2020-01-27 17:34 ` Roman Gushchin [this message]
2020-02-03 18:27 ` [PATCH v2 16/28] mm: memcg/slab: allocate obj_cgroups for non-root slab pages Johannes Weiner
2020-02-03 18:34 ` Roman Gushchin
2020-02-03 20:46 ` Johannes Weiner
2020-02-03 21:19 ` Roman Gushchin
2020-02-03 22:29 ` Johannes Weiner
2020-01-27 17:34 ` [PATCH v2 17/28] mm: memcg/slab: save obj_cgroup for non-root slab objects Roman Gushchin
2020-02-03 19:53 ` Johannes Weiner
2020-01-27 17:34 ` [PATCH v2 18/28] mm: memcg/slab: charge individual slab objects instead of pages Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 19/28] mm: memcg/slab: deprecate memory.kmem.slabinfo Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 20/28] mm: memcg/slab: move memcg_kmem_bypass() to memcontrol.h Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 21/28] mm: memcg/slab: use a single set of kmem_caches for all memory cgroups Roman Gushchin
2020-02-03 19:50 ` Johannes Weiner
2020-02-03 20:58 ` Roman Gushchin
2020-02-03 22:17 ` Johannes Weiner
2020-02-03 22:38 ` Roman Gushchin
2020-02-04 1:15 ` Roman Gushchin
2020-02-04 2:47 ` Johannes Weiner
2020-02-04 4:35 ` Roman Gushchin
2020-02-04 18:41 ` Johannes Weiner
2020-02-05 15:58 ` Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 22/28] mm: memcg/slab: simplify memcg cache creation Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 23/28] mm: memcg/slab: deprecate memcg_kmem_get_cache() Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 24/28] mm: memcg/slab: deprecate slab_root_caches Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 25/28] mm: memcg/slab: remove redundant check in memcg_accumulate_slabinfo() Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 26/28] tools/cgroup: add slabinfo.py tool Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 27/28] tools/cgroup: make slabinfo.py compatible with new slab controller Roman Gushchin
2020-01-30 2:17 ` Bharata B Rao
2020-01-30 2:44 ` Roman Gushchin
2020-01-31 22:24 ` Roman Gushchin
2020-02-12 5:21 ` Bharata B Rao
2020-02-12 20:42 ` Roman Gushchin
2020-01-27 17:34 ` [PATCH v2 28/28] kselftests: cgroup: add kernel memory accounting tests Roman Gushchin
2020-01-30 2:06 ` [PATCH v2 00/28] The new cgroup slab memory controller Bharata B Rao
2020-01-30 2:41 ` Roman Gushchin
2020-08-12 23:16 ` Pavel Tatashin
2020-08-12 23:18 ` Pavel Tatashin
2020-08-13 0:04 ` Roman Gushchin
2020-08-13 0:31 ` Pavel Tatashin
2020-08-28 16:47 ` Pavel Tatashin
2020-09-01 5:28 ` Bharata B Rao
2020-09-01 12:52 ` Pavel Tatashin
2020-09-02 6:23 ` Bharata B Rao
2020-09-02 12:34 ` Pavel Tatashin
2020-09-02 9:53 ` Vlastimil Babka
2020-09-02 10:39 ` David Hildenbrand
2020-09-02 12:42 ` Pavel Tatashin
2020-09-02 13:50 ` Michal Hocko
2020-09-02 14:20 ` Pavel Tatashin
2020-09-03 18:09 ` David Hildenbrand
2020-09-02 11:26 ` Michal Hocko
2020-09-02 12:51 ` Pavel Tatashin
2020-09-02 13:51 ` Michal Hocko
2020-09-02 11:32 ` Michal Hocko
2020-09-02 12:53 ` Pavel Tatashin
2020-09-02 13:52 ` Michal Hocko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200127173453.2089565-17-guro@fb.com \
--to=guro@fb.com \
--cc=akpm@linux-foundation.org \
--cc=bharata@linux.ibm.com \
--cc=hannes@cmpxchg.org \
--cc=kernel-team@fb.com \
--cc=laoar.shao@gmail.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mhocko@kernel.org \
--cc=shakeelb@google.com \
--cc=vdavydov.dev@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).