From: Roman Gushchin <guro@fb.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@kernel.org>,
Shakeel Butt <shakeelb@google.com>, <linux-mm@kvack.org>,
Vlastimil Babka <vbabka@suse.cz>, <kernel-team@fb.com>,
<linux-kernel@vger.kernel.org>, Roman Gushchin <guro@fb.com>
Subject: [PATCH v6 04/19] mm: slub: implement SLUB version of obj_to_index()
Date: Mon, 8 Jun 2020 16:06:39 -0700
Message-ID: <20200608230654.828134-5-guro@fb.com>
In-Reply-To: <20200608230654.828134-1-guro@fb.com>

This commit implements the SLUB version of the obj_to_index() function,
which will be required to calculate the offset of an obj_cgroup in the
obj_cgroups vector in order to store/obtain the objcg ownership data.

To make it faster, let's repeat SLAB's trick introduced by
commit 6a2d7a955d8d ("[PATCH] SLAB: use a multiply instead of a
divide in obj_to_index()") and avoid an expensive division.
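
For reference, below is a minimal userspace sketch of that trick, closely
following the kernel's reciprocal_value() and reciprocal_divide() helpers
from <linux/reciprocal_div.h>, simplified under the assumption d > 1
(always true for slab object sizes); the example numbers are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	struct reciprocal_value {
		uint32_t m;
		uint8_t sh1, sh2;
	};

	/* Precomputed once per divisor (per kmem_cache, at cache creation). */
	static struct reciprocal_value reciprocal_value(uint32_t d)
	{
		struct reciprocal_value R;
		int l = 32 - __builtin_clz(d - 1);	/* fls(d - 1); assumes d > 1 */

		R.m = (uint32_t)(((1ULL << 32) * ((1ULL << l) - d)) / d + 1);
		R.sh1 = 1;
		R.sh2 = l - 1;
		return R;
	}

	/* Hot path: one multiply and two shifts instead of a hardware divide. */
	static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
	{
		uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

		return (t + ((a - t) >> R.sh1)) >> R.sh2;
	}

	int main(void)
	{
		struct reciprocal_value r = reciprocal_value(96); /* e.g. object size 96 */

		/* offset 3456 within a slab page -> object index 36 */
		printf("%u\n", reciprocal_divide(3456, r));
		return 0;
	}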
Vlastimil Babka noticed that SLUB already has a similar
function called slab_index(), which is defined only if SLUB_DEBUG
is enabled. It performs the same calculation, but with a division,
and it takes a page address instead of a page pointer.
Let's remove slab_index() and replace it with the new helper
__obj_to_index(), which takes a page address. obj_to_index()
will be a simple wrapper taking a page pointer and passing
page_address(page) into __obj_to_index().
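
To illustrate the intended use: with obj_to_index() in place, looking up
the ownership record for an object boils down to a single array index.
The obj_cgroups vector itself is added by later patches in this series;
objcgs_of() below is a hypothetical placeholder, not an existing helper:

	/* Hypothetical sketch: objcg lookup via obj_to_index(). */
	static inline struct obj_cgroup *obj_to_objcg(struct kmem_cache *s,
						      struct page *page, void *obj)
	{
		unsigned int off = obj_to_index(s, page, obj);

		return objcgs_of(page)[off];	/* objcgs_of() is a placeholder */
	}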
Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
---
include/linux/slub_def.h | 16 ++++++++++++++++
mm/slub.c | 15 +++++----------
2 files changed, 21 insertions(+), 10 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d2153789bd9f..30e91c83d401 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -8,6 +8,7 @@
* (C) 2007 SGI, Christoph Lameter
*/
#include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -86,6 +87,7 @@ struct kmem_cache {
unsigned long min_partial;
unsigned int size; /* The size of an object including metadata */
unsigned int object_size;/* The size of an object without metadata */
+ struct reciprocal_value reciprocal_size;
unsigned int offset; /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
@@ -182,4 +184,18 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
return result;
}
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+ void *addr, void *obj)
+{
+ return reciprocal_divide(kasan_reset_tag(obj) - addr,
+ cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct page *page, void *obj)
+{
+ return __obj_to_index(cache, page_address(page), obj);
+}
+
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slub.c b/mm/slub.c
index 354e475db5ec..6007c38071f5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -313,12 +313,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
__p < (__addr) + (__objects) * (__s)->size; \
__p += (__s)->size)
-/* Determine object index from a given position */
-static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
-{
- return (kasan_reset_tag(p) - addr) / s->size;
-}
-
static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
return ((unsigned int)PAGE_SIZE << order) / size;
@@ -461,7 +455,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
bitmap_zero(object_map, page->objects);
for (p = page->freelist; p; p = get_freepointer(s, p))
- set_bit(slab_index(p, s, addr), object_map);
+ set_bit(__obj_to_index(s, addr, p), object_map);
return object_map;
}
@@ -3675,6 +3669,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/
size = ALIGN(size, s->align);
s->size = size;
+ s->reciprocal_size = reciprocal_value(size);
if (forced_order >= 0)
order = forced_order;
else
@@ -3781,7 +3776,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
map = get_map(s, page);
for_each_object(p, s, addr, page->objects) {
- if (!test_bit(slab_index(p, s, addr), map)) {
+ if (!test_bit(__obj_to_index(s, addr, p), map)) {
pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
print_tracking(s, p);
}
@@ -4506,7 +4501,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page)
/* Now we know that a valid freelist exists */
map = get_map(s, page);
for_each_object(p, s, addr, page->objects) {
- u8 val = test_bit(slab_index(p, s, addr), map) ?
+ u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
if (!check_object(s, page, p, val))
@@ -4697,7 +4692,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
map = get_map(s, page);
for_each_object(p, s, addr, page->objects)
- if (!test_bit(slab_index(p, s, addr), map))
+ if (!test_bit(__obj_to_index(s, addr, p), map))
add_location(t, s, get_track(s, p, alloc));
put_map(map);
}
--
2.25.4