* [v4 PATCH 01/11] mm: vmscan: use nid from shrink_control for tracepoint
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
The tracepoint's nid should show which node the shrink happens on. The start
tracepoint uses the nid from shrinkctl, but the nid may be clamped to 0 before
the end tracepoint fires if the shrinker is not NUMA aware, so the tracing log
may show a shrink starting on one node but ending on another, which is
confusing. The following patch will also stop using nid directly in
do_shrink_slab(), so this change helps clean up the code.
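For reference, do_shrink_slab() clamps the nid near the top of the function,
which is why the end tracepoint can disagree with the start one (rough sketch
of the relevant lines, not the full function):

	static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
					    struct shrinker *shrinker, int priority)
	{
		int nid = shrinkctl->nid;

		/* non-NUMA-aware shrinkers keep all deferred work on node 0 */
		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
			nid = 0;
		...
		/* nid may have been clamped to 0 by the time we get here */
		trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
	}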
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
mm/vmscan.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b1b574ad199d..b512dd5e3a1c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -535,7 +535,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
else
new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
- trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
+ trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
return freed;
}
--
2.26.2
* [v4 PATCH 02/11] mm: vmscan: consolidate shrinker_maps handling code
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
The shrinker map management is not purely memcg specific; it sits at the
intersection of memory cgroups and shrinkers. It is just the allocation and
assignment of a structure, and the only memcg-specific part is that the map is
stored in a memcg structure. So move the shrinker_maps handling code into
vmscan.c for tighter integration with the shrinker code, and remove the
"memcg_" prefix. There is no functional change.
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
include/linux/memcontrol.h | 12 ++--
mm/huge_memory.c | 4 +-
mm/list_lru.c | 6 +-
mm/memcontrol.c | 130 +------------------------------------
mm/vmscan.c | 130 ++++++++++++++++++++++++++++++++++++-
5 files changed, 142 insertions(+), 140 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index eeb0b52203e9..0ee2924991fb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1581,10 +1581,10 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
return false;
}
-extern int memcg_expand_shrinker_maps(int new_id);
-
-extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id);
+extern int alloc_shrinker_maps(struct mem_cgroup *memcg);
+extern void free_shrinker_maps(struct mem_cgroup *memcg);
+extern void set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1594,8 +1594,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
return false;
}
-static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id)
+static inline void set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id)
{
}
#endif
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9237976abe72..05190d7f32ae 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2823,8 +2823,8 @@ void deferred_split_huge_page(struct page *page)
ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
if (memcg)
- memcg_set_shrinker_bit(memcg, page_to_nid(page),
- deferred_split_shrinker.id);
+ set_shrinker_bit(memcg, page_to_nid(page),
+ deferred_split_shrinker.id);
#endif
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
diff --git a/mm/list_lru.c b/mm/list_lru.c
index fe230081690b..628030fa5f69 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -125,8 +125,8 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
list_add_tail(item, &l->list);
/* Set shrinker bit if the first element was added */
if (!l->nr_items++)
- memcg_set_shrinker_bit(memcg, nid,
- lru_shrinker_id(lru));
+ set_shrinker_bit(memcg, nid,
+ lru_shrinker_id(lru));
nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
@@ -548,7 +548,7 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
if (src->nr_items) {
dst->nr_items += src->nr_items;
- memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
+ set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
src->nr_items = 0;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 605f671203ef..76a557520a1a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -397,130 +397,6 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif
-static int memcg_shrinker_map_size;
-static DEFINE_MUTEX(memcg_shrinker_map_mutex);
-
-static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
-{
- kvfree(container_of(head, struct memcg_shrinker_map, rcu));
-}
-
-static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
- int size, int old_size)
-{
- struct memcg_shrinker_map *new, *old;
- int nid;
-
- lockdep_assert_held(&memcg_shrinker_map_mutex);
-
- for_each_node(nid) {
- old = rcu_dereference_protected(
- mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
- /* Not yet online memcg */
- if (!old)
- return 0;
-
- new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
- if (!new)
- return -ENOMEM;
-
- /* Set all old bits, clear all new bits */
- memset(new->map, (int)0xff, old_size);
- memset((void *)new->map + old_size, 0, size - old_size);
-
- rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
- call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
- }
-
- return 0;
-}
-
-static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
-{
- struct mem_cgroup_per_node *pn;
- struct memcg_shrinker_map *map;
- int nid;
-
- if (mem_cgroup_is_root(memcg))
- return;
-
- for_each_node(nid) {
- pn = mem_cgroup_nodeinfo(memcg, nid);
- map = rcu_dereference_protected(pn->shrinker_map, true);
- if (map)
- kvfree(map);
- rcu_assign_pointer(pn->shrinker_map, NULL);
- }
-}
-
-static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
-{
- struct memcg_shrinker_map *map;
- int nid, size, ret = 0;
-
- if (mem_cgroup_is_root(memcg))
- return 0;
-
- mutex_lock(&memcg_shrinker_map_mutex);
- size = memcg_shrinker_map_size;
- for_each_node(nid) {
- map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
- if (!map) {
- memcg_free_shrinker_maps(memcg);
- ret = -ENOMEM;
- break;
- }
- rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
- }
- mutex_unlock(&memcg_shrinker_map_mutex);
-
- return ret;
-}
-
-int memcg_expand_shrinker_maps(int new_id)
-{
- int size, old_size, ret = 0;
- struct mem_cgroup *memcg;
-
- size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
- old_size = memcg_shrinker_map_size;
- if (size <= old_size)
- return 0;
-
- mutex_lock(&memcg_shrinker_map_mutex);
- if (!root_mem_cgroup)
- goto unlock;
-
- for_each_mem_cgroup(memcg) {
- if (mem_cgroup_is_root(memcg))
- continue;
- ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
- if (ret) {
- mem_cgroup_iter_break(NULL, memcg);
- goto unlock;
- }
- }
-unlock:
- if (!ret)
- memcg_shrinker_map_size = size;
- mutex_unlock(&memcg_shrinker_map_mutex);
- return ret;
-}
-
-void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
-{
- if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
- struct memcg_shrinker_map *map;
-
- rcu_read_lock();
- map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
- /* Pairs with smp mb in shrink_slab() */
- smp_mb__before_atomic();
- set_bit(shrinker_id, map->map);
- rcu_read_unlock();
- }
-}
-
/**
* mem_cgroup_css_from_page - css of the memcg associated with a page
* @page: page of interest
@@ -5372,11 +5248,11 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
/*
- * A memcg must be visible for memcg_expand_shrinker_maps()
+ * A memcg must be visible for expand_shrinker_maps()
* by the time the maps are allocated. So, we allocate maps
* here, when for_each_mem_cgroup() can't skip it.
*/
- if (memcg_alloc_shrinker_maps(memcg)) {
+ if (alloc_shrinker_maps(memcg)) {
mem_cgroup_id_remove(memcg);
return -ENOMEM;
}
@@ -5440,7 +5316,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
vmpressure_cleanup(&memcg->vmpressure);
cancel_work_sync(&memcg->high_work);
mem_cgroup_remove_from_trees(memcg);
- memcg_free_shrinker_maps(memcg);
+ free_shrinker_maps(memcg);
memcg_free_kmem(memcg);
mem_cgroup_free(memcg);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b512dd5e3a1c..d950cead66ca 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -185,6 +185,132 @@ static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
#ifdef CONFIG_MEMCG
+
+static int memcg_shrinker_map_size;
+static DEFINE_MUTEX(memcg_shrinker_map_mutex);
+
+static void free_shrinker_map_rcu(struct rcu_head *head)
+{
+ kvfree(container_of(head, struct memcg_shrinker_map, rcu));
+}
+
+static int expand_one_shrinker_map(struct mem_cgroup *memcg,
+ int size, int old_size)
+{
+ struct memcg_shrinker_map *new, *old;
+ int nid;
+
+ lockdep_assert_held(&memcg_shrinker_map_mutex);
+
+ for_each_node(nid) {
+ old = rcu_dereference_protected(
+ mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
+ /* Not yet online memcg */
+ if (!old)
+ return 0;
+
+ new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
+ if (!new)
+ return -ENOMEM;
+
+ /* Set all old bits, clear all new bits */
+ memset(new->map, (int)0xff, old_size);
+ memset((void *)new->map + old_size, 0, size - old_size);
+
+ rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
+ call_rcu(&old->rcu, free_shrinker_map_rcu);
+ }
+
+ return 0;
+}
+
+void free_shrinker_maps(struct mem_cgroup *memcg)
+{
+ struct mem_cgroup_per_node *pn;
+ struct memcg_shrinker_map *map;
+ int nid;
+
+ if (mem_cgroup_is_root(memcg))
+ return;
+
+ for_each_node(nid) {
+ pn = mem_cgroup_nodeinfo(memcg, nid);
+ map = rcu_dereference_protected(pn->shrinker_map, true);
+ if (map)
+ kvfree(map);
+ rcu_assign_pointer(pn->shrinker_map, NULL);
+ }
+}
+
+int alloc_shrinker_maps(struct mem_cgroup *memcg)
+{
+ struct memcg_shrinker_map *map;
+ int nid, size, ret = 0;
+
+ if (mem_cgroup_is_root(memcg))
+ return 0;
+
+ mutex_lock(&memcg_shrinker_map_mutex);
+ size = memcg_shrinker_map_size;
+ for_each_node(nid) {
+ map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
+ if (!map) {
+ free_shrinker_maps(memcg);
+ ret = -ENOMEM;
+ break;
+ }
+ rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
+ }
+ mutex_unlock(&memcg_shrinker_map_mutex);
+
+ return ret;
+}
+
+static int expand_shrinker_maps(int new_id)
+{
+ int size, old_size, ret = 0;
+ struct mem_cgroup *memcg;
+
+ size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
+ old_size = memcg_shrinker_map_size;
+ if (size <= old_size)
+ return 0;
+
+ mutex_lock(&memcg_shrinker_map_mutex);
+ if (!root_mem_cgroup)
+ goto unlock;
+
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ if (mem_cgroup_is_root(memcg))
+ continue;
+ ret = expand_one_shrinker_map(memcg, size, old_size);
+ if (ret) {
+ mem_cgroup_iter_break(NULL, memcg);
+ goto unlock;
+ }
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+unlock:
+ if (!ret)
+ memcg_shrinker_map_size = size;
+ mutex_unlock(&memcg_shrinker_map_mutex);
+ return ret;
+}
+
+void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
+{
+ if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
+ struct memcg_shrinker_map *map;
+
+ rcu_read_lock();
+ map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
+ /* Pairs with smp mb in shrink_slab() */
+ smp_mb__before_atomic();
+ set_bit(shrinker_id, map->map);
+ rcu_read_unlock();
+ }
+}
+
/*
* We allow subsystems to populate their shrinker-related
* LRU lists before register_shrinker_prepared() is called
@@ -212,7 +338,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
goto unlock;
if (id >= shrinker_nr_max) {
- if (memcg_expand_shrinker_maps(id)) {
+ if (expand_shrinker_maps(id)) {
idr_remove(&shrinker_idr, id);
goto unlock;
}
@@ -601,7 +727,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
if (ret == SHRINK_EMPTY)
ret = 0;
else
- memcg_set_shrinker_bit(memcg, nid, i);
+ set_shrinker_bit(memcg, nid, i);
}
freed += ret;
--
2.26.2
* [v4 PATCH 03/11] mm: vmscan: use shrinker_rwsem to protect shrinker_maps allocation
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
Since memcg_shrinker_map_size can only be changed while holding shrinker_rwsem
exclusively, the read side could be protected by holding the read lock, so a
dedicated mutex is superfluous. Kirill Tkhai suggested using the write lock
instead, since:
* We want the assignment to shrinker_maps to be visible to shrink_slab_memcg().
* shrink_slab_memcg() uses rcu_dereference_protected(), but if
  alloc_shrinker_maps() took only the READ lock, that dereference would not
  actually be protected.
* The READ lock would make alloc_shrinker_info() racy against memory
  allocation failure: alloc_shrinker_info()->free_shrinker_info() may free the
  memory right after shrink_slab_memcg() dereferenced it. You may say
  shrink_slab_memcg()->mem_cgroup_online() protects us from it? Yes, sure,
  but this is not the thing we want to have to remember in the future, since
  it hurts modularity.
And a test with a heavy paging workload did not show the write lock making
things worse.
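With this change both map writers run under the write lock:
alloc_shrinker_maps() now takes it directly, and expand_shrinker_maps() is
only called from prealloc_memcg_shrinker(), which already holds it (rough
sketch of the locking after this patch):

	/* writer side */
	down_write(&shrinker_rwsem);
	rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	up_write(&shrinker_rwsem);

	/* reader side, shrink_slab_memcg() */
	if (!down_read_trylock(&shrinker_rwsem))
		return 0;
	map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map, true);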
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
mm/vmscan.c | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d950cead66ca..d3f3701dfcd2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -187,7 +187,6 @@ static DECLARE_RWSEM(shrinker_rwsem);
#ifdef CONFIG_MEMCG
static int memcg_shrinker_map_size;
-static DEFINE_MUTEX(memcg_shrinker_map_mutex);
static void free_shrinker_map_rcu(struct rcu_head *head)
{
@@ -200,8 +199,6 @@ static int expand_one_shrinker_map(struct mem_cgroup *memcg,
struct memcg_shrinker_map *new, *old;
int nid;
- lockdep_assert_held(&memcg_shrinker_map_mutex);
-
for_each_node(nid) {
old = rcu_dereference_protected(
mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
@@ -250,7 +247,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
if (mem_cgroup_is_root(memcg))
return 0;
- mutex_lock(&memcg_shrinker_map_mutex);
+ down_write(&shrinker_rwsem);
size = memcg_shrinker_map_size;
for_each_node(nid) {
map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
@@ -261,7 +258,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
}
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
}
- mutex_unlock(&memcg_shrinker_map_mutex);
+ up_write(&shrinker_rwsem);
return ret;
}
@@ -276,9 +273,8 @@ static int expand_shrinker_maps(int new_id)
if (size <= old_size)
return 0;
- mutex_lock(&memcg_shrinker_map_mutex);
if (!root_mem_cgroup)
- goto unlock;
+ goto out;
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
@@ -287,13 +283,13 @@ static int expand_shrinker_maps(int new_id)
ret = expand_one_shrinker_map(memcg, size, old_size);
if (ret) {
mem_cgroup_iter_break(NULL, memcg);
- goto unlock;
+ goto out;
}
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
-unlock:
+out:
if (!ret)
memcg_shrinker_map_size = size;
- mutex_unlock(&memcg_shrinker_map_mutex);
+
return ret;
}
--
2.26.2
* [v4 PATCH 04/11] mm: vmscan: remove memcg_shrinker_map_size
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
Both memcg_shrinker_map_size and shrinker_nr_max are maintained, but the map
size can actually be calculated from shrinker_nr_max, so it seems unnecessary
to keep both. Remove memcg_shrinker_map_size, since shrinker_nr_max is also
used when iterating the bit map.
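For reference, with this patch the bitmap size is always derived from
shrinker_nr_max (assuming BITS_PER_LONG == 64, shrinker_nr_max == 0 yields 8
bytes, i.e. one word covering 64 shrinker ids, and it grows to 16 bytes once
shrinker_nr_max reaches 64):

	size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);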
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
mm/vmscan.c | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d3f3701dfcd2..40e7751ef961 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
#ifdef CONFIG_MEMCG
-
-static int memcg_shrinker_map_size;
+static int shrinker_nr_max;
static void free_shrinker_map_rcu(struct rcu_head *head)
{
@@ -248,7 +247,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
return 0;
down_write(&shrinker_rwsem);
- size = memcg_shrinker_map_size;
+ size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
for_each_node(nid) {
map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
if (!map) {
@@ -266,10 +265,11 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
static int expand_shrinker_maps(int new_id)
{
int size, old_size, ret = 0;
+ int new_nr_max = new_id + 1;
struct mem_cgroup *memcg;
- size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
- old_size = memcg_shrinker_map_size;
+ size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
+ old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
if (size <= old_size)
return 0;
@@ -286,9 +286,10 @@ static int expand_shrinker_maps(int new_id)
goto out;
}
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+
out:
if (!ret)
- memcg_shrinker_map_size = size;
+ shrinker_nr_max = new_nr_max;
return ret;
}
@@ -321,7 +322,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
static DEFINE_IDR(shrinker_idr);
-static int shrinker_nr_max;
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
@@ -338,8 +338,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
idr_remove(&shrinker_idr, id);
goto unlock;
}
-
- shrinker_nr_max = id + 1;
}
shrinker->id = id;
ret = 0;
--
2.26.2
* Re: [v4 PATCH 04/11] mm: vmscan: remove memcg_shrinker_map_size
From: Kirill Tkhai @ 2021-01-25 8:35 UTC (permalink / raw)
To: Yang Shi, guro, shakeelb, david, hannes, mhocko, akpm
Cc: linux-mm, linux-fsdevel, linux-kernel
On 22.01.2021 02:06, Yang Shi wrote:
> Both memcg_shrinker_map_size and shrinker_nr_max is maintained, but actually the
> map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both.
> Remove memcg_shrinker_map_size since shrinker_nr_max is also used by iterating the
> bit map.
>
> Signed-off-by: Yang Shi <shy828301@gmail.com>
> ---
> mm/vmscan.c | 16 +++++++---------
> 1 file changed, 7 insertions(+), 9 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index d3f3701dfcd2..40e7751ef961 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list);
> static DECLARE_RWSEM(shrinker_rwsem);
>
> #ifdef CONFIG_MEMCG
> -
> -static int memcg_shrinker_map_size;
> +static int shrinker_nr_max;
>
> static void free_shrinker_map_rcu(struct rcu_head *head)
> {
> @@ -248,7 +247,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
> return 0;
>
> down_write(&shrinker_rwsem);
> - size = memcg_shrinker_map_size;
> + size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> for_each_node(nid) {
> map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
> if (!map) {
> @@ -266,10 +265,11 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
> static int expand_shrinker_maps(int new_id)
> {
> int size, old_size, ret = 0;
> + int new_nr_max = new_id + 1;
> struct mem_cgroup *memcg;
>
> - size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
> - old_size = memcg_shrinker_map_size;
> + size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> + old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
>
> if (size <= old_size)
> return 0;
This looks like a BUG:
expand_shrinker_maps(id == 1)
{
old_size = 64;
size = 64;
===>return 0 and shrinker_nr_max remains 0.
}
Then shrink_slab_memcg() misses this shrinker since shrinker_nr_max == 0.
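In byte terms (assuming BITS_PER_LONG == 64) the same failure reads:

	expand_shrinker_maps(new_id == 1)	/* shrinker_nr_max still 0 */
	{
		new_nr_max = 2;
		size     = (2 / 64 + 1) * 8 = 8;	/* bytes */
		old_size = (0 / 64 + 1) * 8 = 8;
		/* size <= old_size: return 0, shrinker_nr_max is never set */
	}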
>
> @@ -286,9 +286,10 @@ static int expand_shrinker_maps(int new_id)
> goto out;
> }
> } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
> +
> out:
> if (!ret)
> - memcg_shrinker_map_size = size;
> + shrinker_nr_max = new_nr_max;
>
> return ret;
> }
> @@ -321,7 +322,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
> #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
>
> static DEFINE_IDR(shrinker_idr);
> -static int shrinker_nr_max;
>
> static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> {
> @@ -338,8 +338,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> idr_remove(&shrinker_idr, id);
> goto unlock;
> }
> -
> - shrinker_nr_max = id + 1;
> }
> shrinker->id = id;
> ret = 0;
>
* Re: [v4 PATCH 04/11] mm: vmscan: remove memcg_shrinker_map_size
From: Yang Shi @ 2021-01-25 21:06 UTC (permalink / raw)
To: Kirill Tkhai
Cc: Roman Gushchin, Shakeel Butt, Dave Chinner, Johannes Weiner,
Michal Hocko, Andrew Morton, Linux MM,
Linux FS-devel Mailing List, Linux Kernel Mailing List
On Mon, Jan 25, 2021 at 12:36 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>
> On 22.01.2021 02:06, Yang Shi wrote:
> > Both memcg_shrinker_map_size and shrinker_nr_max is maintained, but actually the
> > map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both.
> > Remove memcg_shrinker_map_size since shrinker_nr_max is also used by iterating the
> > bit map.
> >
> > Signed-off-by: Yang Shi <shy828301@gmail.com>
> > ---
> > mm/vmscan.c | 16 +++++++---------
> > 1 file changed, 7 insertions(+), 9 deletions(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index d3f3701dfcd2..40e7751ef961 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list);
> > static DECLARE_RWSEM(shrinker_rwsem);
> >
> > #ifdef CONFIG_MEMCG
> > -
> > -static int memcg_shrinker_map_size;
> > +static int shrinker_nr_max;
> >
> > static void free_shrinker_map_rcu(struct rcu_head *head)
> > {
> > @@ -248,7 +247,7 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
> > return 0;
> >
> > down_write(&shrinker_rwsem);
> > - size = memcg_shrinker_map_size;
> > + size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > for_each_node(nid) {
> > map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
> > if (!map) {
> > @@ -266,10 +265,11 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
> > static int expand_shrinker_maps(int new_id)
> > {
> > int size, old_size, ret = 0;
> > + int new_nr_max = new_id + 1;
> > struct mem_cgroup *memcg;
> >
> > - size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
> > - old_size = memcg_shrinker_map_size;
> > + size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > + old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> >
> > if (size <= old_size)
> > return 0;
>
> This looks a BUG:
>
> expand_shrinker_maps(id == 1)
> {
> old_size = 64;
> size = 64;
>
> ===>return 0 and shrinker_nr_max remains 0.
> }
>
> Then shrink_slab_memcg() misses this shrinker since shrinker_nr_max == 0.
Yes, thanks for catching this. It should be fixed by the below patch:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bb254d39339f..47010a69b400 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -287,7 +287,7 @@ static int expand_shrinker_info(int new_id)
old_d_size = shrinker_nr_max * sizeof(atomic_long_t);
old_size = old_m_size + old_d_size;
if (size <= old_size)
- return 0;
+ goto out;
if (!root_mem_cgroup)
goto out;
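With the goto, the early-exit path now falls through to the function's tail,
where ret is still 0, so the update the old early return skipped is executed
(tail of expand_shrinker_info() as introduced in patch 04):

	out:
		if (!ret)
			shrinker_nr_max = new_nr_max;

		return ret;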
>
> >
> > @@ -286,9 +286,10 @@ static int expand_shrinker_maps(int new_id)
> > goto out;
> > }
> > } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
> > +
> > out:
> > if (!ret)
> > - memcg_shrinker_map_size = size;
> > + shrinker_nr_max = new_nr_max;
> >
> > return ret;
> > }
> > @@ -321,7 +322,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
> > #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
> >
> > static DEFINE_IDR(shrinker_idr);
> > -static int shrinker_nr_max;
> >
> > static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> > {
> > @@ -338,8 +338,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> > idr_remove(&shrinker_idr, id);
> > goto unlock;
> > }
> > -
> > - shrinker_nr_max = id + 1;
> > }
> > shrinker->id = id;
> > ret = 0;
> >
>
>
* [v4 PATCH 05/11] mm: memcontrol: rename shrinker_map to shrinker_info
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
The following patch is going to add nr_deferred to shrinker_map; after that
change the structure will no longer hold just the map, so give it a more
general name. This should also make the patch that adds nr_deferred cleaner
and easier to review. Rename struct "memcg_shrinker_map" to "shrinker_info"
accordingly.
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
include/linux/memcontrol.h | 8 ++---
mm/memcontrol.c | 6 ++--
mm/vmscan.c | 64 +++++++++++++++++++-------------------
3 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0ee2924991fb..62b888b88a5f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -96,7 +96,7 @@ struct lruvec_stat {
* Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
* which have elements charged to this memcg.
*/
-struct memcg_shrinker_map {
+struct shrinker_info {
struct rcu_head rcu;
unsigned long map[];
};
@@ -118,7 +118,7 @@ struct mem_cgroup_per_node {
struct mem_cgroup_reclaim_iter iter;
- struct memcg_shrinker_map __rcu *shrinker_map;
+ struct shrinker_info __rcu *shrinker_info;
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
@@ -1581,8 +1581,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
return false;
}
-extern int alloc_shrinker_maps(struct mem_cgroup *memcg);
-extern void free_shrinker_maps(struct mem_cgroup *memcg);
+extern int alloc_shrinker_info(struct mem_cgroup *memcg);
+extern void free_shrinker_info(struct mem_cgroup *memcg);
extern void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id);
#else
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 76a557520a1a..65d9eb0215b5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5248,11 +5248,11 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
/*
- * A memcg must be visible for expand_shrinker_maps()
+ * A memcg must be visible for expand_shrinker_info()
* by the time the maps are allocated. So, we allocate maps
* here, when for_each_mem_cgroup() can't skip it.
*/
- if (alloc_shrinker_maps(memcg)) {
+ if (alloc_shrinker_info(memcg)) {
mem_cgroup_id_remove(memcg);
return -ENOMEM;
}
@@ -5316,7 +5316,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
vmpressure_cleanup(&memcg->vmpressure);
cancel_work_sync(&memcg->high_work);
mem_cgroup_remove_from_trees(memcg);
- free_shrinker_maps(memcg);
+ free_shrinker_info(memcg);
memcg_free_kmem(memcg);
mem_cgroup_free(memcg);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 40e7751ef961..dcb7f2913ace 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -187,20 +187,20 @@ static DECLARE_RWSEM(shrinker_rwsem);
#ifdef CONFIG_MEMCG
static int shrinker_nr_max;
-static void free_shrinker_map_rcu(struct rcu_head *head)
+static void free_shrinker_info_rcu(struct rcu_head *head)
{
- kvfree(container_of(head, struct memcg_shrinker_map, rcu));
+ kvfree(container_of(head, struct shrinker_info, rcu));
}
-static int expand_one_shrinker_map(struct mem_cgroup *memcg,
+static int expand_one_shrinker_info(struct mem_cgroup *memcg,
int size, int old_size)
{
- struct memcg_shrinker_map *new, *old;
+ struct shrinker_info *new, *old;
int nid;
for_each_node(nid) {
old = rcu_dereference_protected(
- mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
+ mem_cgroup_nodeinfo(memcg, nid)->shrinker_info, true);
/* Not yet online memcg */
if (!old)
return 0;
@@ -213,17 +213,17 @@ static int expand_one_shrinker_map(struct mem_cgroup *memcg,
memset(new->map, (int)0xff, old_size);
memset((void *)new->map + old_size, 0, size - old_size);
- rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
- call_rcu(&old->rcu, free_shrinker_map_rcu);
+ rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, new);
+ call_rcu(&old->rcu, free_shrinker_info_rcu);
}
return 0;
}
-void free_shrinker_maps(struct mem_cgroup *memcg)
+void free_shrinker_info(struct mem_cgroup *memcg)
{
struct mem_cgroup_per_node *pn;
- struct memcg_shrinker_map *map;
+ struct shrinker_info *info;
int nid;
if (mem_cgroup_is_root(memcg))
@@ -231,16 +231,16 @@ void free_shrinker_maps(struct mem_cgroup *memcg)
for_each_node(nid) {
pn = mem_cgroup_nodeinfo(memcg, nid);
- map = rcu_dereference_protected(pn->shrinker_map, true);
- if (map)
- kvfree(map);
- rcu_assign_pointer(pn->shrinker_map, NULL);
+ info = rcu_dereference_protected(pn->shrinker_info, true);
+ if (info)
+ kvfree(info);
+ rcu_assign_pointer(pn->shrinker_info, NULL);
}
}
-int alloc_shrinker_maps(struct mem_cgroup *memcg)
+int alloc_shrinker_info(struct mem_cgroup *memcg)
{
- struct memcg_shrinker_map *map;
+ struct shrinker_info *info;
int nid, size, ret = 0;
if (mem_cgroup_is_root(memcg))
@@ -249,20 +249,20 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg)
down_write(&shrinker_rwsem);
size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
for_each_node(nid) {
- map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
- if (!map) {
- free_shrinker_maps(memcg);
+ info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
+ if (!info) {
+ free_shrinker_info(memcg);
ret = -ENOMEM;
break;
}
- rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
+ rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
}
up_write(&shrinker_rwsem);
return ret;
}
-static int expand_shrinker_maps(int new_id)
+static int expand_shrinker_info(int new_id)
{
int size, old_size, ret = 0;
int new_nr_max = new_id + 1;
@@ -280,7 +280,7 @@ static int expand_shrinker_maps(int new_id)
do {
if (mem_cgroup_is_root(memcg))
continue;
- ret = expand_one_shrinker_map(memcg, size, old_size);
+ ret = expand_one_shrinker_info(memcg, size, old_size);
if (ret) {
mem_cgroup_iter_break(NULL, memcg);
goto out;
@@ -297,13 +297,13 @@ static int expand_shrinker_maps(int new_id)
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
- struct memcg_shrinker_map *map;
+ struct shrinker_info *info;
rcu_read_lock();
- map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
+ info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
/* Pairs with smp mb in shrink_slab() */
smp_mb__before_atomic();
- set_bit(shrinker_id, map->map);
+ set_bit(shrinker_id, info->map);
rcu_read_unlock();
}
}
@@ -334,7 +334,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
goto unlock;
if (id >= shrinker_nr_max) {
- if (expand_shrinker_maps(id)) {
+ if (expand_shrinker_info(id)) {
idr_remove(&shrinker_idr, id);
goto unlock;
}
@@ -663,7 +663,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg, int priority)
{
- struct memcg_shrinker_map *map;
+ struct shrinker_info *info;
unsigned long ret, freed = 0;
int i;
@@ -673,12 +673,12 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
if (!down_read_trylock(&shrinker_rwsem))
return 0;
- map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
- true);
- if (unlikely(!map))
+ info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+ true);
+ if (unlikely(!info))
goto unlock;
- for_each_set_bit(i, map->map, shrinker_nr_max) {
+ for_each_set_bit(i, info->map, shrinker_nr_max) {
struct shrink_control sc = {
.gfp_mask = gfp_mask,
.nid = nid,
@@ -689,7 +689,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
shrinker = idr_find(&shrinker_idr, i);
if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
if (!shrinker)
- clear_bit(i, map->map);
+ clear_bit(i, info->map);
continue;
}
@@ -700,7 +700,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
ret = do_shrink_slab(&sc, shrinker, priority);
if (ret == SHRINK_EMPTY) {
- clear_bit(i, map->map);
+ clear_bit(i, info->map);
/*
* After the shrinker reported that it had no objects to
* free, but before we cleared the corresponding bit in
--
2.26.2
* [v4 PATCH 06/11] mm: vmscan: use a new flag to indicate shrinker is registered
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
Currently a registered shrinker is indicated by a non-NULL
shrinker->nr_deferred. This approach is fine with nr_deferred at the shrinker
level, but the following patches will move the MEMCG_AWARE shrinkers'
nr_deferred to the memcg level, so their shrinker->nr_deferred would always be
NULL. This would prevent the shrinkers from unregistering correctly.
Remove SHRINKER_REGISTERING, since the new flag lets us check whether a
shrinker has been registered successfully.
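In short, the lifecycle after this patch looks like (sketch assembled from the
hunks below):

	/* register_shrinker_prepared() */
	shrinker->flags |= SHRINKER_REGISTERED;

	/* unregister_shrinker() bails out unless registration completed */
	if (!(shrinker->flags & SHRINKER_REGISTERED))
		return;

	/* shrink_slab_memcg() skips shrinkers that are still registering */
	if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED)))
		continue;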
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
include/linux/shrinker.h | 7 ++++---
mm/vmscan.c | 27 +++++++++------------------
2 files changed, 13 insertions(+), 21 deletions(-)
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 0f80123650e2..1eac79ce57d4 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -79,13 +79,14 @@ struct shrinker {
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
/* Flags */
-#define SHRINKER_NUMA_AWARE (1 << 0)
-#define SHRINKER_MEMCG_AWARE (1 << 1)
+#define SHRINKER_REGISTERED (1 << 0)
+#define SHRINKER_NUMA_AWARE (1 << 1)
+#define SHRINKER_MEMCG_AWARE (1 << 2)
/*
* It just makes sense when the shrinker is also MEMCG_AWARE for now,
* non-MEMCG_AWARE shrinker should not have this flag set.
*/
-#define SHRINKER_NONSLAB (1 << 2)
+#define SHRINKER_NONSLAB (1 << 3)
extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dcb7f2913ace..018e1beb24c9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -308,19 +308,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
}
}
-/*
- * We allow subsystems to populate their shrinker-related
- * LRU lists before register_shrinker_prepared() is called
- * for the shrinker, since we don't want to impose
- * restrictions on their internal registration order.
- * In this case shrink_slab_memcg() may find corresponding
- * bit is set in the shrinkers map.
- *
- * This value is used by the function to detect registering
- * shrinkers and to skip do_shrink_slab() calls for them.
- */
-#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
-
static DEFINE_IDR(shrinker_idr);
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
@@ -329,7 +316,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
down_write(&shrinker_rwsem);
/* This may call shrinker, so it must use down_read_trylock() */
- id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
+ id = idr_alloc(&shrinker_idr, NULL, 0, 0, GFP_KERNEL);
if (id < 0)
goto unlock;
@@ -496,6 +483,7 @@ void register_shrinker_prepared(struct shrinker *shrinker)
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
idr_replace(&shrinker_idr, shrinker, shrinker->id);
#endif
+ shrinker->flags |= SHRINKER_REGISTERED;
up_write(&shrinker_rwsem);
}
@@ -515,13 +503,16 @@ EXPORT_SYMBOL(register_shrinker);
*/
void unregister_shrinker(struct shrinker *shrinker)
{
- if (!shrinker->nr_deferred)
+ if (!(shrinker->flags & SHRINKER_REGISTERED))
return;
- if (shrinker->flags & SHRINKER_MEMCG_AWARE)
- unregister_memcg_shrinker(shrinker);
+
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
+ shrinker->flags &= ~SHRINKER_REGISTERED;
up_write(&shrinker_rwsem);
+
+ if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+ unregister_memcg_shrinker(shrinker);
kfree(shrinker->nr_deferred);
shrinker->nr_deferred = NULL;
}
@@ -687,7 +678,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
struct shrinker *shrinker;
shrinker = idr_find(&shrinker_idr, i);
- if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
+ if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) {
if (!shrinker)
clear_bit(i, info->map);
continue;
--
2.26.2
* [v4 PATCH 07/11] mm: vmscan: add per memcg shrinker nr_deferred
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
Currently the number of deferred objects is per shrinker, but some slabs, for
example the vfs inode/dentry caches, are per memcg; this results in poor
isolation among memcgs.
The deferred objects are typically generated by __GFP_NOFS allocations: one
memcg with excessive __GFP_NOFS allocations may blow up its deferred objects,
and other innocent memcgs may then suffer from over-shrinking, excessive
reclaim latency, etc.
For example, two workloads run in memcgA and memcgB respectively, and the
workload in B is vfs heavy. The workload in A generates excessive deferred
objects, then B's vfs cache might be hit heavily (dropping half of its caches)
by B's limit reclaim or global reclaim.
We observed this in our production environment, which was running a vfs heavy
workload, as shown in the tracing log below:
<...>-409454 [016] .... 28286961.747146: mm_shrink_slab_start: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
nid: 1 objects to shrink 3641681686040 gfp_flags GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721
cache items 246404277 delta 31345 total_scan 123202138
<...>-409454 [022] .... 28287105.928018: mm_shrink_slab_end: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
nid: 1 unused scan count 3641681686040 new scan count 3641798379189 total_scan 602
last shrinker return val 123186855
The vfs cache to page cache ratio was 10:1 on this machine, and half of the
caches were dropped. This also resulted in a significant amount of page cache
being dropped due to inode eviction.
Making nr_deferred per memcg for memcg aware shrinkers solves the unfairness
and brings better isolation.
When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the shrinker's
own nr_deferred is used. Non memcg aware shrinkers use the shrinker's
nr_deferred all the time.
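The per-node layout after this patch keeps the bitmap and the deferred
counters in a single allocation, with both pointers carved out of the trailing
storage (sketch of what alloc_shrinker_info() sets up below):

	info = kvzalloc_node(sizeof(*info) + m_size + d_size, GFP_KERNEL, nid);
	info->map = (unsigned long *)(info + 1);	/* m_size bytes */
	info->nr_deferred = (atomic_long_t *)(info->map +
			shrinker_nr_max / BITS_PER_LONG + 1);	/* d_size bytes */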
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
include/linux/memcontrol.h | 7 +++---
mm/vmscan.c | 49 +++++++++++++++++++++++++-------------
2 files changed, 36 insertions(+), 20 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 62b888b88a5f..e0384367e07d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -93,12 +93,13 @@ struct lruvec_stat {
};
/*
- * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
- * which have elements charged to this memcg.
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to this memcg.
*/
struct shrinker_info {
struct rcu_head rcu;
- unsigned long map[];
+ unsigned long *map;
+ atomic_long_t *nr_deferred;
};
/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 018e1beb24c9..722aa71b13b2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -192,11 +192,13 @@ static void free_shrinker_info_rcu(struct rcu_head *head)
kvfree(container_of(head, struct shrinker_info, rcu));
}
-static int expand_one_shrinker_info(struct mem_cgroup *memcg,
- int size, int old_size)
+static int expand_one_shrinker_info(struct mem_cgroup *memcg, int nr_max,
+ int m_size, int d_size,
+ int old_m_size, int old_d_size)
{
struct shrinker_info *new, *old;
int nid;
+ int size = m_size + d_size;
for_each_node(nid) {
old = rcu_dereference_protected(
@@ -209,9 +211,16 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
if (!new)
return -ENOMEM;
- /* Set all old bits, clear all new bits */
- memset(new->map, (int)0xff, old_size);
- memset((void *)new->map + old_size, 0, size - old_size);
+ new->map = (unsigned long *)(new + 1);
+ new->nr_deferred = (atomic_long_t *)(new->map +
+ nr_max / BITS_PER_LONG + 1);
+
+ /* map: set all old bits, clear all new bits */
+ memset(new->map, (int)0xff, old_m_size);
+ memset((void *)new->map + old_m_size, 0, m_size - old_m_size);
+ /* nr_deferred: copy old values, clear all new values */
+ memcpy(new->nr_deferred, old->nr_deferred, old_d_size);
+ memset((void *)new->nr_deferred + old_d_size, 0, d_size - old_d_size);
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, new);
call_rcu(&old->rcu, free_shrinker_info_rcu);
@@ -226,9 +235,6 @@ void free_shrinker_info(struct mem_cgroup *memcg)
struct shrinker_info *info;
int nid;
- if (mem_cgroup_is_root(memcg))
- return;
-
for_each_node(nid) {
pn = mem_cgroup_nodeinfo(memcg, nid);
info = rcu_dereference_protected(pn->shrinker_info, true);
@@ -242,12 +248,13 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
{
struct shrinker_info *info;
int nid, size, ret = 0;
-
- if (mem_cgroup_is_root(memcg))
- return 0;
+ int m_size, d_size = 0;
down_write(&shrinker_rwsem);
- size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
+ m_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
+ d_size = shrinker_nr_max * sizeof(atomic_long_t);
+ size = m_size + d_size;
+
for_each_node(nid) {
info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
if (!info) {
@@ -255,6 +262,9 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
ret = -ENOMEM;
break;
}
+ info->map = (unsigned long *)(info + 1);
+ info->nr_deferred = (atomic_long_t *)(info->map +
+ shrinker_nr_max / BITS_PER_LONG + 1);
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
}
up_write(&shrinker_rwsem);
@@ -266,10 +276,16 @@ static int expand_shrinker_info(int new_id)
{
int size, old_size, ret = 0;
int new_nr_max = new_id + 1;
+ int m_size, d_size = 0;
+ int old_m_size, old_d_size = 0;
struct mem_cgroup *memcg;
- size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
- old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
+ m_size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
+ d_size = new_nr_max * sizeof(atomic_long_t);
+ size = m_size + d_size;
+ old_m_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
+ old_d_size = shrinker_nr_max * sizeof(atomic_long_t);
+ old_size = old_m_size + old_d_size;
if (size <= old_size)
return 0;
@@ -278,9 +294,8 @@ static int expand_shrinker_info(int new_id)
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
- if (mem_cgroup_is_root(memcg))
- continue;
- ret = expand_one_shrinker_info(memcg, size, old_size);
+ ret = expand_one_shrinker_info(memcg, new_nr_max, m_size, d_size,
+ old_m_size, old_d_size);
if (ret) {
mem_cgroup_iter_break(NULL, memcg);
goto out;
--
2.26.2
* Re: [v4 PATCH 07/11] mm: vmscan: add per memcg shrinker nr_deferred
From: Kirill Tkhai @ 2021-01-25 9:31 UTC (permalink / raw)
To: Yang Shi, guro, shakeelb, david, hannes, mhocko, akpm
Cc: linux-mm, linux-fsdevel, linux-kernel
On 22.01.2021 02:06, Yang Shi wrote:
> Currently the number of deferred objects are per shrinker, but some slabs, for example,
> vfs inode/dentry cache are per memcg, this would result in poor isolation among memcgs.
>
> The deferred objects typically are generated by __GFP_NOFS allocations, one memcg with
> excessive __GFP_NOFS allocations may blow up deferred objects, then other innocent memcgs
> may suffer from over shrink, excessive reclaim latency, etc.
>
> For example, two workloads run in memcgA and memcgB respectively, workload in B is vfs
> heavy workload. Workload in A generates excessive deferred objects, then B's vfs cache
> might be hit heavily (drop half of caches) by B's limit reclaim or global reclaim.
>
> We observed this hit in our production environment which was running vfs heavy workload
> shown as the below tracing log:
>
> <...>-409454 [016] .... 28286961.747146: mm_shrink_slab_start: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
> nid: 1 objects to shrink 3641681686040 gfp_flags GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721
> cache items 246404277 delta 31345 total_scan 123202138
> <...>-409454 [022] .... 28287105.928018: mm_shrink_slab_end: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
> nid: 1 unused scan count 3641681686040 new scan count 3641798379189 total_scan 602
> last shrinker return val 123186855
>
> The vfs cache and page cache ration was 10:1 on this machine, and half of caches were dropped.
> This also resulted in significant amount of page caches were dropped due to inodes eviction.
>
> Make nr_deferred per memcg for memcg aware shrinkers would solve the unfairness and bring
> better isolation.
>
> When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the shrinker's nr_deferred
> would be used. And non memcg aware shrinkers use shrinker's nr_deferred all the time.
>
> Signed-off-by: Yang Shi <shy828301@gmail.com>
> ---
> include/linux/memcontrol.h | 7 +++---
> mm/vmscan.c | 49 +++++++++++++++++++++++++-------------
> 2 files changed, 36 insertions(+), 20 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 62b888b88a5f..e0384367e07d 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -93,12 +93,13 @@ struct lruvec_stat {
> };
>
> /*
> - * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
> - * which have elements charged to this memcg.
> + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
> + * shrinkers, which have elements charged to this memcg.
> */
> struct shrinker_info {
> struct rcu_head rcu;
> - unsigned long map[];
> + unsigned long *map;
> + atomic_long_t *nr_deferred;
> };
>
> /*
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 018e1beb24c9..722aa71b13b2 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -192,11 +192,13 @@ static void free_shrinker_info_rcu(struct rcu_head *head)
> kvfree(container_of(head, struct shrinker_info, rcu));
> }
>
> -static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> - int size, int old_size)
> +static int expand_one_shrinker_info(struct mem_cgroup *memcg, int nr_max,
> + int m_size, int d_size,
> + int old_m_size, int old_d_size)
> {
> struct shrinker_info *new, *old;
> int nid;
> + int size = m_size + d_size;
>
> for_each_node(nid) {
> old = rcu_dereference_protected(
> @@ -209,9 +211,16 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> if (!new)
> return -ENOMEM;
>
> - /* Set all old bits, clear all new bits */
> - memset(new->map, (int)0xff, old_size);
> - memset((void *)new->map + old_size, 0, size - old_size);
> + new->map = (unsigned long *)(new + 1);
> + new->nr_deferred = (atomic_long_t *)(new->map +
> + nr_max / BITS_PER_LONG + 1);
Why not
new->nr_deferred = (void *)new->map + m_size;
?
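(The two forms compute the same address: new->map is an unsigned long *, so
adding nr_max / BITS_PER_LONG + 1 advances by that count times
sizeof(unsigned long) bytes, which is exactly m_size:

	new->map + nr_max / BITS_PER_LONG + 1
	== (void *)new->map + (nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long)
	== (void *)new->map + m_size
)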
> +
> + /* map: set all old bits, clear all new bits */
> + memset(new->map, (int)0xff, old_m_size);
> + memset((void *)new->map + old_m_size, 0, m_size - old_m_size);
> + /* nr_deferred: copy old values, clear all new values */
> + memcpy(new->nr_deferred, old->nr_deferred, old_d_size);
> + memset((void *)new->nr_deferred + old_d_size, 0, d_size - old_d_size);
>
> rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, new);
> call_rcu(&old->rcu, free_shrinker_info_rcu);
> @@ -226,9 +235,6 @@ void free_shrinker_info(struct mem_cgroup *memcg)
> struct shrinker_info *info;
> int nid;
>
> - if (mem_cgroup_is_root(memcg))
> - return;
> -
> for_each_node(nid) {
> pn = mem_cgroup_nodeinfo(memcg, nid);
> info = rcu_dereference_protected(pn->shrinker_info, true);
> @@ -242,12 +248,13 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> {
> struct shrinker_info *info;
> int nid, size, ret = 0;
> -
> - if (mem_cgroup_is_root(memcg))
> - return 0;
> + int m_size, d_size = 0;
>
> down_write(&shrinker_rwsem);
> - size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> + m_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> + d_size = shrinker_nr_max * sizeof(atomic_long_t);
> + size = m_size + d_size;
> +
> for_each_node(nid) {
> info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
> if (!info) {
> @@ -255,6 +262,9 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> ret = -ENOMEM;
> break;
> }
> + info->map = (unsigned long *)(info + 1);
> + info->nr_deferred = (atomic_long_t *)(info->map +
> + shrinker_nr_max / BITS_PER_LONG + 1);
Why not:
info->nr_deferred = (void*)info->map + m_size;
?
> rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
> }
> up_write(&shrinker_rwsem);
> @@ -266,10 +276,16 @@ static int expand_shrinker_info(int new_id)
> {
> int size, old_size, ret = 0;
> int new_nr_max = new_id + 1;
> + int m_size, d_size = 0;
> + int old_m_size, old_d_size = 0;
> struct mem_cgroup *memcg;
>
> - size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> - old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> + m_size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> + d_size = new_nr_max * sizeof(atomic_long_t);
> + size = m_size + d_size;
> + old_m_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
Could you please pack this twice-repeated pattern into a macro? E.g.,
#define NR_MAX_TO_SHR_MAP_SIZE(nr_max) \
((nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long))
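With such a macro the repeated computations above would collapse to, e.g.
(illustrative only):

	m_size     = NR_MAX_TO_SHR_MAP_SIZE(new_nr_max);
	old_m_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);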
> + old_d_size = shrinker_nr_max * sizeof(atomic_long_t);
> + old_size = old_m_size + old_d_size;
> if (size <= old_size)
> return 0;
>
> @@ -278,9 +294,8 @@ static int expand_shrinker_info(int new_id)
>
> memcg = mem_cgroup_iter(NULL, NULL, NULL);
> do {
> - if (mem_cgroup_is_root(memcg))
> - continue;
> - ret = expand_one_shrinker_info(memcg, size, old_size);
> + ret = expand_one_shrinker_info(memcg, new_nr_max, m_size, d_size,
> + old_m_size, old_d_size);
> if (ret) {
> mem_cgroup_iter_break(NULL, memcg);
> goto out;
>
* Re: [v4 PATCH 07/11] mm: vmscan: add per memcg shrinker nr_deferred
From: Yang Shi @ 2021-01-25 21:08 UTC (permalink / raw)
To: Kirill Tkhai
Cc: Roman Gushchin, Shakeel Butt, Dave Chinner, Johannes Weiner,
Michal Hocko, Andrew Morton, Linux MM,
Linux FS-devel Mailing List, Linux Kernel Mailing List
On Mon, Jan 25, 2021 at 1:31 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>
> On 22.01.2021 02:06, Yang Shi wrote:
> > Currently the number of deferred objects are per shrinker, but some slabs, for example,
> > vfs inode/dentry cache are per memcg, this would result in poor isolation among memcgs.
> >
> > The deferred objects typically are generated by __GFP_NOFS allocations, one memcg with
> > excessive __GFP_NOFS allocations may blow up deferred objects, then other innocent memcgs
> > may suffer from over shrink, excessive reclaim latency, etc.
> >
> > For example, two workloads run in memcgA and memcgB respectively, workload in B is vfs
> > heavy workload. Workload in A generates excessive deferred objects, then B's vfs cache
> > might be hit heavily (drop half of caches) by B's limit reclaim or global reclaim.
> >
> > We observed this hit in our production environment which was running vfs heavy workload
> > shown as the below tracing log:
> >
> > <...>-409454 [016] .... 28286961.747146: mm_shrink_slab_start: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
> > nid: 1 objects to shrink 3641681686040 gfp_flags GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721
> > cache items 246404277 delta 31345 total_scan 123202138
> > <...>-409454 [022] .... 28287105.928018: mm_shrink_slab_end: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
> > nid: 1 unused scan count 3641681686040 new scan count 3641798379189 total_scan 602
> > last shrinker return val 123186855
> >
> > The vfs cache and page cache ration was 10:1 on this machine, and half of caches were dropped.
> > This also resulted in significant amount of page caches were dropped due to inodes eviction.
> >
> > Make nr_deferred per memcg for memcg aware shrinkers would solve the unfairness and bring
> > better isolation.
> >
> > When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the shrinker's nr_deferred
> > would be used. And non memcg aware shrinkers use shrinker's nr_deferred all the time.
> >
> > Signed-off-by: Yang Shi <shy828301@gmail.com>
> > ---
> > include/linux/memcontrol.h | 7 +++---
> > mm/vmscan.c | 49 +++++++++++++++++++++++++-------------
> > 2 files changed, 36 insertions(+), 20 deletions(-)
> >
> > diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> > index 62b888b88a5f..e0384367e07d 100644
> > --- a/include/linux/memcontrol.h
> > +++ b/include/linux/memcontrol.h
> > @@ -93,12 +93,13 @@ struct lruvec_stat {
> > };
> >
> > /*
> > - * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
> > - * which have elements charged to this memcg.
> > + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
> > + * shrinkers, which have elements charged to this memcg.
> > */
> > struct shrinker_info {
> > struct rcu_head rcu;
> > - unsigned long map[];
> > + unsigned long *map;
> > + atomic_long_t *nr_deferred;
> > };
> >
> > /*
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 018e1beb24c9..722aa71b13b2 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -192,11 +192,13 @@ static void free_shrinker_info_rcu(struct rcu_head *head)
> > kvfree(container_of(head, struct shrinker_info, rcu));
> > }
> >
> > -static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> > - int size, int old_size)
> > +static int expand_one_shrinker_info(struct mem_cgroup *memcg, int nr_max,
> > + int m_size, int d_size,
> > + int old_m_size, int old_d_size)
> > {
> > struct shrinker_info *new, *old;
> > int nid;
> > + int size = m_size + d_size;
> >
> > for_each_node(nid) {
> > old = rcu_dereference_protected(
> > @@ -209,9 +211,16 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> > if (!new)
> > return -ENOMEM;
> >
> > - /* Set all old bits, clear all new bits */
> > - memset(new->map, (int)0xff, old_size);
> > - memset((void *)new->map + old_size, 0, size - old_size);
> > + new->map = (unsigned long *)(new + 1);
> > + new->nr_deferred = (atomic_long_t *)(new->map +
> > + nr_max / BITS_PER_LONG + 1);
>
> Why not
>
> new->nr_deferred = (void *)new->map + m_size;
> ?
>
> > +
> > + /* map: set all old bits, clear all new bits */
> > + memset(new->map, (int)0xff, old_m_size);
> > + memset((void *)new->map + old_m_size, 0, m_size - old_m_size);
> > + /* nr_deferred: copy old values, clear all new values */
> > + memcpy(new->nr_deferred, old->nr_deferred, old_d_size);
> > + memset((void *)new->nr_deferred + old_d_size, 0, d_size - old_d_size);
> >
> > rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, new);
> > call_rcu(&old->rcu, free_shrinker_info_rcu);
> > @@ -226,9 +235,6 @@ void free_shrinker_info(struct mem_cgroup *memcg)
> > struct shrinker_info *info;
> > int nid;
> >
> > - if (mem_cgroup_is_root(memcg))
> > - return;
> > -
> > for_each_node(nid) {
> > pn = mem_cgroup_nodeinfo(memcg, nid);
> > info = rcu_dereference_protected(pn->shrinker_info, true);
> > @@ -242,12 +248,13 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> > {
> > struct shrinker_info *info;
> > int nid, size, ret = 0;
> > -
> > - if (mem_cgroup_is_root(memcg))
> > - return 0;
> > + int m_size, d_size = 0;
> >
> > down_write(&shrinker_rwsem);
> > - size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > + m_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > + d_size = shrinker_nr_max * sizeof(atomic_long_t);
> > + size = m_size + d_size;
> > +
> > for_each_node(nid) {
> > info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
> > if (!info) {
> > @@ -255,6 +262,9 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> > ret = -ENOMEM;
> > break;
> > }
> > + info->map = (unsigned long *)(info + 1);
> > + info->nr_deferred = (atomic_long_t *)(info->map +
> > + shrinker_nr_max / BITS_PER_LONG + 1);
>
> Why not:
> info->nr_deferred = (void*)info->map + m_size;
Yes, definitely. Will fix in v5.
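For reference, a minimal sketch of the layout with that arithmetic applied (v4
field names; both arrays live in the same allocation right behind the struct):

	/* Sketch: kvzalloc_node() allocated sizeof(*info) + m_size + d_size,
	 * so the bitmap starts right after the struct and nr_deferred right
	 * after the bitmap; deriving the second pointer from m_size avoids
	 * repeating the BITS_PER_LONG math. */
	info->map = (unsigned long *)(info + 1);
	info->nr_deferred = (atomic_long_t *)((void *)info->map + m_size);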
> ?
>
> > rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
> > }
> > up_write(&shrinker_rwsem);
> > @@ -266,10 +276,16 @@ static int expand_shrinker_info(int new_id)
> > {
> > int size, old_size, ret = 0;
> > int new_nr_max = new_id + 1;
> > + int m_size, d_size = 0;
> > + int old_m_size, old_d_size = 0;
> > struct mem_cgroup *memcg;
> >
> > - size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > - old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > + m_size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > + d_size = new_nr_max * sizeof(atomic_long_t);
> > + size = m_size + d_size;
> > + old_m_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
>
> Could you please pack this twice-repeated pattern into some macro? E.g.,
>
> #define NR_MAX_TO_SHR_MAP_SIZE(nr_max) \
> ((nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long))
Sure. Will incorporate in v5.
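A possible shape for the v5 helpers (a sketch; the second macro name is
illustrative, not from the posted series, and nr_max is parenthesized for
macro hygiene):

#define NR_MAX_TO_SHR_MAP_SIZE(nr_max) \
	(((nr_max) / BITS_PER_LONG + 1) * sizeof(unsigned long))
#define NR_MAX_TO_SHR_DEF_SIZE(nr_max) \
	((nr_max) * sizeof(atomic_long_t))

	m_size = NR_MAX_TO_SHR_MAP_SIZE(new_nr_max);
	d_size = NR_MAX_TO_SHR_DEF_SIZE(new_nr_max);
	old_m_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
	old_d_size = NR_MAX_TO_SHR_DEF_SIZE(shrinker_nr_max);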
>
> > + old_d_size = shrinker_nr_max * sizeof(atomic_long_t);
> > + old_size = old_m_size + old_d_size;
> > if (size <= old_size)
> > return 0;
> >
> > @@ -278,9 +294,8 @@ static int expand_shrinker_info(int new_id)
> >
> > memcg = mem_cgroup_iter(NULL, NULL, NULL);
> > do {
> > - if (mem_cgroup_is_root(memcg))
> > - continue;
> > - ret = expand_one_shrinker_info(memcg, size, old_size);
> > + ret = expand_one_shrinker_info(memcg, new_nr_max, m_size, d_size,
> > + old_m_size, old_d_size);
> > if (ret) {
> > mem_cgroup_iter_break(NULL, memcg);
> > goto out;
> >
>
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* [v4 PATCH 08/11] mm: vmscan: use per memcg nr_deferred of shrinker
2021-01-21 23:06 [v4 PATCH 0/11] Make shrinker's nr_deferred memcg aware Yang Shi
` (6 preceding siblings ...)
2021-01-21 23:06 ` [v4 PATCH 07/11] mm: vmscan: add per memcg shrinker nr_deferred Yang Shi
@ 2021-01-21 23:06 ` Yang Shi
2021-01-25 9:35 ` Kirill Tkhai
2021-01-21 23:06 ` [v4 PATCH 09/11] mm: vmscan: don't need allocate shrinker->nr_deferred for memcg aware shrinkers Yang Shi
` (2 subsequent siblings)
10 siblings, 1 reply; 18+ messages in thread
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
Use the per memcg nr_deferred for memcg aware shrinkers. The shrinker's own nr_deferred
will still be used in the following cases:
1. Non memcg aware shrinkers
2. !CONFIG_MEMCG
3. memcg is disabled by boot parameter
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
mm/vmscan.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 69 insertions(+), 12 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 722aa71b13b2..d8e77ea13815 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -359,6 +359,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
up_write(&shrinker_rwsem);
}
+static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+ struct shrinker_info *info;
+
+ info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+ true);
+ return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
+}
+
+static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+ struct shrinker_info *info;
+
+ info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+ true);
+
+ return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
+}
+
static bool cgroup_reclaim(struct scan_control *sc)
{
return sc->target_mem_cgroup;
@@ -397,6 +418,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}
+static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
+static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
static bool cgroup_reclaim(struct scan_control *sc)
{
return false;
@@ -408,6 +441,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
}
#endif
+static long count_nr_deferred(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ int nid = sc->nid;
+
+ if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+ nid = 0;
+
+ if (sc->memcg &&
+ (shrinker->flags & SHRINKER_MEMCG_AWARE))
+ return count_nr_deferred_memcg(nid, shrinker,
+ sc->memcg);
+
+ return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+}
+
+
+static long set_nr_deferred(long nr, struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ int nid = sc->nid;
+
+ if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+ nid = 0;
+
+ if (sc->memcg &&
+ (shrinker->flags & SHRINKER_MEMCG_AWARE))
+ return set_nr_deferred_memcg(nr, nid, shrinker,
+ sc->memcg);
+
+ return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
+}
+
/*
* This misses isolated pages which are not accounted for to save counters.
* As the data only determines if reclaim or compaction continues, it is
@@ -544,14 +610,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
long freeable;
long nr;
long new_nr;
- int nid = shrinkctl->nid;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
long scanned = 0, next_deferred;
- if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
- nid = 0;
-
freeable = shrinker->count_objects(shrinker, shrinkctl);
if (freeable == 0 || freeable == SHRINK_EMPTY)
return freeable;
@@ -561,7 +623,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
- nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+ nr = count_nr_deferred(shrinker, shrinkctl);
total_scan = nr;
if (shrinker->seeks) {
@@ -652,14 +714,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
next_deferred = 0;
/*
* move the unused scan count back into the shrinker in a
- * manner that handles concurrent updates. If we exhausted the
- * scan, there is no need to do an update.
+ * manner that handles concurrent updates.
*/
- if (next_deferred > 0)
- new_nr = atomic_long_add_return(next_deferred,
- &shrinker->nr_deferred[nid]);
- else
- new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+ new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
return freed;
--
2.26.2
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [v4 PATCH 08/11] mm: vmscan: use per memcg nr_deferred of shrinker
2021-01-21 23:06 ` [v4 PATCH 08/11] mm: vmscan: use per memcg nr_deferred of shrinker Yang Shi
@ 2021-01-25 9:35 ` Kirill Tkhai
2021-01-25 21:16 ` Yang Shi
0 siblings, 1 reply; 18+ messages in thread
From: Kirill Tkhai @ 2021-01-25 9:35 UTC (permalink / raw)
To: Yang Shi, guro, shakeelb, david, hannes, mhocko, akpm
Cc: linux-mm, linux-fsdevel, linux-kernel
On 22.01.2021 02:06, Yang Shi wrote:
> Use the per memcg nr_deferred for memcg aware shrinkers. The shrinker's own nr_deferred
> will still be used in the following cases:
> 1. Non memcg aware shrinkers
> 2. !CONFIG_MEMCG
> 3. memcg is disabled by boot parameter
>
> Signed-off-by: Yang Shi <shy828301@gmail.com>
> ---
> mm/vmscan.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--------
> 1 file changed, 69 insertions(+), 12 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 722aa71b13b2..d8e77ea13815 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -359,6 +359,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> up_write(&shrinker_rwsem);
> }
>
> +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> + struct mem_cgroup *memcg)
> +{
> + struct shrinker_info *info;
> +
> + info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> + true);
Since these rcu_dereference_protected() calls are now in separate functions and there is
no lock taken near them, it seems it would be better to underline the desired
lock with rcu_dereference_protected(..., lockdep_assert_held(lock_you_need_here_locked));
> + return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
> +}
> +
> +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> + struct mem_cgroup *memcg)
> +{
> + struct shrinker_info *info;
> +
> + info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> + true);
> +
> + return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
> +}
> +
> static bool cgroup_reclaim(struct scan_control *sc)
> {
> return sc->target_mem_cgroup;
> @@ -397,6 +418,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> {
> }
>
> +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> + struct mem_cgroup *memcg)
> +{
> + return 0;
> +}
> +
> +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> + struct mem_cgroup *memcg)
> +{
> + return 0;
> +}
> +
> static bool cgroup_reclaim(struct scan_control *sc)
> {
> return false;
> @@ -408,6 +441,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
> }
> #endif
>
> +static long count_nr_deferred(struct shrinker *shrinker,
> + struct shrink_control *sc)
> +{
> + int nid = sc->nid;
> +
> + if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> + nid = 0;
> +
> + if (sc->memcg &&
> + (shrinker->flags & SHRINKER_MEMCG_AWARE))
> + return count_nr_deferred_memcg(nid, shrinker,
> + sc->memcg);
> +
> + return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> +}
> +
> +
> +static long set_nr_deferred(long nr, struct shrinker *shrinker,
> + struct shrink_control *sc)
> +{
> + int nid = sc->nid;
> +
> + if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> + nid = 0;
> +
> + if (sc->memcg &&
> + (shrinker->flags & SHRINKER_MEMCG_AWARE))
> + return set_nr_deferred_memcg(nr, nid, shrinker,
> + sc->memcg);
> +
> + return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
> +}
> +
> /*
> * This misses isolated pages which are not accounted for to save counters.
> * As the data only determines if reclaim or compaction continues, it is
> @@ -544,14 +610,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> long freeable;
> long nr;
> long new_nr;
> - int nid = shrinkctl->nid;
> long batch_size = shrinker->batch ? shrinker->batch
> : SHRINK_BATCH;
> long scanned = 0, next_deferred;
>
> - if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> - nid = 0;
> -
> freeable = shrinker->count_objects(shrinker, shrinkctl);
> if (freeable == 0 || freeable == SHRINK_EMPTY)
> return freeable;
> @@ -561,7 +623,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> * and zero it so that other concurrent shrinker invocations
> * don't also do this scanning work.
> */
> - nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> + nr = count_nr_deferred(shrinker, shrinkctl);
>
> total_scan = nr;
> if (shrinker->seeks) {
> @@ -652,14 +714,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> next_deferred = 0;
> /*
> * move the unused scan count back into the shrinker in a
> - * manner that handles concurrent updates. If we exhausted the
> - * scan, there is no need to do an update.
> + * manner that handles concurrent updates.
> */
> - if (next_deferred > 0)
> - new_nr = atomic_long_add_return(next_deferred,
> - &shrinker->nr_deferred[nid]);
> - else
> - new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
> + new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
>
> trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
> return freed;
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [v4 PATCH 08/11] mm: vmscan: use per memcg nr_deferred of shrinker
2021-01-25 9:35 ` Kirill Tkhai
@ 2021-01-25 21:16 ` Yang Shi
0 siblings, 0 replies; 18+ messages in thread
From: Yang Shi @ 2021-01-25 21:16 UTC (permalink / raw)
To: Kirill Tkhai
Cc: Roman Gushchin, Shakeel Butt, Dave Chinner, Johannes Weiner,
Michal Hocko, Andrew Morton, Linux MM,
Linux FS-devel Mailing List, Linux Kernel Mailing List
On Mon, Jan 25, 2021 at 1:35 AM Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>
> On 22.01.2021 02:06, Yang Shi wrote:
> > Use the per memcg nr_deferred for memcg aware shrinkers. The shrinker's own nr_deferred
> > will still be used in the following cases:
> > 1. Non memcg aware shrinkers
> > 2. !CONFIG_MEMCG
> > 3. memcg is disabled by boot parameter
> >
> > Signed-off-by: Yang Shi <shy828301@gmail.com>
> > ---
> > mm/vmscan.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--------
> > 1 file changed, 69 insertions(+), 12 deletions(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 722aa71b13b2..d8e77ea13815 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -359,6 +359,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> > up_write(&shrinker_rwsem);
> > }
> >
> > +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> > + struct mem_cgroup *memcg)
> > +{
> > + struct shrinker_info *info;
> > +
> > + info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> > + true);
>
> Since these rcu_dereference_protected() calls are now in separate functions and there is
> no lock taken near them, it seems it would be better to underline the desired
> lock with rcu_dereference_protected(..., lockdep_assert_held(lock_you_need_here_locked));
Sure. Will incorporate in v5. BTW, I noticed that using
lockdep_assert_held() as a parameter of these functions results in a
compilation failure with gcc 10.0.1 (shipped with Fedora 32), but it
compiles fine with gcc 8.3.1.
In file included from ./include/linux/rbtree.h:22,
from ./include/linux/mm_types.h:10,
from ./include/linux/mmzone.h:21,
from ./include/linux/gfp.h:6,
from ./include/linux/mm.h:10,
from mm/vmscan.c:15:
mm/vmscan.c: In function ‘shrinker_info_protected’:
./include/linux/lockdep.h:386:34: error: expected expression before ‘do’
386 | #define lockdep_assert_held(l) do { (void)(l); } while (0)
| ^~
./include/linux/rcupdate.h:337:52: note: in definition of macro
‘RCU_LOCKDEP_WARN’
337 | #define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
| ^
./include/linux/rcupdate.h:554:2: note: in expansion of macro
‘__rcu_dereference_protected’
554 | __rcu_dereference_protected((p), (c), __rcu)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
mm/vmscan.c:389:9: note: in expansion of macro ‘rcu_dereference_protected’
389 | return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
| ^~~~~~~~~~~~~~~~~~~~~~~~~
mm/vmscan.c:390:7: note: in expansion of macro ‘lockdep_assert_held’
390 | lockdep_assert_held(&shrinker_rwsem));
| ^~~~~~~~~~~~~~~~~~~
I didn't dig into the root cause; I'll just use lockdep_is_held() instead
of lockdep_assert_held().
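For what it's worth, the failure follows from the macro shapes alone:
lockdep_assert_held() expands to a do { ... } while (0) statement (lockdep.h:386
in the log above), which cannot appear in expression context, while
lockdep_is_held() evaluates to an int. The helper that appears in patch 10 of
this series accordingly uses the lockdep_is_held() form:

	static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
							     int nid)
	{
		return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
						 lockdep_is_held(&shrinker_rwsem));
	}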
>
>
> > + return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
> > +}
> > +
> > +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> > + struct mem_cgroup *memcg)
> > +{
> > + struct shrinker_info *info;
> > +
> > + info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> > + true);
> > +
> > + return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
> > +}
> > +
> > static bool cgroup_reclaim(struct scan_control *sc)
> > {
> > return sc->target_mem_cgroup;
> > @@ -397,6 +418,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> > {
> > }
> >
> > +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> > + struct mem_cgroup *memcg)
> > +{
> > + return 0;
> > +}
> > +
> > +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> > + struct mem_cgroup *memcg)
> > +{
> > + return 0;
> > +}
> > +
> > static bool cgroup_reclaim(struct scan_control *sc)
> > {
> > return false;
> > @@ -408,6 +441,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
> > }
> > #endif
> >
> > +static long count_nr_deferred(struct shrinker *shrinker,
> > + struct shrink_control *sc)
> > +{
> > + int nid = sc->nid;
> > +
> > + if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> > + nid = 0;
> > +
> > + if (sc->memcg &&
> > + (shrinker->flags & SHRINKER_MEMCG_AWARE))
> > + return count_nr_deferred_memcg(nid, shrinker,
> > + sc->memcg);
> > +
> > + return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> > +}
> > +
> > +
> > +static long set_nr_deferred(long nr, struct shrinker *shrinker,
> > + struct shrink_control *sc)
> > +{
> > + int nid = sc->nid;
> > +
> > + if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> > + nid = 0;
> > +
> > + if (sc->memcg &&
> > + (shrinker->flags & SHRINKER_MEMCG_AWARE))
> > + return set_nr_deferred_memcg(nr, nid, shrinker,
> > + sc->memcg);
> > +
> > + return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
> > +}
> > +
> > /*
> > * This misses isolated pages which are not accounted for to save counters.
> > * As the data only determines if reclaim or compaction continues, it is
> > @@ -544,14 +610,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> > long freeable;
> > long nr;
> > long new_nr;
> > - int nid = shrinkctl->nid;
> > long batch_size = shrinker->batch ? shrinker->batch
> > : SHRINK_BATCH;
> > long scanned = 0, next_deferred;
> >
> > - if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> > - nid = 0;
> > -
> > freeable = shrinker->count_objects(shrinker, shrinkctl);
> > if (freeable == 0 || freeable == SHRINK_EMPTY)
> > return freeable;
> > @@ -561,7 +623,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> > * and zero it so that other concurrent shrinker invocations
> > * don't also do this scanning work.
> > */
> > - nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> > + nr = count_nr_deferred(shrinker, shrinkctl);
> >
> > total_scan = nr;
> > if (shrinker->seeks) {
> > @@ -652,14 +714,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> > next_deferred = 0;
> > /*
> > * move the unused scan count back into the shrinker in a
> > - * manner that handles concurrent updates. If we exhausted the
> > - * scan, there is no need to do an update.
> > + * manner that handles concurrent updates.
> > */
> > - if (next_deferred > 0)
> > - new_nr = atomic_long_add_return(next_deferred,
> > - &shrinker->nr_deferred[nid]);
> > - else
> > - new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
> > + new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
> >
> > trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
> > return freed;
> >
>
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* [v4 PATCH 09/11] mm: vmscan: don't need allocate shrinker->nr_deferred for memcg aware shrinkers
2021-01-21 23:06 [v4 PATCH 0/11] Make shrinker's nr_deferred memcg aware Yang Shi
` (7 preceding siblings ...)
2021-01-21 23:06 ` [v4 PATCH 08/11] mm: vmscan: use per memcg nr_deferred of shrinker Yang Shi
@ 2021-01-21 23:06 ` Yang Shi
2021-01-21 23:06 ` [v4 PATCH 10/11] mm: memcontrol: reparent nr_deferred when memcg offline Yang Shi
2021-01-21 23:06 ` [v4 PATCH 11/11] mm: vmscan: shrink deferred objects proportional to priority Yang Shi
10 siblings, 0 replies; 18+ messages in thread
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
Now that nr_deferred is available at the per memcg level for memcg aware shrinkers, there
is no need to allocate shrinker->nr_deferred for such shrinkers anymore.
prealloc_memcg_shrinker() returns -ENOSYS if !CONFIG_MEMCG or memcg is disabled by the
kernel command line; the shrinker's SHRINKER_MEMCG_AWARE flag is then cleared.
This keeps the implementation of this patch simple.
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
mm/vmscan.c | 33 ++++++++++++++++++---------------
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d8e77ea13815..ea1402e7b968 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -329,6 +329,9 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
int id, ret = -ENOMEM;
+ if (mem_cgroup_disabled())
+ return -ENOSYS;
+
down_write(&shrinker_rwsem);
/* This may call shrinker, so it must use down_read_trylock() */
id = idr_alloc(&shrinker_idr, NULL, 0, 0, GFP_KERNEL);
@@ -411,7 +414,7 @@ static bool writeback_throttling_sane(struct scan_control *sc)
#else
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
- return 0;
+ return -ENOSYS;
}
static void unregister_memcg_shrinker(struct shrinker *shrinker)
@@ -522,8 +525,20 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
*/
int prealloc_shrinker(struct shrinker *shrinker)
{
- unsigned int size = sizeof(*shrinker->nr_deferred);
+ unsigned int size;
+ int err;
+
+ if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
+ err = prealloc_memcg_shrinker(shrinker);
+ if (!err)
+ return 0;
+ if (err != -ENOSYS)
+ return err;
+
+ shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
+ }
+ size = sizeof(*shrinker->nr_deferred);
if (shrinker->flags & SHRINKER_NUMA_AWARE)
size *= nr_node_ids;
@@ -531,26 +546,14 @@ int prealloc_shrinker(struct shrinker *shrinker)
if (!shrinker->nr_deferred)
return -ENOMEM;
- if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
- if (prealloc_memcg_shrinker(shrinker))
- goto free_deferred;
- }
return 0;
-
-free_deferred:
- kfree(shrinker->nr_deferred);
- shrinker->nr_deferred = NULL;
- return -ENOMEM;
}
void free_prealloced_shrinker(struct shrinker *shrinker)
{
- if (!shrinker->nr_deferred)
- return;
-
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
- unregister_memcg_shrinker(shrinker);
+ return unregister_memcg_shrinker(shrinker);
kfree(shrinker->nr_deferred);
shrinker->nr_deferred = NULL;
--
2.26.2
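To illustrate the caller-visible behavior of this change (a sketch; demo_count()
and demo_scan() are hypothetical placeholders, only prealloc_shrinker() and the
flags come from the series):

	/* A memcg aware shrinker registered on a memcg-disabled boot now
	 * degrades silently: prealloc_memcg_shrinker() returns -ENOSYS,
	 * SHRINKER_MEMCG_AWARE is cleared, and the plain per-nid
	 * shrinker->nr_deferred array is allocated instead. */
	static struct shrinker demo_shrinker = {
		.count_objects = demo_count,
		.scan_objects  = demo_scan,
		.seeks         = DEFAULT_SEEKS,
		.flags         = SHRINKER_MEMCG_AWARE | SHRINKER_NUMA_AWARE,
	};

	static int __init demo_init(void)
	{
		return prealloc_shrinker(&demo_shrinker);
	}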
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [v4 PATCH 10/11] mm: memcontrol: reparent nr_deferred when memcg offline
2021-01-21 23:06 [v4 PATCH 0/11] Make shrinker's nr_deferred memcg aware Yang Shi
` (8 preceding siblings ...)
2021-01-21 23:06 ` [v4 PATCH 09/11] mm: vmscan: don't need allocate shrinker->nr_deferred for memcg aware shrinkers Yang Shi
@ 2021-01-21 23:06 ` Yang Shi
2021-01-21 23:06 ` [v4 PATCH 11/11] mm: vmscan: shrink deferred objects proportional to priority Yang Shi
10 siblings, 0 replies; 18+ messages in thread
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
Now that the shrinker's nr_deferred is per memcg for memcg aware shrinkers, add the
child's counts to the parent's corresponding nr_deferred when the memcg goes offline.
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
include/linux/memcontrol.h | 1 +
mm/memcontrol.c | 1 +
mm/vmscan.c | 31 +++++++++++++++++++++++++++++++
3 files changed, 33 insertions(+)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e0384367e07d..fe1375f08881 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1586,6 +1586,7 @@ extern int alloc_shrinker_info(struct mem_cgroup *memcg);
extern void free_shrinker_info(struct mem_cgroup *memcg);
extern void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id);
+extern void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 65d9eb0215b5..cccf2bacb147 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5284,6 +5284,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
page_counter_set_low(&memcg->memory, 0);
memcg_offline_kmem(memcg);
+ reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
drain_all_stock(memcg);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ea1402e7b968..e73f200ffd2d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -383,6 +383,37 @@ static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
}
+static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
+ int nid)
+{
+ return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+ lockdep_is_held(&shrinker_rwsem));
+}
+
+void reparent_shrinker_deferred(struct mem_cgroup *memcg)
+{
+ int i, nid;
+ long nr;
+ struct mem_cgroup *parent;
+ struct shrinker_info *child_info, *parent_info;
+
+ parent = parent_mem_cgroup(memcg);
+ if (!parent)
+ parent = root_mem_cgroup;
+
+ /* Prevent from concurrent shrinker_info expand */
+ down_read(&shrinker_rwsem);
+ for_each_node(nid) {
+ child_info = shrinker_info_protected(memcg, nid);
+ parent_info = shrinker_info_protected(parent, nid);
+ for (i = 0; i < shrinker_nr_max; i++) {
+ nr = atomic_long_read(&child_info->nr_deferred[i]);
+ atomic_long_add(nr, &parent_info->nr_deferred[i]);
+ }
+ }
+ up_read(&shrinker_rwsem);
+}
+
static bool cgroup_reclaim(struct scan_control *sc)
{
return sc->target_mem_cgroup;
--
2.26.2
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [v4 PATCH 11/11] mm: vmscan: shrink deferred objects proportional to priority
2021-01-21 23:06 [v4 PATCH 0/11] Make shrinker's nr_deferred memcg aware Yang Shi
` (9 preceding siblings ...)
2021-01-21 23:06 ` [v4 PATCH 10/11] mm: memcontrol: reparent nr_deferred when memcg offline Yang Shi
@ 2021-01-21 23:06 ` Yang Shi
10 siblings, 0 replies; 18+ messages in thread
From: Yang Shi @ 2021-01-21 23:06 UTC (permalink / raw)
To: guro, ktkhai, shakeelb, david, hannes, mhocko, akpm
Cc: shy828301, linux-mm, linux-fsdevel, linux-kernel
The number of deferred objects might wind up to an absurd number, which results in the
slab object scan being clamped at its cap pass after pass. This is undesirable for
sustaining the working set.
So shrink deferred objects proportionally to priority and cap nr_deferred at twice the
number of cache items.
The idea is borrowed from Dave Chinner's patch:
https://lore.kernel.org/linux-xfs/20191031234618.15403-13-david@fromorbit.com/
Tested with a kernel build and a vfs metadata heavy workload; no regression has been
spotted so far, but some corner cases may still regress.
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
mm/vmscan.c | 40 +++++-----------------------------------
1 file changed, 5 insertions(+), 35 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e73f200ffd2d..bb254d39339f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -659,7 +659,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
*/
nr = count_nr_deferred(shrinker, shrinkctl);
- total_scan = nr;
if (shrinker->seeks) {
delta = freeable >> priority;
delta *= 4;
@@ -673,37 +672,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
delta = freeable / 2;
}
+ total_scan = nr >> priority;
total_scan += delta;
- if (total_scan < 0) {
- pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
- shrinker->scan_objects, total_scan);
- total_scan = freeable;
- next_deferred = nr;
- } else
- next_deferred = total_scan;
-
- /*
- * We need to avoid excessive windup on filesystem shrinkers
- * due to large numbers of GFP_NOFS allocations causing the
- * shrinkers to return -1 all the time. This results in a large
- * nr being built up so when a shrink that can do some work
- * comes along it empties the entire cache due to nr >>>
- * freeable. This is bad for sustaining a working set in
- * memory.
- *
- * Hence only allow the shrinker to scan the entire cache when
- * a large delta change is calculated directly.
- */
- if (delta < freeable / 4)
- total_scan = min(total_scan, freeable / 2);
-
- /*
- * Avoid risking looping forever due to too large nr value:
- * never try to free more than twice the estimate number of
- * freeable entries.
- */
- if (total_scan > freeable * 2)
- total_scan = freeable * 2;
+ total_scan = min(total_scan, (2 * freeable));
trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
freeable, delta, total_scan, priority);
@@ -742,10 +713,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
cond_resched();
}
- if (next_deferred >= scanned)
- next_deferred -= scanned;
- else
- next_deferred = 0;
+ next_deferred = max_t(long, (nr - scanned), 0) + total_scan;
+ next_deferred = min(next_deferred, (2 * freeable));
+
/*
* move the unused scan count back into the shrinker in a
* manner that handles concurrent updates.
--
2.26.2
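To make the new proportional math concrete, a worked example with assumed numbers
(not taken from the posted logs):

	/*
	 * Assume freeable = 10000, deferred nr = 100000,
	 * priority = DEF_PRIORITY (12), seeks = DEFAULT_SEEKS (2):
	 *
	 *   delta      = 4 * (freeable >> priority) / seeks
	 *              = 4 * (10000 >> 12) / 2         = 4
	 *   total_scan = (nr >> priority) + delta
	 *              = (100000 >> 12) + 4            = 28
	 *   cap        = min(total_scan, 2 * freeable) = 28
	 *
	 * The removed logic would have set total_scan = nr + delta = 100004
	 * and then, since delta < freeable / 4, clamped it to freeable / 2 =
	 * 5000, i.e. half the cache scanned in a single pass.  Now only
	 * nr >> priority of the backlog is replayed per pass, and
	 * next_deferred = max(nr - scanned, 0) + total_scan is itself capped
	 * at 2 * freeable, which prevents windup.
	 */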
^ permalink raw reply related [flat|nested] 18+ messages in thread