Subject: + mm-memcontrol-kill-mem_cgroup_nodeinfo.patch added to -mm tree
From: akpm
Date: 2021-03-08 23:08 UTC
To: guro, hannes, mhocko, mkoutny, mm-commits, shakeelb, tj
The patch titled
     Subject: mm: memcontrol: kill mem_cgroup_nodeinfo()
has been added to the -mm tree.  Its filename is
     mm-memcontrol-kill-mem_cgroup_nodeinfo.patch
This patch should soon appear at
https://ozlabs.org/~akpm/mmots/broken-out/mm-memcontrol-kill-mem_cgroup_nodeinfo.patch
and later at
https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcontrol-kill-mem_cgroup_nodeinfo.patch
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included in linux-next and is updated
there every 3-4 working days.
------------------------------------------------------
From: Johannes Weiner <hannes@cmpxchg.org>
Subject: mm: memcontrol: kill mem_cgroup_nodeinfo()
No need to encapsulate a simple struct member access.
Link: https://lkml.kernel.org/r/20210209163304.77088-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Michal Koutný <mkoutny@suse.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
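The change is mechanical. As a minimal sketch of the before/after pattern (struct layout abbreviated here; see include/linux/memcontrol.h for the real definition, where nodeinfo sits at the end of struct mem_cgroup):

	struct mem_cgroup {
		/* ... other fields omitted ... */
		struct mem_cgroup_per_node *nodeinfo[0];	/* one entry per node */
	};

	/* before: through the wrapper being removed */
	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);

	/* after: plain member access, same behavior */
	mz = memcg->nodeinfo[pgdat->node_id];
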
 include/linux/memcontrol.h |    8 +-------
 mm/memcontrol.c            |   21 +++++++++++----------
 2 files changed, 12 insertions(+), 17 deletions(-)
--- a/include/linux/memcontrol.h~mm-memcontrol-kill-mem_cgroup_nodeinfo
+++ a/include/linux/memcontrol.h
@@ -605,12 +605,6 @@ void mem_cgroup_uncharge_list(struct lis
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 
-static struct mem_cgroup_per_node *
-mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
-{
-	return memcg->nodeinfo[nid];
-}
-
 /**
  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
  * @memcg: memcg of the wanted lruvec
@@ -634,7 +628,7 @@ static inline struct lruvec *mem_cgroup_
 	if (!memcg)
 		memcg = root_mem_cgroup;
 
-	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
+	mz = memcg->nodeinfo[pgdat->node_id];
 	lruvec = &mz->lruvec;
 out:
 	/*
--- a/mm/memcontrol.c~mm-memcontrol-kill-mem_cgroup_nodeinfo
+++ a/mm/memcontrol.c
@@ -414,13 +414,14 @@ static int memcg_expand_one_shrinker_map
 					 int size, int old_size)
 {
 	struct memcg_shrinker_map *new, *old;
+	struct mem_cgroup_per_node *pn;
 	int nid;
 
 	lockdep_assert_held(&memcg_shrinker_map_mutex);
 
 	for_each_node(nid) {
-		old = rcu_dereference_protected(
-			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
+		pn = memcg->nodeinfo[nid];
+		old = rcu_dereference_protected(pn->shrinker_map, true);
 		/* Not yet online memcg */
 		if (!old)
 			return 0;
@@ -433,7 +434,7 @@ static int memcg_expand_one_shrinker_map
 		memset(new->map, (int)0xff, old_size);
 		memset((void *)new->map + old_size, 0, size - old_size);
 
-		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
+		rcu_assign_pointer(pn->shrinker_map, new);
 		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
 	}
 
@@ -450,7 +451,7 @@ static void memcg_free_shrinker_maps(str
 		return;
 
 	for_each_node(nid) {
-		pn = mem_cgroup_nodeinfo(memcg, nid);
+		pn = memcg->nodeinfo[nid];
 		map = rcu_dereference_protected(pn->shrinker_map, true);
 		kvfree(map);
 		rcu_assign_pointer(pn->shrinker_map, NULL);
@@ -713,7 +714,7 @@ static void mem_cgroup_remove_from_trees
 	int nid;
 
 	for_each_node(nid) {
-		mz = mem_cgroup_nodeinfo(memcg, nid);
+		mz = memcg->nodeinfo[nid];
 		mctz = soft_limit_tree_node(nid);
 		if (mctz)
 			mem_cgroup_remove_exceeded(mz, mctz);
@@ -796,7 +797,7 @@ parent_nodeinfo(struct mem_cgroup_per_no
 	parent = parent_mem_cgroup(pn->memcg);
 	if (!parent)
 		return NULL;
-	return mem_cgroup_nodeinfo(parent, nid);
+	return parent->nodeinfo[nid];
 }
 
 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
@@ -1136,7 +1137,7 @@ struct mem_cgroup *mem_cgroup_iter(struc
 	if (reclaim) {
 		struct mem_cgroup_per_node *mz;
 
-		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
+		mz = root->nodeinfo[reclaim->pgdat->node_id];
 		iter = &mz->iter;
 
 		if (prev && reclaim->generation != iter->generation)
@@ -1238,7 +1239,7 @@ static void __invalidate_reclaim_iterato
 	int nid;
 
 	for_each_node(nid) {
-		mz = mem_cgroup_nodeinfo(from, nid);
+		mz = from->nodeinfo[nid];
 		iter = &mz->iter;
 		cmpxchg(&iter->position, dead_memcg, NULL);
 	}
@@ -2397,7 +2398,7 @@ static int memcg_hotplug_cpu_dead(unsign
 			struct mem_cgroup_per_node *pn;
 			long x;
 
-			pn = mem_cgroup_nodeinfo(memcg, nid);
+			pn = memcg->nodeinfo[nid];
 			lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
 
 			x = lstatc->count[i];
@@ -4098,7 +4099,7 @@ static int memcg_stat_show(struct seq_fi
 		unsigned long file_cost = 0;
 
 		for_each_online_pgdat(pgdat) {
-			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
+			mz = memcg->nodeinfo[pgdat->node_id];
 			anon_cost += mz->lruvec.anon_cost;
 			file_cost += mz->lruvec.file_cost;
 		}
_
Patches currently in -mm which might be from hannes@cmpxchg.org are
mm-page-writeback-simplify-memcg-handling-in-test_clear_page_writeback.patch
mm-memcontrol-fix-cpuhotplug-statistics-flushing.patch
mm-memcontrol-kill-mem_cgroup_nodeinfo.patch
mm-memcontrol-privatize-memcg_page_state-query-functions.patch
cgroup-rstat-support-cgroup1.patch
cgroup-rstat-punt-root-level-optimization-to-individual-controllers.patch
mm-memcontrol-switch-to-rstat.patch
mm-memcontrol-consolidate-lruvec-stat-flushing.patch
kselftests-cgroup-update-kmem-test-for-new-vmstat-implementation.patch