From: Leonardo Bras <leobras@redhat.com> To: Johannes Weiner <hannes@cmpxchg.org>, Michal Hocko <mhocko@kernel.org>, Roman Gushchin <roman.gushchin@linux.dev>, Shakeel Butt <shakeelb@google.com>, Muchun Song <muchun.song@linux.dev>, Andrew Morton <akpm@linux-foundation.org>, Marcelo Tosatti <mtosatti@redhat.com> Cc: Leonardo Bras <leobras@redhat.com>, cgroups@vger.kernel.org, linux-mm@kvack.org, linux-kernel@vger.kernel.org Subject: [PATCH v2 2/5] mm/memcontrol: Change stock_lock type from local_lock_t to spinlock_t Date: Wed, 25 Jan 2023 04:34:59 -0300 [thread overview] Message-ID: <20230125073502.743446-3-leobras@redhat.com> (raw) In-Reply-To: <20230125073502.743446-1-leobras@redhat.com> In this context, since it's using per-cpu variables, changing from local_lock to spinlock should not cause much performance impact and can allow operations such as stock draining to happen on remote cpus. Why performance would probably not get impacted: 1 - Since the lock is in the same cache line as the information that is written next, there is not much extra memory access cost for using the lock. 2 - Since it's a percpu struct, it should be rare for another cpu to share this cacheline, so cacheline invalidation should be rare, and writing to the lock should be cheap since there is always a write to the next struct members. 3 - Even the write in (2) could be pipelined and batched with following writes to the cacheline (such as the nr_pages member), further decreasing the impact of this change. 
Suggested-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Leonardo Bras <leobras@redhat.com> --- mm/memcontrol.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f8e86b88b3c7a..1d5c108413c83 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2172,7 +2172,7 @@ void unlock_page_memcg(struct page *page) } struct memcg_stock_pcp { - local_lock_t stock_lock; + spinlock_t stock_lock; /* Protects the percpu struct */ struct mem_cgroup *cached; /* this never be root cgroup */ unsigned int nr_pages; @@ -2190,7 +2190,7 @@ struct memcg_stock_pcp { }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct memcg_stock_pcp, memcg_stock) = { - .stock_lock = INIT_LOCAL_LOCK(stock_lock), + .stock_lock = __SPIN_LOCK_UNLOCKED(stock_lock), }; static DEFINE_MUTEX(percpu_charge_mutex); @@ -2235,15 +2235,15 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (nr_pages > MEMCG_CHARGE_BATCH) return ret; - local_lock_irqsave(&memcg_stock.stock_lock, flags); - stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); + if (memcg == stock->cached && stock->nr_pages >= nr_pages) { stock->nr_pages -= nr_pages; ret = true; } - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); return ret; } @@ -2280,14 +2280,14 @@ static void drain_local_stock(struct work_struct *dummy) * drain_stock races is that we always operate on local CPU stock * here with IRQ disabled */ - local_lock_irqsave(&memcg_stock.stock_lock, flags); - stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); + old = drain_obj_stock(stock); drain_stock(stock); clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); if (old) obj_cgroup_put(old); } @@ -2315,10 +2315,12 @@ static void 
__refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) { unsigned long flags; + struct memcg_stock_pcp *stock; - local_lock_irqsave(&memcg_stock.stock_lock, flags); + stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); __refill_stock(memcg, nr_pages); - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); } /* @@ -3165,8 +3167,8 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, unsigned long flags; int *bytes; - local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); /* * Save vmstat data in stock and skip vmstat array update unless @@ -3218,7 +3220,7 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, if (nr) mod_objcg_mlstate(objcg, pgdat, idx, nr); - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); if (old) obj_cgroup_put(old); } @@ -3229,15 +3231,15 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) unsigned long flags; bool ret = false; - local_lock_irqsave(&memcg_stock.stock_lock, flags); - stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); + if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { stock->nr_bytes -= nr_bytes; ret = true; } - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); return ret; } @@ -3327,9 +3329,9 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, unsigned long flags; unsigned int nr_pages = 0; - local_lock_irqsave(&memcg_stock.stock_lock, flags); - stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); + if (stock->cached_objcg != objcg) { /* reset if necessary */ old = 
drain_obj_stock(stock); obj_cgroup_get(objcg); @@ -3345,7 +3347,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, stock->nr_bytes &= (PAGE_SIZE - 1); } - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); if (old) obj_cgroup_put(old); -- 2.39.1
WARNING: multiple messages have this Message-ID (diff)
From: Leonardo Bras <leobras-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org> To: Johannes Weiner <hannes-druUgvl0LCNAfugRpC6u6w@public.gmane.org>, Michal Hocko <mhocko-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>, Roman Gushchin <roman.gushchin-fxUVXftIFDnyG1zEObXtfA@public.gmane.org>, Shakeel Butt <shakeelb-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org>, Muchun Song <muchun.song-fxUVXftIFDnyG1zEObXtfA@public.gmane.org>, Andrew Morton <akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>, Marcelo Tosatti <mtosatti-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org> Cc: Leonardo Bras <leobras-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>, cgroups-u79uwXL29TY76Z2rM5mHXA@public.gmane.org, linux-mm-Bw31MaZKKs3YtjvyW6yDsg@public.gmane.org, linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org Subject: [PATCH v2 2/5] mm/memcontrol: Change stock_lock type from local_lock_t to spinlock_t Date: Wed, 25 Jan 2023 04:34:59 -0300 [thread overview] Message-ID: <20230125073502.743446-3-leobras@redhat.com> (raw) In-Reply-To: <20230125073502.743446-1-leobras-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org> In this context, since it's using per-cpu variables, changing from local_lock to spinlock should not cause much performance impact and can allow operations such as stock draining to happen on remote cpus. Why performance would probably not get impacted: 1 - Since the lock is in the same cache line as the information that is written next, there is not much extra memory access cost for using the lock. 2 - Since it's a percpu struct, it should be rare for another cpu to share this cacheline, so cacheline invalidation should be rare, and writing to the lock should be cheap since there is always a write to the next struct members. 3 - Even the write in (2) could be pipelined and batched with following writes to the cacheline (such as the nr_pages member), further decreasing the impact of this change. 
Suggested-by: Marcelo Tosatti <mtosatti-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org> Signed-off-by: Leonardo Bras <leobras-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org> --- mm/memcontrol.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f8e86b88b3c7a..1d5c108413c83 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2172,7 +2172,7 @@ void unlock_page_memcg(struct page *page) } struct memcg_stock_pcp { - local_lock_t stock_lock; + spinlock_t stock_lock; /* Protects the percpu struct */ struct mem_cgroup *cached; /* this never be root cgroup */ unsigned int nr_pages; @@ -2190,7 +2190,7 @@ struct memcg_stock_pcp { }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct memcg_stock_pcp, memcg_stock) = { - .stock_lock = INIT_LOCAL_LOCK(stock_lock), + .stock_lock = __SPIN_LOCK_UNLOCKED(stock_lock), }; static DEFINE_MUTEX(percpu_charge_mutex); @@ -2235,15 +2235,15 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (nr_pages > MEMCG_CHARGE_BATCH) return ret; - local_lock_irqsave(&memcg_stock.stock_lock, flags); - stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); + if (memcg == stock->cached && stock->nr_pages >= nr_pages) { stock->nr_pages -= nr_pages; ret = true; } - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); return ret; } @@ -2280,14 +2280,14 @@ static void drain_local_stock(struct work_struct *dummy) * drain_stock races is that we always operate on local CPU stock * here with IRQ disabled */ - local_lock_irqsave(&memcg_stock.stock_lock, flags); - stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); + old = drain_obj_stock(stock); drain_stock(stock); clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); if (old) 
obj_cgroup_put(old); } @@ -2315,10 +2315,12 @@ static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) { unsigned long flags; + struct memcg_stock_pcp *stock; - local_lock_irqsave(&memcg_stock.stock_lock, flags); + stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); __refill_stock(memcg, nr_pages); - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); } /* @@ -3165,8 +3167,8 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, unsigned long flags; int *bytes; - local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); /* * Save vmstat data in stock and skip vmstat array update unless @@ -3218,7 +3220,7 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, if (nr) mod_objcg_mlstate(objcg, pgdat, idx, nr); - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); if (old) obj_cgroup_put(old); } @@ -3229,15 +3231,15 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) unsigned long flags; bool ret = false; - local_lock_irqsave(&memcg_stock.stock_lock, flags); - stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); + if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { stock->nr_bytes -= nr_bytes; ret = true; } - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); return ret; } @@ -3327,9 +3329,9 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, unsigned long flags; unsigned int nr_pages = 0; - local_lock_irqsave(&memcg_stock.stock_lock, flags); - stock = this_cpu_ptr(&memcg_stock); + spin_lock_irqsave(&stock->stock_lock, flags); + if 
(stock->cached_objcg != objcg) { /* reset if necessary */ old = drain_obj_stock(stock); obj_cgroup_get(objcg); @@ -3345,7 +3347,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, stock->nr_bytes &= (PAGE_SIZE - 1); } - local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + spin_unlock_irqrestore(&stock->stock_lock, flags); if (old) obj_cgroup_put(old); -- 2.39.1
next prev parent reply other threads:[~2023-01-25 7:36 UTC|newest] Thread overview: 85+ messages / expand[flat|nested] mbox.gz Atom feed top 2023-01-25 7:34 [PATCH v2 0/5] Introduce memcg_stock_pcp remote draining Leonardo Bras 2023-01-25 7:34 ` Leonardo Bras 2023-01-25 7:34 ` [PATCH v2 1/5] mm/memcontrol: Align percpu memcg_stock to cache Leonardo Bras 2023-01-25 7:34 ` Leonardo Bras 2023-01-25 7:34 ` Leonardo Bras [this message] 2023-01-25 7:34 ` [PATCH v2 2/5] mm/memcontrol: Change stock_lock type from local_lock_t to spinlock_t Leonardo Bras 2023-01-25 7:35 ` [PATCH v2 3/5] mm/memcontrol: Reorder memcg_stock_pcp members to avoid holes Leonardo Bras 2023-01-25 7:35 ` Leonardo Bras 2023-01-25 7:35 ` [PATCH v2 4/5] mm/memcontrol: Perform all stock drain in current CPU Leonardo Bras 2023-01-25 7:35 ` [PATCH v2 5/5] mm/memcontrol: Remove flags from memcg_stock_pcp Leonardo Bras 2023-01-25 7:35 ` Leonardo Bras 2023-01-25 8:33 ` [PATCH v2 0/5] Introduce memcg_stock_pcp remote draining Michal Hocko 2023-01-25 8:33 ` Michal Hocko 2023-01-25 11:06 ` Leonardo Brás 2023-01-25 11:06 ` Leonardo Brás 2023-01-25 11:39 ` Michal Hocko 2023-01-25 11:39 ` Michal Hocko 2023-01-25 18:22 ` Marcelo Tosatti 2023-01-25 18:22 ` Marcelo Tosatti 2023-01-25 23:14 ` Roman Gushchin 2023-01-25 23:14 ` Roman Gushchin 2023-01-26 7:41 ` Michal Hocko 2023-01-26 7:41 ` Michal Hocko 2023-01-26 18:03 ` Marcelo Tosatti 2023-01-26 19:20 ` Michal Hocko 2023-01-27 0:32 ` Marcelo Tosatti 2023-01-27 0:32 ` Marcelo Tosatti 2023-01-27 6:58 ` Michal Hocko 2023-01-27 6:58 ` Michal Hocko 2023-02-01 18:31 ` Roman Gushchin 2023-01-26 23:12 ` Roman Gushchin 2023-01-27 7:11 ` Michal Hocko 2023-01-27 7:11 ` Michal Hocko 2023-01-27 7:22 ` Leonardo Brás 2023-01-27 8:12 ` Leonardo Brás 2023-01-27 8:12 ` Leonardo Brás 2023-01-27 9:23 ` Michal Hocko 2023-01-27 9:23 ` Michal Hocko 2023-01-27 13:03 ` Frederic Weisbecker 2023-01-27 13:03 ` Frederic Weisbecker 2023-01-27 13:58 ` Michal Hocko 2023-01-27 13:58 ` Michal Hocko 
2023-01-27 18:18 ` Roman Gushchin 2023-01-27 18:18 ` Roman Gushchin 2023-02-03 15:21 ` Michal Hocko 2023-02-03 15:21 ` Michal Hocko 2023-02-03 19:25 ` Roman Gushchin 2023-02-03 19:25 ` Roman Gushchin 2023-02-13 13:36 ` Michal Hocko 2023-02-13 13:36 ` Michal Hocko 2023-01-27 7:14 ` Leonardo Brás 2023-01-27 7:14 ` Leonardo Brás 2023-01-27 7:20 ` Michal Hocko 2023-01-27 7:20 ` Michal Hocko 2023-01-27 7:35 ` Leonardo Brás 2023-01-27 9:29 ` Michal Hocko 2023-01-27 19:29 ` Leonardo Brás 2023-01-27 19:29 ` Leonardo Brás 2023-01-27 23:50 ` Roman Gushchin 2023-01-26 18:19 ` Marcelo Tosatti 2023-01-26 18:19 ` Marcelo Tosatti 2023-01-27 5:40 ` Leonardo Brás 2023-01-27 5:40 ` Leonardo Brás 2023-01-26 2:01 ` Hillf Danton 2023-01-26 7:45 ` Michal Hocko 2023-01-26 7:45 ` Michal Hocko 2023-01-26 18:14 ` Marcelo Tosatti 2023-01-26 18:14 ` Marcelo Tosatti 2023-01-26 19:13 ` Michal Hocko 2023-01-26 19:13 ` Michal Hocko 2023-01-27 6:55 ` Leonardo Brás 2023-01-27 6:55 ` Leonardo Brás 2023-01-31 11:35 ` Marcelo Tosatti 2023-01-31 11:35 ` Marcelo Tosatti 2023-02-01 4:36 ` Leonardo Brás 2023-02-01 12:52 ` Michal Hocko 2023-02-01 12:52 ` Michal Hocko 2023-02-01 12:41 ` Michal Hocko 2023-02-01 12:41 ` Michal Hocko 2023-02-04 4:55 ` Leonardo Brás 2023-02-04 4:55 ` Leonardo Brás 2023-02-05 19:49 ` Roman Gushchin 2023-02-07 3:18 ` Leonardo Brás 2023-02-08 19:23 ` Roman Gushchin 2023-02-08 19:23 ` Roman Gushchin
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20230125073502.743446-3-leobras@redhat.com \ --to=leobras@redhat.com \ --cc=akpm@linux-foundation.org \ --cc=cgroups@vger.kernel.org \ --cc=hannes@cmpxchg.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=mhocko@kernel.org \ --cc=mtosatti@redhat.com \ --cc=muchun.song@linux.dev \ --cc=roman.gushchin@linux.dev \ --cc=shakeelb@google.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.