linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Bharata B Rao <bharata@linux.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org, aneesh.kumar@linux.ibm.com,
	dennis@kernel.org, tj@kernel.org, cl@linux.com,
	akpm@linux-foundation.org, amakhalov@vmware.com, guro@fb.com,
	vbabka@suse.cz, srikar@linux.vnet.ibm.com, psampat@linux.ibm.com,
	ego@linux.vnet.ibm.com, Bharata B Rao <bharata@linux.ibm.com>
Subject: [RFC PATCH v0 2/3] percpu: Limit percpu allocator to online cpus
Date: Tue,  1 Jun 2021 12:21:46 +0530	[thread overview]
Message-ID: <20210601065147.53735-3-bharata@linux.ibm.com> (raw)
In-Reply-To: <20210601065147.53735-1-bharata@linux.ibm.com>

Now that the percpu allocator supports growing memory
for newly onlined CPUs at hotplug time, limit the allocation,
mapping and memcg charging of memory to online CPUs.

Also change the percpu memory reporting in /proc/meminfo
to reflect the populated pages of only online CPUs.

TODO: Address percpu memcg charging and uncharging from
CPU hotplug callback.

Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
---
 mm/percpu-vm.c | 12 ++++++------
 mm/percpu.c    | 20 +++++++++++++-------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 3250e1c9aeaf..79ce104c963a 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -118,7 +118,7 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
 	unsigned int cpu;
 	int i;
 
-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		for (i = page_start; i < page_end; i++) {
 			struct page *page = pages[pcpu_page_idx(cpu, i)];
 
@@ -149,7 +149,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 
 	gfp |= __GFP_HIGHMEM;
 
-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		for (i = page_start; i < page_end; i++) {
 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
@@ -164,7 +164,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 	while (--i >= page_start)
 		__free_page(pages[pcpu_page_idx(cpu, i)]);
 
-	for_each_possible_cpu(tcpu) {
+	for_each_online_cpu(tcpu) {
 		if (tcpu == cpu)
 			break;
 		for (i = page_start; i < page_end; i++)
@@ -248,7 +248,7 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
 	unsigned int cpu;
 	int i;
 
-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		for (i = page_start; i < page_end; i++) {
 			struct page *page;
 
@@ -344,7 +344,7 @@ static int pcpu_map_pages(struct pcpu_chunk *chunk,
 	unsigned int cpu, tcpu;
 	int i, err;
 
-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
 				       &pages[pcpu_page_idx(cpu, page_start)],
 				       page_end - page_start);
@@ -357,7 +357,7 @@ static int pcpu_map_pages(struct pcpu_chunk *chunk,
 	}
 	return 0;
 err:
-	for_each_possible_cpu(tcpu) {
+	for_each_online_cpu(tcpu) {
 		if (tcpu == cpu)
 			break;
 		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
diff --git a/mm/percpu.c b/mm/percpu.c
index ca8ca541bede..83b6bcfcfa80 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1594,7 +1594,7 @@ static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
 	if (!objcg)
 		return PCPU_CHUNK_ROOT;
 
-	if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
+	if (obj_cgroup_charge(objcg, gfp, size * num_online_cpus())) {
 		obj_cgroup_put(objcg);
 		return PCPU_FAIL_ALLOC;
 	}
@@ -1615,10 +1615,10 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
 
 		rcu_read_lock();
 		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
-				size * num_possible_cpus());
+				size * num_online_cpus());
 		rcu_read_unlock();
 	} else {
-		obj_cgroup_uncharge(objcg, size * num_possible_cpus());
+		obj_cgroup_uncharge(objcg, size * num_online_cpus());
 		obj_cgroup_put(objcg);
 	}
 }
@@ -1633,11 +1633,11 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
 	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
 	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
 
-	obj_cgroup_uncharge(objcg, size * num_possible_cpus());
+	obj_cgroup_uncharge(objcg, size * num_online_cpus());
 
 	rcu_read_lock();
 	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
-			-(size * num_possible_cpus()));
+			-(size * num_online_cpus()));
 	rcu_read_unlock();
 
 	obj_cgroup_put(objcg);
@@ -1680,6 +1680,9 @@ static void pcpu_cpuhp_deregister(struct pcpu_chunk *chunk,
 		}
 }
 
+/*
+ * TODO: Grow the memcg charge
+ */
 static void __pcpu_cpuhp_setup(enum pcpu_chunk_type type, unsigned int cpu)
 {
 	int slot;
@@ -1720,6 +1723,9 @@ static int percpu_cpuhp_setup(unsigned int cpu)
 	return 0;
 }
 
+/*
+ * TODO: Reduce the memcg charge
+ */
 static void __pcpu_cpuhp_destroy(enum pcpu_chunk_type type, unsigned int cpu)
 {
 	int slot;
@@ -2000,7 +2006,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 		pcpu_schedule_balance_work();
 
 	/* clear the areas and return address relative to base address */
-	for_each_possible_cpu(cpu)
+	for_each_online_cpu(cpu)
 		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
 
 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
@@ -3372,7 +3378,7 @@ void __init setup_per_cpu_areas(void)
  */
 unsigned long pcpu_nr_pages(void)
 {
-	return pcpu_nr_populated * pcpu_nr_units;
+	return pcpu_nr_populated * num_online_cpus();
 }
 
 /*
-- 
2.31.1



  parent reply	other threads:[~2021-06-01  6:52 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-06-01  6:51 [RFC PATCH v0 0/3] CPU hotplug awareness in percpu allocator Bharata B Rao
2021-06-01  6:51 ` [RFC PATCH v0 1/3] percpu: CPU hotplug support for alloc_percpu() Bharata B Rao
2021-06-01  6:51 ` Bharata B Rao [this message]
2021-06-01  6:51 ` [RFC PATCH v0 3/3] percpu: Avoid using percpu ptrs of non-existing cpus Bharata B Rao
2021-06-02 15:01 ` [RFC PATCH v0 0/3] CPU hotplug awareness in percpu allocator Dennis Zhou
2021-06-04  5:01   ` Bharata B Rao

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210601065147.53735-3-bharata@linux.ibm.com \
    --to=bharata@linux.ibm.com \
    --cc=akpm@linux-foundation.org \
    --cc=amakhalov@vmware.com \
    --cc=aneesh.kumar@linux.ibm.com \
    --cc=cl@linux.com \
    --cc=dennis@kernel.org \
    --cc=ego@linux.vnet.ibm.com \
    --cc=guro@fb.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=psampat@linux.ibm.com \
    --cc=srikar@linux.vnet.ibm.com \
    --cc=tj@kernel.org \
    --cc=vbabka@suse.cz \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).