From: Pavel Tatashin <pasha.tatashin@oracle.com>
To: linux-kernel@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-mm@kvack.org, linuxppc-dev@lists.ozlabs.org,
	linux-s390@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	x86@kernel.org, kasan-dev@googlegroups.com,
	borntraeger@de.ibm.com, heiko.carstens@de.ibm.com,
	davem@davemloft.net, willy@infradead.org, mhocko@kernel.org,
	ard.biesheuvel@linaro.org, mark.rutland@arm.com,
	will.deacon@arm.com, catalin.marinas@arm.com, sam@ravnborg.org,
	mgorman@techsingularity.net, steven.sistare@oracle.com,
	daniel.m.jordan@oracle.com, bob.picco@oracle.com
Subject: [PATCH v9 12/12] mm: stop zeroing memory during allocation in vmemmap
Date: Wed, 20 Sep 2017 16:17:14 -0400
Message-ID: <20170920201714.19817-13-pasha.tatashin@oracle.com>
In-Reply-To: <20170920201714.19817-1-pasha.tatashin@oracle.com>

vmemmap_alloc_block() will no longer zero the block, so zero memory
at its call sites for everything except struct pages.  Struct page
memory is zeroed by struct page initialization.

Replace the allocators in sparse-vmemmap with the non-zeroing variants.
The performance improvement comes from zeroing the memory in parallel
when struct pages are initialized.

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Steven Sistare <steven.sistare@oracle.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
---
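For illustration, the split between the two kinds of callers after this
patch looks roughly like the sketch below (not part of the change
itself; the zeroing wrapper is the vmemmap_alloc_block_zero() helper
added in this patch, and the struct page path relies on the per-page
zeroing introduced earlier in this series):

	/*
	 * Page-table pages still need explicit zeroing, so they go
	 * through the new wrapper, which memsets at the call site:
	 */
	void *pte = vmemmap_alloc_block_zero(PAGE_SIZE, node);

	/*
	 * Struct page memory is handed out without __GFP_ZERO; it is
	 * zeroed later, in parallel, as each struct page is
	 * initialized:
	 */
	void *map = vmemmap_alloc_block(PAGE_SIZE, node);

The explicit memset() on the page-table path is a small cost, since
page-table pages are only a small fraction of vmemmap allocations; the
bulk is struct page memory, which now skips zeroing here entirely.
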
 include/linux/mm.h  | 11 +++++++++++
 mm/sparse-vmemmap.c | 15 +++++++--------
 mm/sparse.c         |  6 +++---
 3 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a7bba4ce79ba..25848764570f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2501,6 +2501,17 @@ static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
 	return __vmemmap_alloc_block_buf(size, node, NULL);
 }
 
+static inline void *vmemmap_alloc_block_zero(unsigned long size, int node)
+{
+	void *p = vmemmap_alloc_block(size, node);
+
+	if (!p)
+		return NULL;
+	memset(p, 0, size);
+
+	return p;
+}
+
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
 			       int node);
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d1a39b8051e0..c2f5654e7c9d 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -41,7 +41,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
 				unsigned long align,
 				unsigned long goal)
 {
-	return memblock_virt_alloc_try_nid(size, align, goal,
+	return memblock_virt_alloc_try_nid_raw(size, align, goal,
 					    BOOTMEM_ALLOC_ACCESSIBLE, node);
 }
 
@@ -54,9 +54,8 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 	if (slab_is_available()) {
 		struct page *page;
 
-		page = alloc_pages_node(node,
-			GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
-			get_order(size));
+		page = alloc_pages_node(node, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
+					get_order(size));
 		if (page)
 			return page_address(page);
 		return NULL;
@@ -183,7 +182,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
-		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		pmd_populate_kernel(&init_mm, pmd, p);
@@ -195,7 +194,7 @@ pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 {
 	pud_t *pud = pud_offset(p4d, addr);
 	if (pud_none(*pud)) {
-		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		pud_populate(&init_mm, pud, p);
@@ -207,7 +206,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
 {
 	p4d_t *p4d = p4d_offset(pgd, addr);
 	if (p4d_none(*p4d)) {
-		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		p4d_populate(&init_mm, p4d, p);
@@ -219,7 +218,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 {
 	pgd_t *pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
-		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		pgd_populate(&init_mm, pgd, p);
diff --git a/mm/sparse.c b/mm/sparse.c
index 83b3bf6461af..d22f51bb7c79 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -437,9 +437,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 	}
 
 	size = PAGE_ALIGN(size);
-	map = memblock_virt_alloc_try_nid(size * map_count,
-					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
-					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
+	map = memblock_virt_alloc_try_nid_raw(size * map_count,
+					      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					      BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
 	if (map) {
 		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 			if (!present_section_nr(pnum))
-- 
2.14.1
