linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Gavin Shan <gshan@redhat.com>
To: linux-arm-kernel@lists.infradead.org
Cc: mark.rutland@arm.com, anshuman.khandual@arm.com,
	catalin.marinas@arm.com, will@kernel.org,
	linux-kernel@vger.kernel.org, shan.gavin@gmail.com
Subject: [PATCH 2/2] arm64/mm: Enable color zero pages
Date: Wed, 16 Sep 2020 13:25:23 +1000	[thread overview]
Message-ID: <20200916032523.13011-3-gshan@redhat.com> (raw)
In-Reply-To: <20200916032523.13011-1-gshan@redhat.com>

This enables color zero pages by allocating contiguous page frames
for them. The number of pages for this is determined by L1 dCache
(or iCache) size, which is probed from the hardware.

   * Add cache_total_size() to return L1 dCache (or iCache) size

   * Implement setup_zero_pages(), which is called after the page
     allocator begins to work, to allocate the contiguous pages
     needed by color zero pages.

   * Reworked ZERO_PAGE() and define __HAVE_COLOR_ZERO_PAGE.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/cache.h   | 22 ++++++++++++++++++++
 arch/arm64/include/asm/pgtable.h |  9 ++++++--
 arch/arm64/kernel/cacheinfo.c    | 34 +++++++++++++++++++++++++++++++
 arch/arm64/mm/init.c             | 35 ++++++++++++++++++++++++++++++++
 arch/arm64/mm/mmu.c              |  7 -------
 5 files changed, 98 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a4d1b5f771f6..420e9dde2c51 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -39,6 +39,27 @@
 #define CLIDR_LOC(clidr)	(((clidr) >> CLIDR_LOC_SHIFT) & 0x7)
 #define CLIDR_LOUIS(clidr)	(((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7)
 
+#define CSSELR_TND_SHIFT	4
+#define CSSELR_TND_MASK		(UL(1) << CSSELR_TND_SHIFT)
+#define CSSELR_LEVEL_SHIFT	1
+#define CSSELR_LEVEL_MASK	(UL(7) << CSSELR_LEVEL_SHIFT)
+#define CSSELR_IND_SHIFT	0
+#define CSSERL_IND_MASK		(UL(1) << CSSELR_IND_SHIFT)
+
+#define CCSIDR_64_LS_SHIFT	0
+#define CCSIDR_64_LS_MASK	(UL(7) << CCSIDR_64_LS_SHIFT)
+#define CCSIDR_64_ASSOC_SHIFT	3
+#define CCSIDR_64_ASSOC_MASK	(UL(0x1FFFFF) << CCSIDR_64_ASSOC_SHIFT)
+#define CCSIDR_64_SET_SHIFT	32
+#define CCSIDR_64_SET_MASK	(UL(0xFFFFFF) << CCSIDR_64_SET_SHIFT)
+
+#define CCSIDR_32_LS_SHIFT	0
+#define CCSIDR_32_LS_MASK	(UL(7) << CCSIDR_32_LS_SHIFT)
+#define CCSIDR_32_ASSOC_SHIFT	3
+#define CCSIDR_32_ASSOC_MASK	(UL(0x3FF) << CCSIDR_32_ASSOC_SHIFT)
+#define CCSIDR_32_SET_SHIFT	13
+#define CCSIDR_32_SET_MASK	(UL(0x7FFF) << CCSIDR_32_SET_SHIFT)
+
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
  * sure that all such allocations are cache aligned. Otherwise,
@@ -89,6 +110,7 @@ static inline int cache_line_size_of_cpu(void)
 }
 
 int cache_line_size(void);
+int cache_total_size(void);
 
 /*
  * Read the effective value of CTR_EL0.
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6953498f4d40..5cb5f8bb090d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -54,8 +54,13 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define __HAVE_COLOR_ZERO_PAGE
+#define ZERO_PAGE(vaddr)				\
+	(virt_to_page((void *)(empty_zero_page +	\
+	(((unsigned long)(vaddr)) & zero_page_mask))))
 
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
 
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 7fa6828bb488..d3b9ab757014 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -43,6 +43,40 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
 	this_leaf->type = type;
 }
 
+int cache_total_size(void)
+{
+	unsigned int ctype, size;
+	unsigned long val;
+	bool ccidx = false;
+
+	/* Check first level cache is supported */
+	ctype = get_cache_type(1);
+	if (ctype == CACHE_TYPE_NOCACHE)
+		return 0;
+
+	/* ARMv8.3-CCIDX is supported or not */
+	val = read_sanitised_ftr_reg(SYS_ID_MMFR4_EL1);
+	ccidx = !!(val & (UL(0xF) << ID_AA64MMFR2_CCIDX_SHIFT));
+
+	/* Retrieve the information and calculate the total size */
+	val = FIELD_PREP(CSSELR_LEVEL_MASK, 0) |
+	      FIELD_PREP(CSSERL_IND_MASK, 0);
+	write_sysreg(val, csselr_el1);
+
+	val = read_sysreg(ccsidr_el1);
+	if (ccidx) {
+		size = (1 << FIELD_GET(CCSIDR_64_LS_MASK, val)) *
+		       (FIELD_GET(CCSIDR_64_ASSOC_MASK, val) + 1) *
+		       (FIELD_GET(CCSIDR_64_SET_MASK, val) + 1);
+	} else {
+		size = (1 << FIELD_GET(CCSIDR_32_LS_MASK, val)) *
+		       (FIELD_GET(CCSIDR_32_ASSOC_MASK, val) + 1) *
+		       (FIELD_GET(CCSIDR_32_SET_MASK, val) + 1);
+	}
+
+	return size;
+}
+
 static int __init_cache_level(unsigned int cpu)
 {
 	unsigned int ctype, level, leaves, fw_level;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 481d22c32a2e..ca6b3cddafb7 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -69,6 +69,11 @@ EXPORT_SYMBOL(vmemmap);
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
 
+unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+unsigned long zero_page_mask;
+EXPORT_SYMBOL(zero_page_mask);
+
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
@@ -507,6 +512,35 @@ static void __init free_unused_memmap(void)
 }
 #endif	/* !CONFIG_SPARSEMEM_VMEMMAP */
 
+static void __init setup_zero_pages(void)
+{
+	struct page *page;
+	int order, size, i;
+
+	size = cache_total_size();
+	order = size > 0 ? get_order(PAGE_ALIGN(size)) : 0;
+	order = min(order, MAX_ORDER - 1);
+
+	do {
+		empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+						   order);
+		if (empty_zero_page)
+			break;
+	} while (--order >= 0);
+
+	if (!empty_zero_page)
+		panic("%s: out of memory\n", __func__);
+
+	page = virt_to_page((void *) empty_zero_page);
+	split_page(page, order);
+	for (i = 1 << order; i > 0; i--) {
+		mark_page_reserved(page);
+		page++;
+	}
+
+	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much memory
  * is free.  This is done after various parts of the system have claimed their
@@ -527,6 +561,7 @@ void __init mem_init(void)
 #endif
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
+	setup_zero_pages();
 
 	mem_init_print_info(NULL);
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 75df62fea1b6..736939ab3b4f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -49,13 +49,6 @@ EXPORT_SYMBOL(vabits_actual);
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
 
-/*
- * Empty_zero_page is a special page that is used for zero-initialized data
- * and COW.
- */
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
-- 
2.23.0


  parent reply	other threads:[~2020-09-16  3:26 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-09-16  3:25 [PATCH 0/2] arm64/mm: Enable color zero pages Gavin Shan
2020-09-16  3:25 ` [PATCH 1/2] arm64/mm: Introduce zero PGD table Gavin Shan
2020-09-16  3:25 ` Gavin Shan [this message]
2020-09-16  8:28   ` [PATCH 2/2] arm64/mm: Enable color zero pages Will Deacon
2020-09-16 10:46     ` Robin Murphy
2020-09-17  4:36       ` Gavin Shan
2020-09-17  3:35     ` Gavin Shan
2020-09-17 10:22       ` Robin Murphy
2020-09-21  2:56         ` Gavin Shan
2020-09-21 12:40           ` Anshuman Khandual
2020-09-22 12:39             ` Gavin Shan
2020-09-18 12:10   ` kernel test robot

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200916032523.13011-3-gshan@redhat.com \
    --to=gshan@redhat.com \
    --cc=anshuman.khandual@arm.com \
    --cc=catalin.marinas@arm.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mark.rutland@arm.com \
    --cc=shan.gavin@gmail.com \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).