From: Pingfan Liu <kernelfans@gmail.com>
To: linux-arm-kernel@lists.infradead.org
Cc: Pingfan Liu <kernelfans@gmail.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
	Kristina Martsenko <kristina.martsenko@arm.com>,
	James Morse <james.morse@arm.com>,
	Steven Price <steven.price@arm.com>,
	Jonathan Cameron <Jonathan.Cameron@huawei.com>,
	Pavel Tatashin <pasha.tatashin@soleen.com>,
	Anshuman Khandual <anshuman.khandual@arm.com>,
	Atish Patra <atish.patra@wdc.com>,
	Mike Rapoport <rppt@kernel.org>,
	Logan Gunthorpe <logang@deltatee.com>,
	Mark Brown <broonie@kernel.org>
Subject: [RFC 1/8] arm64/mm: split out __create_pgd_mapping() routines
Date: Sat, 10 Apr 2021 17:56:47 +0800
Message-ID: <20210410095654.24102-2-kernelfans@gmail.com>
In-Reply-To: <20210410095654.24102-1-kernelfans@gmail.com>

Split out the page-table setup routines used by __create_pgd_mapping()
into a separate file, so that they can be compiled twice to generate
two sets of operations: one for CONFIG_PGTABLE_LEVELS and one for
CONFIG_PGTABLE_LEVELS + 1.
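
In outline, the second set is generated by compiling the split-out
routines a second time with the level count bumped; this mirrors the
code added in idmap_mmu.c below:

	/* idmap_mmu.c: build the walker with one extra level */
	#undef CONFIG_PGTABLE_LEVELS
	#define CONFIG_PGTABLE_LEVELS	EXTEND_LEVEL	/* original + 1 */
	#include "mmu_include.c"	/* emits a second __create_pgd_mapping() */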

The set generated with 'CONFIG_PGTABLE_LEVELS + 1' can later be used
for the idmap when VA_BITS is too small to cover system RAM, i.e. when
RAM is located sufficiently high in the physical address space.
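
As a worked illustration (addresses assumed for the sketch, not taken
from a real platform):

	/*
	 * 4K pages with VA_BITS=39 give 3 page-table levels, so the
	 * idmap can only reach physical addresses below 1UL << 39
	 * (512GB).  RAM starting at e.g. 0x80_8000_0000 (~514GB) is
	 * out of reach; a walker compiled with one extra level
	 * (CONFIG_PGTABLE_LEVELS + 1 = 4) extends the reachable
	 * range to 1UL << 48.
	 */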

With that in place, the idmap can be created by calling
__create_pgd_mapping() directly.
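
A minimal usage sketch (hypothetical caller, root table, and flags;
the real call sites are wired up by later patches in this series):

	extern pgd_t extended_idmap_pg_dir[];	/* assumed root table */

	/* Identity-map [phys, phys + size) with the extra-level walker. */
	__create_pgd_mapping_extend(extended_idmap_pg_dir, phys, phys, size,
				    PAGE_KERNEL_EXEC, early_pgtable_alloc, 0);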

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Atish Patra <atish.patra@wdc.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Mark Brown <broonie@kernel.org>
To: linux-arm-kernel@lists.infradead.org
---
 arch/arm64/Kconfig          |   4 +
 arch/arm64/mm/Makefile      |   2 +
 arch/arm64/mm/idmap_mmu.c   |  45 ++++++
 arch/arm64/mm/mmu.c         | 263 +-----------------------------------
 arch/arm64/mm/mmu_include.c | 262 +++++++++++++++++++++++++++++++++++
 5 files changed, 315 insertions(+), 261 deletions(-)
 create mode 100644 arch/arm64/mm/idmap_mmu.c
 create mode 100644 arch/arm64/mm/mmu_include.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e4e1b6550115..989fc501a1b4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -327,6 +327,10 @@ config PGTABLE_LEVELS
 	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
 	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
 
+config IDMAP_PGTABLE_EXPAND
+	def_bool y
+	depends on (ARM64_4K_PAGES && ARM64_VA_BITS_39) || (ARM64_64K_PAGES && ARM64_VA_BITS_42)
+
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index f188c9092696..f9283cb9a201 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,6 +3,8 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   cache.o copypage.o flush.o \
 				   ioremap.o mmap.o pgd.o mmu.o \
 				   context.o proc.o pageattr.o
+
+obj-$(CONFIG_IDMAP_PGTABLE_EXPAND)	+= idmap_mmu.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE)	+= ptdump.o
 obj-$(CONFIG_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
diff --git a/arch/arm64/mm/idmap_mmu.c b/arch/arm64/mm/idmap_mmu.c
new file mode 100644
index 000000000000..7e9a4f4017d3
--- /dev/null
+++ b/arch/arm64/mm/idmap_mmu.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/barrier.h>
+#include <asm/cputype.h>
+#include <asm/fixmap.h>
+#include <asm/kasan.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <linux/sizes.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/ptdump.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_IDMAP_PGTABLE_EXPAND
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#define EXTEND_LEVEL 3
+#elif CONFIG_PGTABLE_LEVELS == 3
+#define EXTEND_LEVEL 4
+#endif
+
+#undef CONFIG_PGTABLE_LEVELS
+#define CONFIG_PGTABLE_LEVELS EXTEND_LEVEL
+
+
+#include "./mmu_include.c"
+
+void __create_pgd_mapping_extend(pgd_t *pgdir, phys_addr_t phys,
+				 unsigned long virt, phys_addr_t size,
+				 pgprot_t prot,
+				 phys_addr_t (*pgtable_alloc)(int),
+				 int flags)
+{
+	__create_pgd_mapping(pgdir, phys, virt, size, prot, pgtable_alloc, flags);
+}
+#endif
+
+
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5d9550fdb9cf..56e4f25e8d6d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -37,9 +37,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
-#define NO_BLOCK_MAPPINGS	BIT(0)
-#define NO_CONT_MAPPINGS	BIT(1)
-
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
@@ -116,264 +113,6 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
 	return phys;
 }
 
-static bool pgattr_change_is_safe(u64 old, u64 new)
-{
-	/*
-	 * The following mapping attributes may be updated in live
-	 * kernel mappings without the need for break-before-make.
-	 */
-	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
-
-	/* creating or taking down mappings is always safe */
-	if (old == 0 || new == 0)
-		return true;
-
-	/* live contiguous mappings may not be manipulated at all */
-	if ((old | new) & PTE_CONT)
-		return false;
-
-	/* Transitioning from Non-Global to Global is unsafe */
-	if (old & ~new & PTE_NG)
-		return false;
-
-	/*
-	 * Changing the memory type between Normal and Normal-Tagged is safe
-	 * since Tagged is considered a permission attribute from the
-	 * mismatched attribute aliases perspective.
-	 */
-	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
-	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
-	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
-	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
-		mask |= PTE_ATTRINDX_MASK;
-
-	return ((old ^ new) & ~mask) == 0;
-}
-
-static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
-		     phys_addr_t phys, pgprot_t prot)
-{
-	pte_t *ptep;
-
-	ptep = pte_set_fixmap_offset(pmdp, addr);
-	do {
-		pte_t old_pte = READ_ONCE(*ptep);
-
-		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
-
-		/*
-		 * After the PTE entry has been populated once, we
-		 * only allow updates to the permission attributes.
-		 */
-		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
-					      READ_ONCE(pte_val(*ptep))));
-
-		phys += PAGE_SIZE;
-	} while (ptep++, addr += PAGE_SIZE, addr != end);
-
-	pte_clear_fixmap();
-}
-
-static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
-				unsigned long end, phys_addr_t phys,
-				pgprot_t prot,
-				phys_addr_t (*pgtable_alloc)(int),
-				int flags)
-{
-	unsigned long next;
-	pmd_t pmd = READ_ONCE(*pmdp);
-
-	BUG_ON(pmd_sect(pmd));
-	if (pmd_none(pmd)) {
-		phys_addr_t pte_phys;
-		BUG_ON(!pgtable_alloc);
-		pte_phys = pgtable_alloc(PAGE_SHIFT);
-		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
-		pmd = READ_ONCE(*pmdp);
-	}
-	BUG_ON(pmd_bad(pmd));
-
-	do {
-		pgprot_t __prot = prot;
-
-		next = pte_cont_addr_end(addr, end);
-
-		/* use a contiguous mapping if the range is suitably aligned */
-		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
-		    (flags & NO_CONT_MAPPINGS) == 0)
-			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
-
-		init_pte(pmdp, addr, next, phys, __prot);
-
-		phys += next - addr;
-	} while (addr = next, addr != end);
-}
-
-static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
-		     phys_addr_t phys, pgprot_t prot,
-		     phys_addr_t (*pgtable_alloc)(int), int flags)
-{
-	unsigned long next;
-	pmd_t *pmdp;
-
-	pmdp = pmd_set_fixmap_offset(pudp, addr);
-	do {
-		pmd_t old_pmd = READ_ONCE(*pmdp);
-
-		next = pmd_addr_end(addr, end);
-
-		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
-		    (flags & NO_BLOCK_MAPPINGS) == 0) {
-			pmd_set_huge(pmdp, phys, prot);
-
-			/*
-			 * After the PMD entry has been populated once, we
-			 * only allow updates to the permission attributes.
-			 */
-			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
-						      READ_ONCE(pmd_val(*pmdp))));
-		} else {
-			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
-					    pgtable_alloc, flags);
-
-			BUG_ON(pmd_val(old_pmd) != 0 &&
-			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
-		}
-		phys += next - addr;
-	} while (pmdp++, addr = next, addr != end);
-
-	pmd_clear_fixmap();
-}
-
-static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
-				unsigned long end, phys_addr_t phys,
-				pgprot_t prot,
-				phys_addr_t (*pgtable_alloc)(int), int flags)
-{
-	unsigned long next;
-	pud_t pud = READ_ONCE(*pudp);
-
-	/*
-	 * Check for initial section mappings in the pgd/pud.
-	 */
-	BUG_ON(pud_sect(pud));
-	if (pud_none(pud)) {
-		phys_addr_t pmd_phys;
-		BUG_ON(!pgtable_alloc);
-		pmd_phys = pgtable_alloc(PMD_SHIFT);
-		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
-		pud = READ_ONCE(*pudp);
-	}
-	BUG_ON(pud_bad(pud));
-
-	do {
-		pgprot_t __prot = prot;
-
-		next = pmd_cont_addr_end(addr, end);
-
-		/* use a contiguous mapping if the range is suitably aligned */
-		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
-		    (flags & NO_CONT_MAPPINGS) == 0)
-			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
-
-		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
-
-		phys += next - addr;
-	} while (addr = next, addr != end);
-}
-
-static inline bool use_1G_block(unsigned long addr, unsigned long next,
-			unsigned long phys)
-{
-	if (PAGE_SHIFT != 12)
-		return false;
-
-	if (((addr | next | phys) & ~PUD_MASK) != 0)
-		return false;
-
-	return true;
-}
-
-static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
-			   phys_addr_t phys, pgprot_t prot,
-			   phys_addr_t (*pgtable_alloc)(int),
-			   int flags)
-{
-	unsigned long next;
-	pud_t *pudp;
-	p4d_t *p4dp = p4d_offset(pgdp, addr);
-	p4d_t p4d = READ_ONCE(*p4dp);
-
-	if (p4d_none(p4d)) {
-		phys_addr_t pud_phys;
-		BUG_ON(!pgtable_alloc);
-		pud_phys = pgtable_alloc(PUD_SHIFT);
-		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
-		p4d = READ_ONCE(*p4dp);
-	}
-	BUG_ON(p4d_bad(p4d));
-
-	pudp = pud_set_fixmap_offset(p4dp, addr);
-	do {
-		pud_t old_pud = READ_ONCE(*pudp);
-
-		next = pud_addr_end(addr, end);
-
-		/*
-		 * For 4K granule only, attempt to put down a 1GB block
-		 */
-		if (use_1G_block(addr, next, phys) &&
-		    (flags & NO_BLOCK_MAPPINGS) == 0) {
-			pud_set_huge(pudp, phys, prot);
-
-			/*
-			 * After the PUD entry has been populated once, we
-			 * only allow updates to the permission attributes.
-			 */
-			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
-						      READ_ONCE(pud_val(*pudp))));
-		} else {
-			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
-					    pgtable_alloc, flags);
-
-			BUG_ON(pud_val(old_pud) != 0 &&
-			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
-		}
-		phys += next - addr;
-	} while (pudp++, addr = next, addr != end);
-
-	pud_clear_fixmap();
-}
-
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
-				 unsigned long virt, phys_addr_t size,
-				 pgprot_t prot,
-				 phys_addr_t (*pgtable_alloc)(int),
-				 int flags)
-{
-	unsigned long addr, end, next;
-	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
-
-	/*
-	 * If the virtual and physical address don't have the same offset
-	 * within a page, we cannot map the region as the caller expects.
-	 */
-	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
-		return;
-
-	phys &= PAGE_MASK;
-	addr = virt & PAGE_MASK;
-	end = PAGE_ALIGN(virt + size);
-
-	do {
-		next = pgd_addr_end(addr, end);
-		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
-			       flags);
-		phys += next - addr;
-	} while (pgdp++, addr = next, addr != end);
-}
-
 static phys_addr_t __pgd_pgtable_alloc(int shift)
 {
 	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
@@ -404,6 +143,8 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
 	return pa;
 }
 
+#include "./mmu_include.c"
+
 /*
  * This function can only be used to modify existing table entries,
  * without allocating new levels of table. Note that this permits the
diff --git a/arch/arm64/mm/mmu_include.c b/arch/arm64/mm/mmu_include.c
new file mode 100644
index 000000000000..e9ebdffe860b
--- /dev/null
+++ b/arch/arm64/mm/mmu_include.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define NO_BLOCK_MAPPINGS	BIT(0)
+#define NO_CONT_MAPPINGS	BIT(1)
+
+static bool pgattr_change_is_safe(u64 old, u64 new)
+{
+	/*
+	 * The following mapping attributes may be updated in live
+	 * kernel mappings without the need for break-before-make.
+	 */
+	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+
+	/* creating or taking down mappings is always safe */
+	if (old == 0 || new == 0)
+		return true;
+
+	/* live contiguous mappings may not be manipulated at all */
+	if ((old | new) & PTE_CONT)
+		return false;
+
+	/* Transitioning from Non-Global to Global is unsafe */
+	if (old & ~new & PTE_NG)
+		return false;
+
+	/*
+	 * Changing the memory type between Normal and Normal-Tagged is safe
+	 * since Tagged is considered a permission attribute from the
+	 * mismatched attribute aliases perspective.
+	 */
+	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
+	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
+		mask |= PTE_ATTRINDX_MASK;
+
+	return ((old ^ new) & ~mask) == 0;
+}
+
+static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
+		     phys_addr_t phys, pgprot_t prot)
+{
+	pte_t *ptep;
+
+	ptep = pte_set_fixmap_offset(pmdp, addr);
+	do {
+		pte_t old_pte = READ_ONCE(*ptep);
+
+		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
+
+		/*
+		 * After the PTE entry has been populated once, we
+		 * only allow updates to the permission attributes.
+		 */
+		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
+					      READ_ONCE(pte_val(*ptep))));
+
+		phys += PAGE_SIZE;
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
+
+	pte_clear_fixmap();
+}
+
+static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
+				unsigned long end, phys_addr_t phys,
+				pgprot_t prot,
+				phys_addr_t (*pgtable_alloc)(int),
+				int flags)
+{
+	unsigned long next;
+	pmd_t pmd = READ_ONCE(*pmdp);
+
+	BUG_ON(pmd_sect(pmd));
+	if (pmd_none(pmd)) {
+		phys_addr_t pte_phys;
+		BUG_ON(!pgtable_alloc);
+		pte_phys = pgtable_alloc(PAGE_SHIFT);
+		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+		pmd = READ_ONCE(*pmdp);
+	}
+	BUG_ON(pmd_bad(pmd));
+
+	do {
+		pgprot_t __prot = prot;
+
+		next = pte_cont_addr_end(addr, end);
+
+		/* use a contiguous mapping if the range is suitably aligned */
+		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
+			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+		init_pte(pmdp, addr, next, phys, __prot);
+
+		phys += next - addr;
+	} while (addr = next, addr != end);
+}
+
+static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
+		     phys_addr_t phys, pgprot_t prot,
+		     phys_addr_t (*pgtable_alloc)(int), int flags)
+{
+	unsigned long next;
+	pmd_t *pmdp;
+
+	pmdp = pmd_set_fixmap_offset(pudp, addr);
+	do {
+		pmd_t old_pmd = READ_ONCE(*pmdp);
+
+		next = pmd_addr_end(addr, end);
+
+		/* try section mapping first */
+		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+		    (flags & NO_BLOCK_MAPPINGS) == 0) {
+			pmd_set_huge(pmdp, phys, prot);
+
+			/*
+			 * After the PMD entry has been populated once, we
+			 * only allow updates to the permission attributes.
+			 */
+			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
+						      READ_ONCE(pmd_val(*pmdp))));
+		} else {
+			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
+					    pgtable_alloc, flags);
+
+			BUG_ON(pmd_val(old_pmd) != 0 &&
+			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
+		}
+		phys += next - addr;
+	} while (pmdp++, addr = next, addr != end);
+
+	pmd_clear_fixmap();
+}
+
+static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
+				unsigned long end, phys_addr_t phys,
+				pgprot_t prot,
+				phys_addr_t (*pgtable_alloc)(int), int flags)
+{
+	unsigned long next;
+	pud_t pud = READ_ONCE(*pudp);
+
+	/*
+	 * Check for initial section mappings in the pgd/pud.
+	 */
+	BUG_ON(pud_sect(pud));
+	if (pud_none(pud)) {
+		phys_addr_t pmd_phys;
+		BUG_ON(!pgtable_alloc);
+		pmd_phys = pgtable_alloc(PMD_SHIFT);
+		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+		pud = READ_ONCE(*pudp);
+	}
+	BUG_ON(pud_bad(pud));
+
+	do {
+		pgprot_t __prot = prot;
+
+		next = pmd_cont_addr_end(addr, end);
+
+		/* use a contiguous mapping if the range is suitably aligned */
+		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
+			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
+
+		phys += next - addr;
+	} while (addr = next, addr != end);
+}
+
+static inline bool use_1G_block(unsigned long addr, unsigned long next,
+			unsigned long phys)
+{
+	if (PAGE_SHIFT != 12)
+		return false;
+
+	if (((addr | next | phys) & ~PUD_MASK) != 0)
+		return false;
+
+	return true;
+}
+
+static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
+			   phys_addr_t phys, pgprot_t prot,
+			   phys_addr_t (*pgtable_alloc)(int),
+			   int flags)
+{
+	unsigned long next;
+	pud_t *pudp;
+	p4d_t *p4dp = p4d_offset(pgdp, addr);
+	p4d_t p4d = READ_ONCE(*p4dp);
+
+	if (p4d_none(p4d)) {
+		phys_addr_t pud_phys;
+		BUG_ON(!pgtable_alloc);
+		pud_phys = pgtable_alloc(PUD_SHIFT);
+		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
+		p4d = READ_ONCE(*p4dp);
+	}
+	BUG_ON(p4d_bad(p4d));
+
+	pudp = pud_set_fixmap_offset(p4dp, addr);
+	do {
+		pud_t old_pud = READ_ONCE(*pudp);
+
+		next = pud_addr_end(addr, end);
+
+		/*
+		 * For 4K granule only, attempt to put down a 1GB block
+		 */
+		if (use_1G_block(addr, next, phys) &&
+		    (flags & NO_BLOCK_MAPPINGS) == 0) {
+			pud_set_huge(pudp, phys, prot);
+
+			/*
+			 * After the PUD entry has been populated once, we
+			 * only allow updates to the permission attributes.
+			 */
+			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
+						      READ_ONCE(pud_val(*pudp))));
+		} else {
+			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
+					    pgtable_alloc, flags);
+
+			BUG_ON(pud_val(old_pud) != 0 &&
+			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
+		}
+		phys += next - addr;
+	} while (pudp++, addr = next, addr != end);
+
+	pud_clear_fixmap();
+}
+
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+				 unsigned long virt, phys_addr_t size,
+				 pgprot_t prot,
+				 phys_addr_t (*pgtable_alloc)(int),
+				 int flags)
+{
+	unsigned long addr, end, next;
+	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
+
+	/*
+	 * If the virtual and physical address don't have the same offset
+	 * within a page, we cannot map the region as the caller expects.
+	 */
+	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
+		return;
+
+	phys &= PAGE_MASK;
+	addr = virt & PAGE_MASK;
+	end = PAGE_ALIGN(virt + size);
+
+	do {
+		next = pgd_addr_end(addr, end);
+		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
+			       flags);
+		phys += next - addr;
+	} while (pgdp++, addr = next, addr != end);
+}
-- 
2.29.2



