From: Christophe Leroy <christophe.leroy@csgroup.eu>
To: Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Michael Ellerman <mpe@ellerman.id.au>
Cc: linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Subject: [PATCH v4 33/45] powerpc/8xx: Drop special handling of Linear and IMMR mappings in I/D TLB handlers
Date: Tue, 19 May 2020 05:49:16 +0000 (UTC)
Message-ID: <221b7e3ead80a5969629938c023f8cfe45fdd2fb.1589866984.git.christophe.leroy@csgroup.eu>
In-Reply-To: <cover.1589866984.git.christophe.leroy@csgroup.eu>

Up to now, the linear and IMMR mappings have been managed via huge TLB
entries through specific code directly in the TLB miss handlers. This
implies patching the TLB miss handlers at startup, and a lot of
dedicated code.
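
For reference, the pattern being removed pairs a patchable compare in
the miss handler with a boot-time rewrite of its 16-bit immediate.
Both excerpts below are taken verbatim from the hunks deleted by this
patch:

	/* Handler side: compare against the top of the linear mapping,
	 * registered as a patch site so it can be rewritten at boot. */
	0:	cmpli	cr7, r10, (PAGE_OFFSET + 0x2000000)@h
		patch_site	0b, patch__dtlbmiss_linmem_top

	/* C side: rewrite the compare's immediate once the actually
	 * mapped limit is known. */
	static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
	{
		modify_instruction_site(site, 0xffff,
					(unsigned long)__va(mapped) >> 16);
	}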

Remove all this dedicated code.

For now, we are back to normal handling via standard 4k pages. In the
next patches, the linear memory mapping and the IMMR mapping will be
managed through huge pages.
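
Mapping of the IMMR area itself keeps relying on the existing
mmu_mapin_immr() loop over standard pages. Only its final line is
visible in the diff context below; as a rough sketch (assuming the
usual PHYS_IMMR_BASE, VIRT_IMMR_BASE and IMMR_SIZE definitions from
mmu-8xx.h), it boils down to:

	void __init mmu_mapin_immr(void)
	{
		unsigned long p = PHYS_IMMR_BASE;
		unsigned long v = VIRT_IMMR_BASE;
		int offset;

		/* Cover the IMMR region with non-cached, guarded 4k pages. */
		for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
			map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
	}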

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/kernel/head_8xx.S |  29 +--------
 arch/powerpc/mm/nohash/8xx.c   | 106 +--------------------------------
 2 files changed, 3 insertions(+), 132 deletions(-)

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index b0cceee6405c..d1546f379757 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -207,31 +207,21 @@ InstructionTLBMiss:
 	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
 	INVALIDATE_ADJACENT_PAGES_CPU15(r10)
 	mtspr	SPRN_MD_EPN, r10
-	/* Only modules will cause ITLB Misses as we always
-	 * pin the first 8MB of kernel memory */
 #ifdef ITLB_MISS_KERNEL
 	mfcr	r11
-#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
+#if defined(SIMPLE_KERNEL_ADDRESS)
 	cmpi	cr0, r10, 0	/* Address >= 0x80000000 */
 #else
 	rlwinm	r10, r10, 16, 0xfff8
 	cmpli	cr0, r10, PAGE_OFFSET@h
-#ifndef CONFIG_PIN_TLB_TEXT
-	/* It is assumed that kernel code fits into the first 32M */
-0:	cmpli	cr7, r10, (PAGE_OFFSET + 0x2000000)@h
-	patch_site	0b, patch__itlbmiss_linmem_top
-#endif
 #endif
 #endif
 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
 #ifdef ITLB_MISS_KERNEL
-#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
+#if defined(SIMPLE_KERNEL_ADDRESS)
 	bge+	3f
 #else
 	blt+	3f
-#endif
-#ifndef CONFIG_PIN_TLB_TEXT
-	blt	cr7, ITLBMissLinear
 #endif
 	rlwinm	r10, r10, 0, 20, 31
 	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
@@ -327,19 +317,9 @@ DataStoreTLBMiss:
 	mfspr	r10, SPRN_MD_EPN
 	rlwinm	r10, r10, 16, 0xfff8
 	cmpli	cr0, r10, PAGE_OFFSET@h
-#ifndef CONFIG_PIN_TLB_IMMR
-	cmpli	cr6, r10, VIRT_IMMR_BASE@h
-#endif
-0:	cmpli	cr7, r10, (PAGE_OFFSET + 0x2000000)@h
-	patch_site	0b, patch__dtlbmiss_linmem_top
 
 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
 	blt+	3f
-#ifndef CONFIG_PIN_TLB_IMMR
-0:	beq-	cr6, DTLBMissIMMR
-	patch_site	0b, patch__dtlbmiss_immr_jmp
-#endif
-	blt	cr7, DTLBMissLinear
 	rlwinm	r10, r10, 0, 20, 31
 	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
 3:
@@ -571,14 +551,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
 	cmpli	cr1, r11, PAGE_OFFSET@h
 	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
 	blt+	cr1, 3f
-	rlwinm	r11, r10, 16, 0xfff8
-
-0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
-	patch_site	0b, patch__fixupdar_linmem_top
 
 	/* create physical page address from effective address */
 	tophys(r11, r10)
-	blt-	cr7, 201f
 	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
 	rlwinm	r11, r11, 0, 20, 31
 	oris	r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 2c480e35b426..b735482e1529 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -55,8 +55,6 @@ unsigned long p_block_mapped(phys_addr_t pa)
 	return 0;
 }
 
-#define LARGE_PAGE_SIZE_8M	(1<<23)
-
 /*
  * MMU_init_hw does the chip-specific initialization of the MMU hardware.
  */
@@ -81,122 +79,20 @@ void __init mmu_mapin_immr(void)
 		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
 }
 
-static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
-{
-	modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
-}
-
-static void mmu_patch_addis(s32 *site, long simm)
-{
-	unsigned int instr = *(unsigned int *)patch_site_addr(site);
-
-	instr &= 0xffff0000;
-	instr |= ((unsigned long)simm) >> 16;
-	patch_instruction_site(site, ppc_inst(instr));
-}
-
-static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
-{
-	unsigned long s = offset;
-	unsigned long v = PAGE_OFFSET + s;
-	phys_addr_t p = memstart_addr + s;
-
-	for (; s < top; s += PAGE_SIZE) {
-		map_kernel_page(v, p, prot);
-		v += PAGE_SIZE;
-		p += PAGE_SIZE;
-	}
-}
-
 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
-	unsigned long mapped;
-
 	mmu_mapin_immr();
 
-	if (__map_without_ltlbs) {
-		mapped = 0;
-		if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
-			patch_instruction_site(&patch__dtlbmiss_immr_jmp, ppc_inst(PPC_INST_NOP));
-		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
-			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
-	} else {
-		unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
-
-		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
-		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
-			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8);
-
-		/*
-		 * Populate page tables to:
-		 * - have them appear in /sys/kernel/debug/kernel_page_tables
-		 * - allow the BDI to find the pages when they are not PINNED
-		 */
-		mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
-		mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
-	}
-
-	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
-	mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);
-
-	/* If the size of RAM is not an exact power of two, we may not
-	 * have covered RAM in its entirety with 8 MiB
-	 * pages. Consequently, restrict the top end of RAM currently
-	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
-	 * coverage with normal-sized pages (or other reasons) do not
-	 * attempt to allocate outside the allowed range.
-	 */
-	if (mapped)
-		memblock_set_current_limit(mapped);
-
-	block_mapped_ram = mapped;
-
-	return mapped;
+	return 0;
 }
 
 void mmu_mark_initmem_nx(void)
 {
-	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
-		mmu_patch_addis(&patch__itlbmiss_linmem_top8,
-				-((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
-	if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) {
-		unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
-		unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
-		unsigned long etext = __pa(_etext);
-
-		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
-
-		/* Update page tables for PTDUMP and BDI */
-		mmu_mapin_ram_chunk(0, einittext8, __pgprot(0));
-		if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
-			mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT);
-			mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL);
-		} else {
-			mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT);
-			mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
-		}
-	}
-	_tlbil_all();
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mmu_mark_rodata_ro(void)
 {
-	unsigned long sinittext = __pa(_sinittext);
-	unsigned long etext = __pa(_etext);
-
-	if (CONFIG_DATA_SHIFT < 23)
-		mmu_patch_addis(&patch__dtlbmiss_romem_top8,
-				-__pa(((unsigned long)_sinittext) &
-				      ~(LARGE_PAGE_SIZE_8M - 1)));
-	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
-
-	_tlbil_all();
-
-	/* Update page tables for PTDUMP and BDI */
-	mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
-	mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
-	mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO);
 }
 #endif
 
-- 
2.25.0

