From: Christophe Leroy <christophe.leroy@csgroup.eu>
To: Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Michael Ellerman <mpe@ellerman.id.au>
Cc: linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Subject: [PATCH v4 31/45] powerpc/8xx: Don't set IMMR map anymore at boot
Date: Tue, 19 May 2020 05:49:14 +0000 (UTC)
Message-ID: <13c1e8539fdf363d3146f4884e5c3c76c6c308b5.1589866984.git.christophe.leroy@csgroup.eu>
In-Reply-To: <cover.1589866984.git.christophe.leroy@csgroup.eu>

Only early debug requires the IMMR to be mapped early.

There is no need to set it up and pin it in assembly.
Map it through page tables at udbg init when necessary.

If CONFIG_PIN_TLB_IMMR is selected, pin it once the
32 MB of pinned RAM are no longer needed.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
v2: Disable TLB reservation to modify entry 31
---
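For reviewers, a condensed sketch of the resulting call flow
(function names as in the hunks below; this is illustrative only,
not part of the patch). The immr_is_mapped guard makes
mmu_mapin_immr() idempotent, so whichever path runs first does the
actual mapping:

	/* Early debug path (arch/powerpc/sysdev/cpm_common.c) */
	void __init udbg_init_cpm(void)
	{
		mmu_mapin_immr();	/* map IMMR through page tables */
		/* ... set up cpm_udbg_txdesc from VIRT_IMMR_BASE ... */
	}

	/* Normal boot path (arch/powerpc/mm/nohash/8xx.c) */
	unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
	{
		mmu_mapin_immr();	/* no-op if early debug already mapped it */
		/* ... map the linear RAM as before ... */
	}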
 arch/powerpc/kernel/head_8xx.S     | 39 +++++++++++++-----------------
 arch/powerpc/mm/mmu_decl.h         |  4 +++
 arch/powerpc/mm/nohash/8xx.c       | 15 +++++++++---
 arch/powerpc/platforms/8xx/Kconfig |  2 +-
 arch/powerpc/sysdev/cpm_common.c   |  2 ++
 5 files changed, 35 insertions(+), 27 deletions(-)

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index c9e3d54e6a6f..d607f4b53e0f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -749,6 +749,23 @@ start_here:
 	rfi
 /* Load up the kernel context */
 2:
+#ifdef CONFIG_PIN_TLB_IMMR
+	lis	r0, MD_TWAM@h
+	oris	r0, r0, 0x1f00
+	mtspr	SPRN_MD_CTR, r0
+	LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
+	tlbie	r0
+	mtspr	SPRN_MD_EPN, r0
+	LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED)
+	mtspr	SPRN_MD_TWC, r0
+	mfspr   r0, SPRN_IMMR
+	rlwinm	r0, r0, 0, 0xfff80000
+	ori	r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
+			_PAGE_NO_CACHE | _PAGE_PRESENT
+	mtspr	SPRN_MD_RPN, r0
+	lis	r0, (MD_TWAM | MD_RSV4I)@h
+	mtspr	SPRN_MD_CTR, r0
+#endif
 	tlbia			/* Clear all TLB entries */
 	sync			/* wait for tlbia/tlbie to finish */
 
@@ -797,28 +814,6 @@ initial_mmu:
 	ori	r8, r8, MD_APG_INIT@l
 	mtspr	SPRN_MD_AP, r8
 
-	/* Map a 512k page for the IMMR to get the processor
-	 * internal registers (among other things).
-	 */
-#ifdef CONFIG_PIN_TLB_IMMR
-	oris	r10, r10, MD_RSV4I@h
-	ori	r10, r10, 0x1c00
-	mtspr	SPRN_MD_CTR, r10
-
-	mfspr	r9, 638			/* Get current IMMR */
-	andis.	r9, r9, 0xfff8		/* Get 512 kbytes boundary */
-
-	lis	r8, VIRT_IMMR_BASE@h	/* Create vaddr for TLB */
-	ori	r8, r8, MD_EVALID	/* Mark it valid */
-	mtspr	SPRN_MD_EPN, r8
-	li	r8, MD_PS512K | MD_GUARDED	/* Set 512k byte page */
-	ori	r8, r8, MD_SVALID	/* Make it valid */
-	mtspr	SPRN_MD_TWC, r8
-	mr	r8, r9			/* Create paddr for TLB */
-	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
-	mtspr	SPRN_MD_RPN, r8
-#endif
-
 	/* Now map the lower RAM (up to 32 Mbytes) into the ITLB. */
 #ifdef CONFIG_PIN_TLB_TEXT
 	lis	r8, MI_RSV4I@h
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 7097e07a209a..1b6d39e9baed 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -182,6 +182,10 @@ static inline void mmu_mark_initmem_nx(void) { }
 static inline void mmu_mark_rodata_ro(void) { }
 #endif
 
+#ifdef CONFIG_PPC_8xx
+void __init mmu_mapin_immr(void);
+#endif
+
 #ifdef CONFIG_PPC_DEBUG_WX
 void ptdump_check_wx(void);
 #else
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index bda5290af751..a9313aa6f1cd 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -66,7 +66,7 @@ void __init MMU_init_hw(void)
 	if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
 		unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
 		unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
-		int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
+		int i = 28;
 		unsigned long addr = 0;
 		unsigned long mem = total_lowmem;
 
@@ -81,12 +81,19 @@ void __init MMU_init_hw(void)
 	}
 }
 
-static void __init mmu_mapin_immr(void)
+static bool immr_is_mapped __initdata;
+
+void __init mmu_mapin_immr(void)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 	unsigned long v = VIRT_IMMR_BASE;
 	int offset;
 
+	if (immr_is_mapped)
+		return;
+
+	immr_is_mapped = true;
+
 	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
 		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
 }
@@ -122,9 +129,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
 	unsigned long mapped;
 
+	mmu_mapin_immr();
+
 	if (__map_without_ltlbs) {
 		mapped = 0;
-		mmu_mapin_immr();
 		if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
 			patch_instruction_site(&patch__dtlbmiss_immr_jmp, ppc_inst(PPC_INST_NOP));
 		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
@@ -143,7 +151,6 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		 */
 		mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
 		mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
-		mmu_mapin_immr();
 	}
 
 	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 0d036cd868ef..04ea1a8a0bdc 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -187,7 +187,7 @@ config PIN_TLB_DATA
 
 config PIN_TLB_IMMR
 	bool "Pinned TLB for IMMR"
-	depends on PIN_TLB || PPC_EARLY_DEBUG_CPM
+	depends on PIN_TLB
 	default y
 	help
 	  This pins the IMMR area with a 512kbytes page. In case
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 71660bacb264..7dc1960f8bdb 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -68,6 +68,8 @@ static void udbg_putc_cpm(char c)
 void __init udbg_init_cpm(void)
 {
 #ifdef CONFIG_PPC_8xx
+	mmu_mapin_immr();
+
 	cpm_udbg_txdesc = (u32 __iomem __force *)
 			  (CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
 			   VIRT_IMMR_BASE);
-- 
2.25.0
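A rough C rendering of the pinning sequence added to start_here
above, for illustration only. Assumptions: the helper name is
hypothetical, the real code must stay in assembly in head_8xx.S
because it runs during early boot before the kernel context is
loaded, and per the v2 note the first MD_CTR write selects entry 31
with the TLB reservation (MD_RSV4I) disabled so that the reserved
entry can be modified:

	/* Illustrative sketch, not part of the patch */
	static void __init pin_immr_tlb_sketch(void)
	{
		unsigned long rpn;

		/* Select entry 31, MD_RSV4I clear so it can be written */
		mtspr(SPRN_MD_CTR, MD_TWAM | 0x1f000000);
		/* Drop any stale translation of the IMMR virtual base */
		asm volatile("tlbie %0" : : "r" (VIRT_IMMR_BASE | MD_EVALID));
		/* EPN: virtual base, marked valid */
		mtspr(SPRN_MD_EPN, VIRT_IMMR_BASE | MD_EVALID);
		/* TWC: valid, 512k page size, guarded */
		mtspr(SPRN_MD_TWC, MD_SVALID | MD_PS512K | MD_GUARDED);
		/* RPN: IMMR physical base, 512k aligned, uncached flags */
		rpn = mfspr(SPRN_IMMR) & 0xfff80000;
		mtspr(SPRN_MD_RPN, rpn | 0xf0 | _PAGE_DIRTY | _PAGE_SPS |
				   _PAGE_SH | _PAGE_NO_CACHE | _PAGE_PRESENT);
		/* Re-reserve entries 28-31 against replacement */
		mtspr(SPRN_MD_CTR, MD_TWAM | MD_RSV4I);
	}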

