linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH 1/8] powerpc/8xx: Remove CPU6 ERRATA Workaround
@ 2018-01-12 12:45 Christophe Leroy
From: Christophe Leroy @ 2018-01-12 12:45 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Scott Wood
  Cc: linux-kernel, linuxppc-dev

The CPU6 errata affects only MPC860 revisions prior to C.0.
Manufacturing of those revisions stopped in 1999-2000.
Therefore, it has been almost 20 years since this errata was
fixed in the silicon.

This patch removes the workaround for that errata.
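
For reference, the workaround (see the do_mtspr_cpu6() and MTSPR_CPU6
macros removed below) forced a dummy store/load of a per-SPR shadow
address before every mtspr, roughly:

	stw	rTMP, addr	/* store the SPR's workaround address */
	lwz	rTMP, addr	/* read it back */
	mtspr	SPRN_xxx, rVAL	/* only then write the SPR */

where rTMP, addr, SPRN_xxx and rVAL stand in for the actual registers
and addresses. On fixed silicon a plain mtspr is sufficient.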

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/configs/mpc866_ads_defconfig |  1 -
 arch/powerpc/include/asm/reg_8xx.h        | 82 -------------------------------
 arch/powerpc/kernel/head_8xx.S            | 54 +++++---------------
 arch/powerpc/platforms/8xx/Kconfig        | 12 -----
 4 files changed, 12 insertions(+), 137 deletions(-)

diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index f1f176c29fa3..5320735395e7 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -13,7 +13,6 @@ CONFIG_EXPERT=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_MPC86XADS=y
 CONFIG_8xx_COPYBACK=y
-CONFIG_8xx_CPU6=y
 CONFIG_GEN_RTC=y
 CONFIG_HZ_1000=y
 CONFIG_MATH_EMULATION=y
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 53a7e2955d3e..7192eece6c3e 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -66,86 +66,4 @@
 #define DC_DFWT		0x40000000	/* Data cache is forced write through */
 #define DC_LES		0x20000000	/* Caches are little endian mode */
 
-#ifdef CONFIG_8xx_CPU6
-#define do_mtspr_cpu6(rn, rn_addr, v)	\
-	do {								\
-		int _reg_cpu6 = rn_addr, _tmp_cpu6;		\
-		asm volatile("stw %0, %1;"				\
-			     "lwz %0, %1;"				\
-			     "mtspr " __stringify(rn) ",%2" :		\
-			     : "r" (_reg_cpu6), "m"(_tmp_cpu6),		\
-			       "r" ((unsigned long)(v))			\
-			     : "memory");				\
-	} while (0)
-
-#define do_mtspr(rn, v)	asm volatile("mtspr " __stringify(rn) ",%0" :	\
-				     : "r" ((unsigned long)(v))		\
-				     : "memory")
-#define mtspr(rn, v) \
-	do {								\
-		if (rn == SPRN_IMMR)					\
-			do_mtspr_cpu6(rn, 0x3d30, v);			\
-		else if (rn == SPRN_IC_CST)				\
-			do_mtspr_cpu6(rn, 0x2110, v);			\
-		else if (rn == SPRN_IC_ADR)				\
-			do_mtspr_cpu6(rn, 0x2310, v);			\
-		else if (rn == SPRN_IC_DAT)				\
-			do_mtspr_cpu6(rn, 0x2510, v);			\
-		else if (rn == SPRN_DC_CST)				\
-			do_mtspr_cpu6(rn, 0x3110, v);			\
-		else if (rn == SPRN_DC_ADR)				\
-			do_mtspr_cpu6(rn, 0x3310, v);			\
-		else if (rn == SPRN_DC_DAT)				\
-			do_mtspr_cpu6(rn, 0x3510, v);			\
-		else if (rn == SPRN_MI_CTR)				\
-			do_mtspr_cpu6(rn, 0x2180, v);			\
-		else if (rn == SPRN_MI_AP)				\
-			do_mtspr_cpu6(rn, 0x2580, v);			\
-		else if (rn == SPRN_MI_EPN)				\
-			do_mtspr_cpu6(rn, 0x2780, v);			\
-		else if (rn == SPRN_MI_TWC)				\
-			do_mtspr_cpu6(rn, 0x2b80, v);			\
-		else if (rn == SPRN_MI_RPN)				\
-			do_mtspr_cpu6(rn, 0x2d80, v);			\
-		else if (rn == SPRN_MI_CAM)				\
-			do_mtspr_cpu6(rn, 0x2190, v);			\
-		else if (rn == SPRN_MI_RAM0)				\
-			do_mtspr_cpu6(rn, 0x2390, v);			\
-		else if (rn == SPRN_MI_RAM1)				\
-			do_mtspr_cpu6(rn, 0x2590, v);			\
-		else if (rn == SPRN_MD_CTR)				\
-			do_mtspr_cpu6(rn, 0x3180, v);			\
-		else if (rn == SPRN_M_CASID)				\
-			do_mtspr_cpu6(rn, 0x3380, v);			\
-		else if (rn == SPRN_MD_AP)				\
-			do_mtspr_cpu6(rn, 0x3580, v);			\
-		else if (rn == SPRN_MD_EPN)				\
-			do_mtspr_cpu6(rn, 0x3780, v);			\
-		else if (rn == SPRN_M_TWB)				\
-			do_mtspr_cpu6(rn, 0x3980, v);			\
-		else if (rn == SPRN_MD_TWC)				\
-			do_mtspr_cpu6(rn, 0x3b80, v);			\
-		else if (rn == SPRN_MD_RPN)				\
-			do_mtspr_cpu6(rn, 0x3d80, v);			\
-		else if (rn == SPRN_M_TW)				\
-			do_mtspr_cpu6(rn, 0x3f80, v);			\
-		else if (rn == SPRN_MD_CAM)				\
-			do_mtspr_cpu6(rn, 0x3190, v);			\
-		else if (rn == SPRN_MD_RAM0)				\
-			do_mtspr_cpu6(rn, 0x3390, v);			\
-		else if (rn == SPRN_MD_RAM1)				\
-			do_mtspr_cpu6(rn, 0x3590, v);			\
-		else if (rn == SPRN_DEC)				\
-			do_mtspr_cpu6(rn, 0x2c00, v);			\
-		else if (rn == SPRN_TBWL)				\
-			do_mtspr_cpu6(rn, 0x3880, v);			\
-		else if (rn == SPRN_TBWU)				\
-			do_mtspr_cpu6(rn, 0x3a80, v);			\
-		else if (rn == SPRN_DPDR)				\
-			do_mtspr_cpu6(rn, 0x2d30, v);			\
-		else							\
-			do_mtspr(rn, v);				\
-	} while (0)
-#endif
-
 #endif /* _ASM_POWERPC_REG_8xx_H */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 4fee00d414e8..728b513c07b8 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -33,23 +33,6 @@
 #include <asm/fixmap.h>
 #include <asm/export.h>
 
-/* Macro to make the code more readable. */
-#ifdef CONFIG_8xx_CPU6
-#define SPRN_MI_TWC_ADDR	0x2b80
-#define SPRN_MI_RPN_ADDR	0x2d80
-#define SPRN_MD_TWC_ADDR	0x3b80
-#define SPRN_MD_RPN_ADDR	0x3d80
-
-#define MTSPR_CPU6(spr, reg, treg)	\
-	li	treg, spr##_ADDR;	\
-	stw	treg, 12(r0);		\
-	lwz	treg, 12(r0);		\
-	mtspr	spr, reg
-#else
-#define MTSPR_CPU6(spr, reg, treg)	\
-	mtspr	spr, reg
-#endif
-
 #if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
 /* By simply checking Address >= 0x80000000, we know if its a kernel address */
 #define SIMPLE_KERNEL_ADDRESS		1
@@ -326,7 +309,7 @@ SystemCall:
 #endif
 
 InstructionTLBMiss:
-#if defined(CONFIG_8xx_CPU6) || defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
 	mtspr	SPRN_SPRG_SCRATCH2, r3
 #endif
 	EXCEPTION_PROLOG_0
@@ -393,7 +376,7 @@ _ENTRY(ITLBMiss_cmp)
 	/* Insert the APG into the TWC from the Linux PTE. */
 	rlwimi	r11, r10, 0, 25, 26
 	/* Load the MI_TWC with the attributes for this "segment." */
-	MTSPR_CPU6(SPRN_MI_TWC, r11, r3)	/* Set segment attributes */
+	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */
 
 #if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
 	rlwimi	r10, r11, 1, MI_SPS16K
@@ -415,10 +398,10 @@ _ENTRY(ITLBMiss_cmp)
 #else
 	rlwimi	r10, r11, 0, 0x0ff8	/* Set 24-27, clear 20-23,28 */
 #endif
-	MTSPR_CPU6(SPRN_MI_RPN, r10, r3)	/* Update TLB entry */
+	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
-#if defined(CONFIG_8xx_CPU6) || defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
 	mfspr	r3, SPRN_SPRG_SCRATCH2
 #endif
 	EXCEPTION_EPILOG_0
@@ -512,7 +495,7 @@ _ENTRY(DTLBMiss_jmp)
 	 * It is bit 25 in the Linux PTE and bit 30 in the TWC
 	 */
 	rlwimi	r11, r10, 32-5, 30, 30
-	MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
+	mtspr	SPRN_MD_TWC, r11
 
 	/* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
 	 * In 16k pages mode, SPS is always 1 */
@@ -546,7 +529,7 @@ _ENTRY(DTLBMiss_jmp)
 	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 #endif
 	rlwimi	r10, r11, 0, 20, 20	/* clear 20 */
-	MTSPR_CPU6(SPRN_MD_RPN, r10, r3)	/* Update TLB entry */
+	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
 	mfspr	r3, SPRN_SPRG_SCRATCH2
@@ -684,12 +667,12 @@ DTLBMissIMMR:
 	mtcr	r3
 	/* Set 512k byte guarded page and mark it valid */
 	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID
-	MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+	mtspr	SPRN_MD_TWC, r10
 	mfspr	r10, SPRN_IMMR			/* Get current IMMR */
 	rlwinm	r10, r10, 0, 0xfff80000		/* Get 512 kbytes boundary */
 	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
 			  _PAGE_PRESENT | _PAGE_NO_CACHE
-	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
+	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
@@ -701,11 +684,11 @@ DTLBMissLinear:
 	mtcr	r3
 	/* Set 8M byte page and mark it valid */
 	li	r11, MD_PS8MEG | MD_SVALID
-	MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
+	mtspr	SPRN_MD_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
 	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
 			  _PAGE_PRESENT
-	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
+	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
@@ -718,11 +701,11 @@ ITLBMissLinear:
 	mtcr	r3
 	/* Set 8M byte page and mark it valid */
 	li	r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC
-	MTSPR_CPU6(SPRN_MI_TWC, r11, r3)
+	mtspr	SPRN_MI_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
 	ori	r10, r10, 0xf0 | MI_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
 			  _PAGE_PRESENT
-	MTSPR_CPU6(SPRN_MI_RPN, r10, r11)	/* Update TLB entry */
+	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
 	mfspr	r3, SPRN_SPRG_SCRATCH2
 	EXCEPTION_EPILOG_0
@@ -933,13 +916,6 @@ start_here:
 	 */
 	lis	r6, swapper_pg_dir@ha
 	tophys(r6,r6)
-#ifdef CONFIG_8xx_CPU6
-	lis	r4, cpu6_errata_word@h
-	ori	r4, r4, cpu6_errata_word@l
-	li	r3, 0x3f80
-	stw	r3, 12(r4)
-	lwz	r3, 12(r4)
-#endif
 	mtspr	SPRN_M_TW, r6
 	lis	r4,2f@h
 	ori	r4,r4,2f@l
@@ -1094,12 +1070,6 @@ swapper_pg_dir:
 abatron_pteptrs:
 	.space	8
 
-#ifdef CONFIG_8xx_CPU6
-	.globl	cpu6_errata_word
-cpu6_errata_word:
-	.space	16
-#endif
-
 #ifdef CONFIG_PPC_8xx_PERF_EVENT
 	.globl	itlb_miss_counter
 itlb_miss_counter:
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index e7f33e96b395..e417988eaef9 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -124,18 +124,6 @@ config 8xx_GPIO
 
 	  If in doubt, say Y here.
 
-config 8xx_CPU6
-	bool "CPU6 Silicon Errata (860 Pre Rev. C)"
-	help
-	  MPC860 CPUs, prior to Rev C have some bugs in the silicon, which
-	  require workarounds for Linux (and most other OSes to work).  If you
-	  get a BUG() very early in boot, this might fix the problem.  For
-	  more details read the document entitled "MPC860 Family Device Errata
-	  Reference" on Freescale's website.  This option also incurs a
-	  performance hit.
-
-	  If in doubt, say N here.
-
 config 8xx_CPU15
 	bool "CPU15 Silicon Errata"
 	depends on !HUGETLB_PAGE
-- 
2.13.3

* [PATCH 2/8] powerpc/8xx: remove EXCEPTION_PROLOG/EPILOG_0 and change r3 to r12
@ 2018-01-12 12:45 Christophe Leroy
From: Christophe Leroy @ 2018-01-12 12:45 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Scott Wood
  Cc: linux-kernel, linuxppc-dev

EXCEPTION_PROLOG_0 and EXCEPTION_EPILOG_0 were added some
time ago to group together the two mtspr/mfspr to SCRATCH0 and
SCRATCH1 and the mfcr/mtcr, in order to ease entry and exit of
handlers not using the full EXCEPTION_PROLOG.

Since then, the mfcr/mtcr has been taken out, leaving just
the two mtspr/mfspr in the macros.

In order to improve readability of the exception handlers, we
remove those two macros and open-code the two mtspr/mfspr instead.

As r10 and r11 are used with SCRATCH0 and SCRATCH1, let's also use
r12 with SCRATCH2 instead of r3. This also improves
readability/maintainability.
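
With the macros gone, each handler starts and ends with the explicit
save/restore sequence, e.g. (sketch matching the hunks below):

	mtspr	SPRN_SPRG_SCRATCH0, r10
	mtspr	SPRN_SPRG_SCRATCH1, r11
	...
	mfspr	r10, SPRN_SPRG_SCRATCH0
	mfspr	r11, SPRN_SPRG_SCRATCH1
	rfi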

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/kernel/head_8xx.S | 78 ++++++++++++++++++++++--------------------
 1 file changed, 40 insertions(+), 38 deletions(-)

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 728b513c07b8..eda582b96dbf 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -117,15 +117,12 @@ turn_on_mmu:
  * task's thread_struct.
  */
 #define EXCEPTION_PROLOG	\
-	EXCEPTION_PROLOG_0;	\
+	mtspr	SPRN_SPRG_SCRATCH0, r10;	\
+	mtspr	SPRN_SPRG_SCRATCH1, r11;	\
 	mfcr	r10;		\
 	EXCEPTION_PROLOG_1;	\
 	EXCEPTION_PROLOG_2
 
-#define EXCEPTION_PROLOG_0	\
-	mtspr	SPRN_SPRG_SCRATCH0,r10;	\
-	mtspr	SPRN_SPRG_SCRATCH1,r11
-
 #define EXCEPTION_PROLOG_1	\
 	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
 	andi.	r11,r11,MSR_PR;	\
@@ -160,13 +157,6 @@ turn_on_mmu:
 	SAVE_2GPRS(7, r11)
 
 /*
- * Exception exit code.
- */
-#define EXCEPTION_EPILOG_0	\
-	mfspr	r10,SPRN_SPRG_SCRATCH0;	\
-	mfspr	r11,SPRN_SPRG_SCRATCH1
-
-/*
  * Note: code which follows this uses cr0.eq (set if from kernel),
  * r11, r12 (SRR0), and r9 (SRR1).
  *
@@ -309,10 +299,11 @@ SystemCall:
 #endif
 
 InstructionTLBMiss:
+	mtspr	SPRN_SPRG_SCRATCH0, r10
+	mtspr	SPRN_SPRG_SCRATCH1, r11
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
-	mtspr	SPRN_SPRG_SCRATCH2, r3
+	mtspr	SPRN_SPRG_SCRATCH2, r12
 #endif
-	EXCEPTION_PROLOG_0
 #ifdef CONFIG_PPC_8xx_PERF_EVENT
 	lis	r10, (itlb_miss_counter - PAGE_OFFSET)@ha
 	lwz	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
@@ -328,7 +319,7 @@ InstructionTLBMiss:
 	/* Only modules will cause ITLB Misses as we always
 	 * pin the first 8MB of kernel memory */
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
-	mfcr	r3
+	mfcr	r12
 #endif
 #ifdef ITLB_MISS_KERNEL
 #if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
@@ -371,7 +362,7 @@ _ENTRY(ITLBMiss_cmp)
 	lwz	r10, 0(r10)	/* Get the pte */
 4:
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
-	mtcr	r3
+	mtcr	r12
 #endif
 	/* Insert the APG into the TWC from the Linux PTE. */
 	rlwimi	r11, r10, 0, 25, 26
@@ -401,10 +392,11 @@ _ENTRY(ITLBMiss_cmp)
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
-	mfspr	r3, SPRN_SPRG_SCRATCH2
+	mfspr	r12, SPRN_SPRG_SCRATCH2
 #endif
-	EXCEPTION_EPILOG_0
 	rfi
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -434,15 +426,16 @@ _ENTRY(ITLBMiss_cmp)
 
 	. = 0x1200
 DataStoreTLBMiss:
-	mtspr	SPRN_SPRG_SCRATCH2, r3
-	EXCEPTION_PROLOG_0
+	mtspr	SPRN_SPRG_SCRATCH0, r10
+	mtspr	SPRN_SPRG_SCRATCH1, r11
+	mtspr	SPRN_SPRG_SCRATCH2, r12
 #ifdef CONFIG_PPC_8xx_PERF_EVENT
 	lis	r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
 	lwz	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
 	addi	r11, r11, 1
 	stw	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
 #endif
-	mfcr	r3
+	mfcr	r12
 
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
@@ -482,7 +475,7 @@ _ENTRY(DTLBMiss_jmp)
 	rlwimi	r10, r11, 0, 0, 32 - PAGE_SHIFT - 1	/* Add level 2 base */
 	lwz	r10, 0(r10)	/* Get the pte */
 4:
-	mtcr	r3
+	mtcr	r12
 
 	/* Insert the Guarded flag and APG into the TWC from the Linux PTE.
 	 * It is bit 26-27 of both the Linux PTE and the TWC (at least
@@ -532,9 +525,10 @@ _ENTRY(DTLBMiss_jmp)
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
-	mfspr	r3, SPRN_SPRG_SCRATCH2
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
-	EXCEPTION_EPILOG_0
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
+	mfspr	r12, SPRN_SPRG_SCRATCH2
 	rfi
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -584,7 +578,8 @@ itlbie:
  */
 	. = 0x1400
 DataTLBError:
-	EXCEPTION_PROLOG_0
+	mtspr	SPRN_SPRG_SCRATCH0, r10
+	mtspr	SPRN_SPRG_SCRATCH1, r11
 	mfcr	r10
 
 	mfspr	r11, SPRN_DAR
@@ -619,7 +614,8 @@ dtlbie:
  */
 	. = 0x1c00
 DataBreakpoint:
-	EXCEPTION_PROLOG_0
+	mtspr	SPRN_SPRG_SCRATCH0, r10
+	mtspr	SPRN_SPRG_SCRATCH1, r11
 	mfcr	r10
 	mfspr	r11, SPRN_SRR0
 	cmplwi	cr0, r11, (dtlbie - PAGE_OFFSET)@l
@@ -635,13 +631,15 @@ DataBreakpoint:
 	EXC_XFER_EE(0x1c00, do_break)
 11:
 	mtcr	r10
-	EXCEPTION_EPILOG_0
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
 	rfi
 
 #ifdef CONFIG_PPC_8xx_PERF_EVENT
 	. = 0x1d00
 InstructionBreakpoint:
-	EXCEPTION_PROLOG_0
+	mtspr	SPRN_SPRG_SCRATCH0, r10
+	mtspr	SPRN_SPRG_SCRATCH1, r11
 	lis	r10, (instruction_counter - PAGE_OFFSET)@ha
 	lwz	r11, (instruction_counter - PAGE_OFFSET)@l(r10)
 	addi	r11, r11, -1
@@ -649,7 +647,8 @@ InstructionBreakpoint:
 	lis	r10, 0xffff
 	ori	r10, r10, 0x01
 	mtspr	SPRN_COUNTA, r10
-	EXCEPTION_EPILOG_0
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
 	rfi
 #else
 	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
@@ -664,7 +663,7 @@ InstructionBreakpoint:
  * not enough space in the DataStoreTLBMiss area.
  */
 DTLBMissIMMR:
-	mtcr	r3
+	mtcr	r12
 	/* Set 512k byte guarded page and mark it valid */
 	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID
 	mtspr	SPRN_MD_TWC, r10
@@ -676,12 +675,13 @@ DTLBMissIMMR:
 
 	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
-	mfspr	r3, SPRN_SPRG_SCRATCH2
-	EXCEPTION_EPILOG_0
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
+	mfspr	r12, SPRN_SPRG_SCRATCH2
 	rfi
 
 DTLBMissLinear:
-	mtcr	r3
+	mtcr	r12
 	/* Set 8M byte page and mark it valid */
 	li	r11, MD_PS8MEG | MD_SVALID
 	mtspr	SPRN_MD_TWC, r11
@@ -692,13 +692,14 @@ DTLBMissLinear:
 
 	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
-	mfspr	r3, SPRN_SPRG_SCRATCH2
-	EXCEPTION_EPILOG_0
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
+	mfspr	r12, SPRN_SPRG_SCRATCH2
 	rfi
 
 #ifndef CONFIG_PIN_TLB_TEXT
 ITLBMissLinear:
-	mtcr	r3
+	mtcr	r12
 	/* Set 8M byte page and mark it valid */
 	li	r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC
 	mtspr	SPRN_MI_TWC, r11
@@ -707,8 +708,9 @@ ITLBMissLinear:
 			  _PAGE_PRESENT
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
-	mfspr	r3, SPRN_SPRG_SCRATCH2
-	EXCEPTION_EPILOG_0
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
+	mfspr	r12, SPRN_SPRG_SCRATCH2
 	rfi
 #endif
 
-- 
2.13.3

* [PATCH 3/8] powerpc/8xx: Only perform perf counting when perf is in use.
@ 2018-01-12 12:45 Christophe Leroy
From: Christophe Leroy @ 2018-01-12 12:45 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Scott Wood
  Cc: linux-kernel, linuxppc-dev

In TLB miss handlers, updating the perf counter is only useful
when performing a perf analysis. As it has a noticeable overhead,
let's only do it when needed.

In order to do so, the exit of the miss handlers will be patched
when starting/stopping 'perf': the first register-restore
instruction of each exit point will be replaced by a jump to
the counting code.

Once this is done, CONFIG_PPC_8xx_PERF_EVENT becomes pointless, as
the feature no longer adds any overhead when perf is not in use.
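
The mechanism is visible in the 8xx-pmu.c hunk below: when the first
ITLB/DTLB miss event is added, the register-restore instruction at
each exit point is replaced by a branch to the counting code, and when
the last event is deleted the original mfspr is patched back. A
condensed sketch of the two paths:

	/* enable: redirect the exit to the counting code */
	if (atomic_inc_return(&itlb_miss_ref) == 1)
		patch_branch(&itlb_miss_exit_1,
			     (unsigned long)&itlb_miss_perf, 0);

	/* disable: restore the original 'mfspr r10, SPRN_SPRG_SCRATCH0' */
	insn = PPC_INST_MFSPR | __PPC_RS(R10) |
	       __PPC_SPR(SPRN_SPRG_SCRATCH0);
	if (atomic_dec_return(&itlb_miss_ref) == 0)
		patch_instruction(&itlb_miss_exit_1, insn);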

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/include/asm/ppc-opcode.h  |  2 ++
 arch/powerpc/kernel/entry_32.S         | 10 +++----
 arch/powerpc/kernel/head_8xx.S         | 47 ++++++++++++++++++++----------
 arch/powerpc/perf/8xx-pmu.c            | 52 +++++++++++++++++++++++++++++++---
 arch/powerpc/perf/Makefile             |  2 +-
 arch/powerpc/platforms/Kconfig.cputype |  7 -----
 6 files changed, 88 insertions(+), 32 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ce0930d68857..ab5c1588b487 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -236,6 +236,7 @@
 #define PPC_INST_RFCI			0x4c000066
 #define PPC_INST_RFDI			0x4c00004e
 #define PPC_INST_RFMCI			0x4c00004c
+#define PPC_INST_MFSPR			0x7c0002a6
 #define PPC_INST_MFSPR_DSCR		0x7c1102a6
 #define PPC_INST_MFSPR_DSCR_MASK	0xfc1ffffe
 #define PPC_INST_MTSPR_DSCR		0x7c1103a6
@@ -383,6 +384,7 @@
 #define __PPC_ME64(s)	__PPC_MB64(s)
 #define __PPC_BI(s)	(((s) & 0x1f) << 16)
 #define __PPC_CT(t)	(((t) & 0x0f) << 21)
+#define __PPC_SPR(r)	((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
 
 /*
  * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index e780e1fbf6c2..eb8d01bae8c6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -211,7 +211,7 @@ transfer_to_handler_cont:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 	mtspr	SPRN_NRI, r0
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -301,7 +301,7 @@ stack_ovf:
 	lis	r9,StackOverflow@ha
 	addi	r9,r9,StackOverflow@l
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 	mtspr	SPRN_NRI, r0
 #endif
 	mtspr	SPRN_SRR0,r9
@@ -430,7 +430,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	lwz	r7,_NIP(r1)
 	lwz	r2,GPR2(r1)
 	lwz	r1,GPR1(r1)
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 	mtspr	SPRN_NRI, r0
 #endif
 	mtspr	SPRN_SRR0,r7
@@ -727,7 +727,7 @@ fast_exception_return:
 	lwz	r10,_LINK(r11)
 	mtlr	r10
 	REST_GPR(10, r11)
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 	mtspr	SPRN_NRI, r0
 #endif
 	mtspr	SPRN_SRR1,r9
@@ -978,7 +978,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	.globl exc_exit_restart
 exc_exit_restart:
 	lwz	r12,_NIP(r1)
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 	mtspr	SPRN_NRI, r0
 #endif
 	mtspr	SPRN_SRR0,r12
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index eda582b96dbf..641c9a9d4db2 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -304,12 +304,6 @@ InstructionTLBMiss:
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
 	mtspr	SPRN_SPRG_SCRATCH2, r12
 #endif
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
-	lis	r10, (itlb_miss_counter - PAGE_OFFSET)@ha
-	lwz	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
-	addi	r11, r11, 1
-	stw	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
-#endif
 
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
@@ -392,6 +386,20 @@ _ENTRY(ITLBMiss_cmp)
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
+_ENTRY(itlb_miss_exit_1)
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
+	mfspr	r12, SPRN_SPRG_SCRATCH2
+#endif
+	rfi
+#ifdef CONFIG_PERF_EVENTS
+_ENTRY(itlb_miss_perf)
+	lis	r10, (itlb_miss_counter - PAGE_OFFSET)@ha
+	lwz	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
+	addi	r11, r11, 1
+	stw	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
+#endif
 	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
@@ -429,12 +437,6 @@ DataStoreTLBMiss:
 	mtspr	SPRN_SPRG_SCRATCH0, r10
 	mtspr	SPRN_SPRG_SCRATCH1, r11
 	mtspr	SPRN_SPRG_SCRATCH2, r12
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
-	lis	r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
-	lwz	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
-	addi	r11, r11, 1
-	stw	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
-#endif
 	mfcr	r12
 
 	/* If we are faulting a kernel address, we have to use the
@@ -526,6 +528,18 @@ _ENTRY(DTLBMiss_jmp)
 
 	/* Restore registers */
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
+_ENTRY(dtlb_miss_exit_1)
+	mfspr	r10, SPRN_SPRG_SCRATCH0
+	mfspr	r11, SPRN_SPRG_SCRATCH1
+	mfspr	r12, SPRN_SPRG_SCRATCH2
+	rfi
+#ifdef CONFIG_PERF_EVENTS
+_ENTRY(dtlb_miss_perf)
+	lis	r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
+	lwz	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
+	addi	r11, r11, 1
+	stw	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
+#endif
 	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	mfspr	r12, SPRN_SPRG_SCRATCH2
@@ -635,7 +649,7 @@ DataBreakpoint:
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	rfi
 
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
+#ifdef CONFIG_PERF_EVENTS
 	. = 0x1d00
 InstructionBreakpoint:
 	mtspr	SPRN_SPRG_SCRATCH0, r10
@@ -675,6 +689,7 @@ DTLBMissIMMR:
 
 	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
+_ENTRY(dtlb_miss_exit_2)
 	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	mfspr	r12, SPRN_SPRG_SCRATCH2
@@ -692,6 +707,7 @@ DTLBMissLinear:
 
 	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
+_ENTRY(dtlb_miss_exit_3)
 	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	mfspr	r12, SPRN_SPRG_SCRATCH2
@@ -708,6 +724,7 @@ ITLBMissLinear:
 			  _PAGE_PRESENT
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
+_ENTRY(itlb_miss_exit_2)
 	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	mfspr	r12, SPRN_SPRG_SCRATCH2
@@ -1039,7 +1056,7 @@ initial_mmu:
 #endif
 	/* Disable debug mode entry on breakpoints */
 	mfspr	r8, SPRN_DER
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
+#ifdef CONFIG_PERF_EVENTS
 	rlwinm	r8, r8, 0, ~0xc
 #else
 	rlwinm	r8, r8, 0, ~0x8
@@ -1072,7 +1089,7 @@ swapper_pg_dir:
 abatron_pteptrs:
 	.space	8
 
-#ifdef CONFIG_PPC_8xx_PERF_EVENT
+#ifdef CONFIG_PERF_EVENTS
 	.globl	itlb_miss_counter
 itlb_miss_counter:
 	.space	4
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 3c39f05f0af3..6c0020d1c561 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -18,6 +18,7 @@
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
+#include <asm/code-patching.h>
 
 #define PERF_8xx_ID_CPU_CYCLES		1
 #define PERF_8xx_ID_HW_INSTRUCTIONS	2
@@ -30,8 +31,13 @@
 
 extern unsigned long itlb_miss_counter, dtlb_miss_counter;
 extern atomic_t instruction_counter;
+extern unsigned int itlb_miss_perf, dtlb_miss_perf;
+extern unsigned int itlb_miss_exit_1, itlb_miss_exit_2;
+extern unsigned int dtlb_miss_exit_1, dtlb_miss_exit_2, dtlb_miss_exit_3;
 
 static atomic_t insn_ctr_ref;
+static atomic_t itlb_miss_ref;
+static atomic_t dtlb_miss_ref;
 
 static s64 get_insn_ctr(void)
 {
@@ -96,9 +102,24 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags)
 		val = get_insn_ctr();
 		break;
 	case PERF_8xx_ID_ITLB_LOAD_MISS:
+		if (atomic_inc_return(&itlb_miss_ref) == 1) {
+			unsigned long target = (unsigned long)&itlb_miss_perf;
+
+			patch_branch(&itlb_miss_exit_1, target, 0);
+#ifndef CONFIG_PIN_TLB_TEXT
+			patch_branch(&itlb_miss_exit_2, target, 0);
+#endif
+		}
 		val = itlb_miss_counter;
 		break;
 	case PERF_8xx_ID_DTLB_LOAD_MISS:
+		if (atomic_inc_return(&dtlb_miss_ref) == 1) {
+			unsigned long target = (unsigned long)&dtlb_miss_perf;
+
+			patch_branch(&dtlb_miss_exit_1, target, 0);
+			patch_branch(&dtlb_miss_exit_2, target, 0);
+			patch_branch(&dtlb_miss_exit_3, target, 0);
+		}
 		val = dtlb_miss_counter;
 		break;
 	}
@@ -143,13 +164,36 @@ static void mpc8xx_pmu_read(struct perf_event *event)
 
 static void mpc8xx_pmu_del(struct perf_event *event, int flags)
 {
+	/* mfspr r10, SPRN_SPRG_SCRATCH0 */
+	unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
+			    __PPC_SPR(SPRN_SPRG_SCRATCH0);
+
 	mpc8xx_pmu_read(event);
-	if (event_type(event) != PERF_8xx_ID_HW_INSTRUCTIONS)
-		return;
 
 	/* If it was the last user, stop counting to avoid useles overhead */
-	if (atomic_dec_return(&insn_ctr_ref) == 0)
-		mtspr(SPRN_ICTRL, 7);
+	switch (event_type(event)) {
+	case PERF_8xx_ID_CPU_CYCLES:
+		break;
+	case PERF_8xx_ID_HW_INSTRUCTIONS:
+		if (atomic_dec_return(&insn_ctr_ref) == 0)
+			mtspr(SPRN_ICTRL, 7);
+		break;
+	case PERF_8xx_ID_ITLB_LOAD_MISS:
+		if (atomic_dec_return(&itlb_miss_ref) == 0) {
+			patch_instruction(&itlb_miss_exit_1, insn);
+#ifndef CONFIG_PIN_TLB_TEXT
+			patch_instruction(&itlb_miss_exit_2, insn);
+#endif
+		}
+		break;
+	case PERF_8xx_ID_DTLB_LOAD_MISS:
+		if (atomic_dec_return(&dtlb_miss_ref) == 0) {
+			patch_instruction(&dtlb_miss_exit_1, insn);
+			patch_instruction(&dtlb_miss_exit_2, insn);
+			patch_instruction(&dtlb_miss_exit_3, insn);
+		}
+		break;
+	}
 }
 
 static struct pmu mpc8xx_pmu = {
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 225c9c86d7c0..57ebc655d2ac 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o
 
 obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o
 
-obj-$(CONFIG_PPC_8xx_PERF_EVENT) += 8xx-pmu.o
+obj-$(CONFIG_PPC_8xx) += 8xx-pmu.o
 
 obj-$(CONFIG_PPC64)		+= $(obj64-y)
 obj-$(CONFIG_PPC32)		+= $(obj32-y)
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 73a7ea333e9e..8944b24d2218 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -168,13 +168,6 @@ config PPC_FPU
 	bool
 	default y if PPC64
 
-config PPC_8xx_PERF_EVENT
-	bool "PPC 8xx perf events"
-	depends on PPC_8xx && PERF_EVENTS
-	help
-	  This is Performance Events support for PPC 8xx. The 8xx doesn't
-	  have a PMU but some events are emulated using 8xx features.
-
 config FSL_EMB_PERFMON
 	bool "Freescale Embedded Perfmon"
 	depends on E500 || PPC_83xx
-- 
2.13.3

* [PATCH 4/8] powerpc/8xx: remove unused _PAGE_WRITETHRU
@ 2018-01-12 12:45 Christophe Leroy
From: Christophe Leroy @ 2018-01-12 12:45 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Scott Wood
  Cc: linux-kernel, linuxppc-dev

_PAGE_WRITETHRU is only used in:
* the AMIGA_Z2RAM block driver, which is never enabled on PowerPC
* the video/FB driver, which is for PPC_PMAC only

Therefore, there is no need to spend time handling it in the 8xx
TLB miss handlers.

And by removing it, we free up bit 20, which then avoids having
to clear it on each TLB miss.
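
The net effect on the 8xx PTE layout (see the pte-8xx.h hunk below) is
that _PAGE_ACCESSED moves from 0x0800 into the freed 0x0080 slot:

	-#define _PAGE_WRITETHRU	0x0080	/* software: caching is write through */
	-#define _PAGE_ACCESSED	0x0800	/* software: page referenced */
	+#define _PAGE_ACCESSED	0x0080	/* software: page referenced */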

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/include/asm/nohash/32/pte-8xx.h | 3 +--
 arch/powerpc/include/asm/nohash/pgtable.h    | 2 ++
 arch/powerpc/kernel/head_8xx.S               | 5 -----
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 6dc0180fd5c7..19a5ecaef265 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -41,8 +41,7 @@
 #define _PAGE_GUARDED	0x0010	/* Copied to L1 G entry in DTLB */
 #define _PAGE_USER	0x0020	/* Copied to L1 APG lsb */
 #define _PAGE_EXEC	0x0040	/* Copied to L1 APG */
-#define _PAGE_WRITETHRU	0x0080	/* software: caching is write through */
-#define _PAGE_ACCESSED	0x0800	/* software: page referenced */
+#define _PAGE_ACCESSED	0x0080	/* software: page referenced */
 
 #define _PAGE_RO	0x0600	/* Supervisor RO, User no access */
 
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 5c68f4a59f75..84120d65d0e9 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -212,8 +212,10 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
 #define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 				            _PAGE_COHERENT))
 
+#if _PAGE_WRITETHRU != 0
 #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 				            _PAGE_COHERENT | _PAGE_WRITETHRU))
+#endif
 
 #define pgprot_cached_noncoherent(prot) \
 		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 641c9a9d4db2..6399dcadf51d 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -486,10 +486,6 @@ _ENTRY(DTLBMiss_jmp)
 	 * above.
 	 */
 	rlwimi	r11, r10, 0, 26, 27
-	/* Insert the WriteThru flag into the TWC from the Linux PTE.
-	 * It is bit 25 in the Linux PTE and bit 30 in the TWC
-	 */
-	rlwimi	r11, r10, 32-5, 30, 30
 	mtspr	SPRN_MD_TWC, r11
 
 	/* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
@@ -523,7 +519,6 @@ _ENTRY(DTLBMiss_jmp)
 #else
 	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 #endif
-	rlwimi	r10, r11, 0, 20, 20	/* clear 20 */
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
-- 
2.13.3

* [PATCH 5/8] powerpc/mm: extend _PAGE_PRIVILEGED to all CPUs
@ 2018-01-12 12:45 Christophe Leroy
From: Christophe Leroy @ 2018-01-12 12:45 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Scott Wood
  Cc: linux-kernel, linuxppc-dev

Commit ac29c64089b74 ("powerpc/mm: Replace _PAGE_USER with
_PAGE_PRIVILEGED") introduced _PAGE_PRIVILEGED for BOOK3S/64.

This patch generalises _PAGE_PRIVILEGED to all CPUs, allowing each
platform to have either _PAGE_PRIVILEGED or _PAGE_USER or both.

PPC_8xx has a _PAGE_SHARED flag which is set for, and only for,
all non-user pages. Let's rename it _PAGE_PRIVILEGED to remove
confusion, as it has nothing to do with Linux shared pages.

On BookE, there's a _PAGE_BAP_SR bit which has to be set for kernel
pages: defining _PAGE_PRIVILEGED as _PAGE_BAP_SR makes this generic.
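
With both flags available, whether a PTE is a user one can be tested
generically, as in the pte-common.h hunk below:

	static inline bool pte_user(pte_t pte)
	{
		return (pte_val(pte) & (_PAGE_USER | _PAGE_PRIVILEGED)) == _PAGE_USER;
	}

On platforms defining only one of the two flags, the other defaults
to 0 and the test reduces to the previous behaviour.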

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
 arch/powerpc/include/asm/nohash/32/pte-8xx.h | 10 +---------
 arch/powerpc/include/asm/nohash/pte-book3e.h |  1 +
 arch/powerpc/include/asm/pte-common.h        | 24 ++++++++++++++++--------
 arch/powerpc/kernel/head_8xx.S               |  6 +++---
 arch/powerpc/mm/8xx_mmu.c                    |  2 +-
 arch/powerpc/mm/dump_linuxpagetables.c       | 11 +----------
 arch/powerpc/mm/pgtable.c                    |  3 ++-
 arch/powerpc/mm/pgtable_32.c                 |  9 +--------
 arch/powerpc/mm/pgtable_64.c                 | 14 +-------------
 10 files changed, 28 insertions(+), 54 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 44697817ccc6..db38050e1a98 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -15,7 +15,7 @@
 #define _PAGE_BIT_SWAP_TYPE	0
 
 #define _PAGE_RO		0
-#define _PAGE_SHARED		0
+#define _PAGE_USER		0
 
 #define _PAGE_EXEC		0x00001 /* execute permission */
 #define _PAGE_WRITE		0x00002 /* write access allowed */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 19a5ecaef265..7c7040f015e2 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -31,7 +31,7 @@
 /* Definitions for 8xx embedded chips. */
 #define _PAGE_PRESENT	0x0001	/* Page is valid */
 #define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
-#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */
+#define _PAGE_PRIVILEGED	0x0004	/* No ASID (context) compare */
 #define _PAGE_SPECIAL	0x0008	/* SW entry, forced to 0 by the TLB miss */
 #define _PAGE_DIRTY	0x0100	/* C: page changed */
 
@@ -54,13 +54,5 @@
 /* Until my rework is finished, 8xx still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES	1
 
-/* We need to add _PAGE_SHARED to kernel pages */
-#define _PAGE_KERNEL_RO		(_PAGE_SHARED | _PAGE_RO)
-#define _PAGE_KERNEL_ROX	(_PAGE_SHARED | _PAGE_RO | _PAGE_EXEC)
-#define _PAGE_KERNEL_RW		(_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
-				 _PAGE_HWWRITE)
-#define _PAGE_KERNEL_RWX	(_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
-				 _PAGE_HWWRITE | _PAGE_EXEC)
-
 #endif /* __KERNEL__ */
 #endif /*  _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
index 2da4532ca377..ccee8eb509bb 100644
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ b/arch/powerpc/include/asm/nohash/pte-book3e.h
@@ -55,6 +55,7 @@
 #define _PAGE_KERNEL_RWX	(_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
 #define _PAGE_KERNEL_ROX	(_PAGE_BAP_SR | _PAGE_BAP_SX)
 #define _PAGE_USER		(_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
+#define _PAGE_PRIVILEGED	(_PAGE_BAP_SR)
 
 #define _PAGE_HASHPTE	0
 #define _PAGE_BUSY	0
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index ce142ef99ba7..0e6595a1b9d8 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -8,9 +8,6 @@
 #ifndef _PAGE_HASHPTE
 #define _PAGE_HASHPTE	0
 #endif
-#ifndef _PAGE_SHARED
-#define _PAGE_SHARED	0
-#endif
 #ifndef _PAGE_HWWRITE
 #define _PAGE_HWWRITE	0
 #endif
@@ -45,6 +42,14 @@
 #ifndef _PAGE_PTE
 #define _PAGE_PTE 0
 #endif
+/* At least one of _PAGE_PRIVILEGED or _PAGE_USER must be defined */
+#ifndef _PAGE_PRIVILEGED
+#define _PAGE_PRIVILEGED 0
+#else
+#ifndef _PAGE_USER
+#define _PAGE_USER 0
+#endif
+#endif
 
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
@@ -54,16 +59,18 @@
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
 #ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO		(_PAGE_RO)
+#define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_RO)
 #endif
 #ifndef _PAGE_KERNEL_ROX
-#define _PAGE_KERNEL_ROX	(_PAGE_EXEC | _PAGE_RO)
+#define _PAGE_KERNEL_ROX	(_PAGE_PRIVILEGED | _PAGE_RO | _PAGE_EXEC)
 #endif
 #ifndef _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \
+				 _PAGE_HWWRITE)
 #endif
 #ifndef _PAGE_KERNEL_RWX
-#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \
+				 _PAGE_HWWRITE | _PAGE_EXEC)
 #endif
 #ifndef _PAGE_HPTEFLAGS
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
@@ -85,7 +92,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  */
 static inline bool pte_user(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+	return (pte_val(pte) & (_PAGE_USER | _PAGE_PRIVILEGED)) == _PAGE_USER;
 }
 #endif /* __ASSEMBLY__ */
 
@@ -116,6 +123,7 @@ static inline bool pte_user(pte_t pte)
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
 			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | \
+			 _PAGE_PRIVILEGED | \
 			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
 
 /*
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 6399dcadf51d..642680389b7e 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -678,7 +678,7 @@ DTLBMissIMMR:
 	mtspr	SPRN_MD_TWC, r10
 	mfspr	r10, SPRN_IMMR			/* Get current IMMR */
 	rlwinm	r10, r10, 0, 0xfff80000		/* Get 512 kbytes boundary */
-	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
+	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
 			  _PAGE_PRESENT | _PAGE_NO_CACHE
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
@@ -696,7 +696,7 @@ DTLBMissLinear:
 	li	r11, MD_PS8MEG | MD_SVALID
 	mtspr	SPRN_MD_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
-	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
+	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
 			  _PAGE_PRESENT
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
@@ -715,7 +715,7 @@ ITLBMissLinear:
 	li	r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC
 	mtspr	SPRN_MI_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
-	ori	r10, r10, 0xf0 | MI_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
+	ori	r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
 			  _PAGE_PRESENT
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 0be77709446c..5d53684c2ebd 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -67,7 +67,7 @@ void __init MMU_init_hw(void)
 	/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
 #ifdef CONFIG_PIN_TLB_DATA
 	unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
-	unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY;
+	unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY;
 #ifdef CONFIG_PIN_TLB_IMMR
 	int i = 29;
 #else
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index c2e7dea59490..d9547e1ec5ef 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -112,13 +112,8 @@ struct flag_info {
 
 static const struct flag_info flag_array[] = {
 	{
-#ifdef CONFIG_PPC_BOOK3S_64
-		.mask	= _PAGE_PRIVILEGED,
-		.val	= 0,
-#else
-		.mask	= _PAGE_USER,
+		.mask	= _PAGE_USER | _PAGE_PRIVILEGED,
 		.val	= _PAGE_USER,
-#endif
 		.set	= "user",
 		.clear	= "    ",
 	}, {
@@ -228,10 +223,6 @@ static const struct flag_info flag_array[] = {
 		.mask	= _PAGE_SPECIAL,
 		.val	= _PAGE_SPECIAL,
 		.set	= "special",
-	}, {
-		.mask	= _PAGE_SHARED,
-		.val	= _PAGE_SHARED,
-		.set	= "shared",
 	}
 };
 
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index a03ff3d99e0c..9f361ae571e9 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -54,7 +54,8 @@ static inline int pte_looks_normal(pte_t pte)
 	return 0;
 #else
 	return (pte_val(pte) &
-		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
+		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER |
+		 _PAGE_PRIVILEGED)) ==
 		(_PAGE_PRESENT | _PAGE_USER);
 #endif
 }
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index f6c7f54c0515..d35d9ad3c1cd 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -98,14 +98,7 @@ ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
 	flags &= ~(_PAGE_USER | _PAGE_EXEC);
-
-#ifdef _PAGE_BAP_SR
-	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
-	 * which means that we just cleared supervisor access... oops ;-) This
-	 * restores it
-	 */
-	flags |= _PAGE_BAP_SR;
-#endif
+	flags |= _PAGE_PRIVILEGED;
 
 	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 813ea22c3e00..c9a623c2d8a2 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -244,20 +244,8 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
 	/*
 	 * Force kernel mapping.
 	 */
-#if defined(CONFIG_PPC_BOOK3S_64)
-	flags |= _PAGE_PRIVILEGED;
-#else
 	flags &= ~_PAGE_USER;
-#endif
-
-
-#ifdef _PAGE_BAP_SR
-	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
-	 * which means that we just cleared supervisor access... oops ;-) This
-	 * restores it
-	 */
-	flags |= _PAGE_BAP_SR;
-#endif
+	flags |= _PAGE_PRIVILEGED;
 
 	if (ppc_md.ioremap)
 		return ppc_md.ioremap(addr, size, flags, caller);
-- 
2.13.3

* [PATCH 6/8] powerpc/mm: Introduce _PAGE_NA
@ 2018-01-12 12:45 Christophe Leroy
From: Christophe Leroy @ 2018-01-12 12:45 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Scott Wood
  Cc: linux-kernel, linuxppc-dev

Today, PAGE_NONE is defined as a page not having _PAGE_USER.
In some circumstances, when the CPU supports it, it might be
better to be able to flag a page as NO ACCESS explicitly.

In a following patch, the 8xx will switch to flagging user access
in the PMD, so it will no longer be possible to use _PAGE_USER
as a way to flag a page with no access.
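
Concretely, _PAGE_NA defaults to 0 so that PAGE_NONE is unchanged on
platforms that do not provide it (sketch from the pte-common.h hunk
below):

	#ifndef _PAGE_NA
	#define _PAGE_NA 0
	#endif

	#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_NA)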

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h |  1 +
 arch/powerpc/include/asm/nohash/32/pgtable.h |  2 +-
 arch/powerpc/include/asm/pte-common.h        |  7 +++++--
 arch/powerpc/mm/dump_linuxpagetables.c       | 18 +++++++++++-------
 4 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index db38050e1a98..f1c43d9b0773 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -14,6 +14,7 @@
  */
 #define _PAGE_BIT_SWAP_TYPE	0
 
+#define _PAGE_NA		0
 #define _PAGE_RO		0
 #define _PAGE_USER		0
 
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index cc2bfec3aa3b..504a3c36ce5c 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -282,7 +282,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-	unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+	unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);
 
 	pte_update(ptep, clr, set);
 }
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 0e6595a1b9d8..426a902816c5 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -50,6 +50,9 @@
 #define _PAGE_USER 0
 #endif
 #endif
+#ifndef _PAGE_NA
+#define _PAGE_NA 0
+#endif
 
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
@@ -122,7 +125,7 @@ static inline bool pte_user(pte_t pte)
 /* Mask of bits returned by pte_pgprot() */
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
-			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | \
+			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | _PAGE_NA | \
 			 _PAGE_PRIVILEGED | \
 			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
 
@@ -150,7 +153,7 @@ static inline bool pte_user(pte_t pte)
  *
  * Note due to the way vm flags are laid out, the bits are XWR
  */
-#define PAGE_NONE	__pgprot(_PAGE_BASE)
+#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_NA)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
 #define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
 				 _PAGE_EXEC)
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index d9547e1ec5ef..4e8c6e593276 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -117,16 +117,20 @@ static const struct flag_info flag_array[] = {
 		.set	= "user",
 		.clear	= "    ",
 	}, {
-#if _PAGE_RO == 0
-		.mask	= _PAGE_RW,
+		.mask	= _PAGE_RW | _PAGE_RO | _PAGE_NA,
 		.val	= _PAGE_RW,
-#else
-		.mask	= _PAGE_RO,
-		.val	= 0,
-#endif
 		.set	= "rw",
-		.clear	= "ro",
 	}, {
+		.mask	= _PAGE_RW | _PAGE_RO | _PAGE_NA,
+		.val	= _PAGE_RO,
+		.set	= "ro",
+	}, {
+#if _PAGE_NA != 0
+		.mask	= _PAGE_RW | _PAGE_RO | _PAGE_NA,
+		.val	= _PAGE_NA,
+		.set	= "na",
+	}, {
+#endif
 		.mask	= _PAGE_EXEC,
 		.val	= _PAGE_EXEC,
 		.set	= " X ",
-- 
2.13.3

* [PATCH 7/8] powerpc/8xx: Remove _PAGE_USER and handle user access at PMD level
@ 2018-01-12 12:45 Christophe Leroy
From: Christophe Leroy @ 2018-01-12 12:45 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Scott Wood
  Cc: linux-kernel, linuxppc-dev

As the Linux kernel separates KERNEL and USER address spaces, there
is no need to flag USER access at page level.

Today, the 8xx TLB handlers already handle user access in the L1
entry through Access Protection Groups, so it is natural to move the
user access handling to PMD level once _PAGE_NA allows PAGE_NONE
protection to be handled without _PAGE_USER.

At the same time, as we free up one bit in the PTE, we can use it to
include SPS (the page size flag) in the PTE and avoid handling it at
every TLB miss, hence removing the special handling based on the
compiled-in page size.

For _PAGE_EXEC, we rework it to use the PP PTE bits, avoiding the
copy of the _PAGE_EXEC bit into the L1 entry. Unfortunately we are
not able to put it at the correct location, as it conflicts with the
NA/RO/RW bits for data entries.

The upper bits of the APG in the L1 entry overlap with the PMD base
address. In order to avoid having to filter that out, we set up all
groups so that the upper bits can have any value.
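
The APG encoding change is visible in the mmu-8xx.h hunk below:
instead of four groups keyed on _PAGE_EXEC/_PAGE_USER, two groups
keyed on _PMD_USER are replicated across all 16 slots so that the
upper APG bits, which overlap the PMD base address, can take any
value:

	-#define MI_APG_INIT	0xf4ffffff
	+#define MI_APG_INIT	0x44444444

	-#define MD_APG_INIT	0x4fffffff
	+#define MD_APG_INIT	0x44444444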

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/include/asm/hugetlb.h           |  3 +-
 arch/powerpc/include/asm/mmu-8xx.h           | 34 ++++++++++----------
 arch/powerpc/include/asm/nohash/32/pgalloc.h |  3 +-
 arch/powerpc/include/asm/nohash/32/pte-8xx.h | 14 ++++++---
 arch/powerpc/include/asm/nohash/pgtable.h    |  2 +-
 arch/powerpc/include/asm/pte-common.h        |  6 ++++
 arch/powerpc/kernel/head_8xx.S               | 46 ++++++----------------------
 arch/powerpc/mm/hugetlbpage.c                |  2 +-
 8 files changed, 48 insertions(+), 62 deletions(-)

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 14c9d44f355b..1a4847f67ea8 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -47,8 +47,7 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
 {
 	BUG_ON(!hugepd_ok(hpd));
 #ifdef CONFIG_PPC_8xx
-	return (pte_t *)__va(hpd_val(hpd) &
-			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
 #else
 	return (pte_t *)((hpd_val(hpd) &
 			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 40aa7b0cd0dc..ae68f6c848d3 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -29,17 +29,17 @@
 #define MI_Kp		0x40000000	/* Should always be set */
 
 /*
- * All pages' PP exec bits are set to 000, which means Execute for Supervisor
- * and no Execute for User.
- * Then we use the APG to say whether accesses are according to Page rules,
- * "all Supervisor" rules (Exec for all) and "all User" rules (Exec for noone)
- * Therefore, we define 4 APG groups. msb is _PAGE_EXEC, lsb is _PAGE_USER
- * 0 (00) => Not User, no exec => 11 (all accesses performed as user)
- * 1 (01) => User but no exec => 11 (all accesses performed as user)
- * 2 (10) => Not User, exec => 01 (rights according to page definition)
- * 3 (11) => User, exec => 00 (all accesses performed as supervisor)
- */
-#define MI_APG_INIT	0xf4ffffff
+ * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
+ * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
+ * respectively NA for All or X for Supervisor and no access for User.
+ * Then we use the APG to say whether accesses are according to Page rules or
+ * "all Supervisor" rules (Access to all)
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * We define all 16 groups so that all other bits of APG can take any value
+ */
+#define MI_APG_INIT	0x44444444
 
 /* The effective page number register.  When read, contains the information
  * about the last instruction TLB miss.  When MI_RPN is written, bits in
@@ -102,17 +102,17 @@
 #define MD_Kp		0x40000000	/* Should always be set */
 
 /*
- * All pages' PP data bits are set to either 000 or 011, which means
+ * All pages' PP data bits are set to either 000 or 011 or 001, which means
  * respectively RW for Supervisor and no access for User, or RO for
- * Supervisor and no access for user.
+ * Supervisor and no access for user and NA for ALL.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PAGE_USER
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
  * 0 => No user => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor
- *                                 according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * We define all 16 groups so that all other bits of APG can take any value
  */
-#define MD_APG_INIT	0x4fffffff
+#define MD_APG_INIT	0x44444444
 
 /* The effective page number register.  When read, contains the information
  * about the last instruction TLB miss.  When MD_RPN is written, bits in
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index d072139ff2e5..29d37bd1f3b3 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -61,7 +61,8 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pte_page)
 {
-	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
+	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER |
+		      _PMD_PRESENT);
 }
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 7c7040f015e2..f04cb46ae8a1 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -32,27 +32,33 @@
 #define _PAGE_PRESENT	0x0001	/* Page is valid */
 #define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
 #define _PAGE_PRIVILEGED	0x0004	/* No ASID (context) compare */
-#define _PAGE_SPECIAL	0x0008	/* SW entry, forced to 0 by the TLB miss */
+#define _PAGE_HUGE	0x0008	/* SPS: Small Page Size (1 if 16k, 512k or 8M)*/
 #define _PAGE_DIRTY	0x0100	/* C: page changed */
 
 /* These 4 software bits must be masked out when the L2 entry is loaded
  * into the TLB.
  */
 #define _PAGE_GUARDED	0x0010	/* Copied to L1 G entry in DTLB */
-#define _PAGE_USER	0x0020	/* Copied to L1 APG lsb */
-#define _PAGE_EXEC	0x0040	/* Copied to L1 APG */
+#define _PAGE_SPECIAL	0x0020	/* SW entry */
+#define _PAGE_EXEC	0x0040	/* Copied to PP (bit 21) in ITLB */
 #define _PAGE_ACCESSED	0x0080	/* software: page referenced */
 
+#define _PAGE_NA	0x0200	/* Supervisor NA, User no access */
 #define _PAGE_RO	0x0600	/* Supervisor RO, User no access */
 
 #define _PMD_PRESENT	0x0001
-#define _PMD_BAD	0x0ff0
+#define _PMD_BAD	0x0fd0
 #define _PMD_PAGE_MASK	0x000c
 #define _PMD_PAGE_8M	0x000c
 #define _PMD_PAGE_512K	0x0004
+#define _PMD_USER	0x0020	/* APG 1 */
 
 /* Until my rework is finished, 8xx still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES	1
 
+#ifdef CONFIG_PPC_16K_PAGES
+#define _PAGE_PSIZE	_PAGE_HUGE
+#endif
+
 #endif /* __KERNEL__ */
 #endif /*  _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 84120d65d0e9..2fda3b291d9e 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -103,7 +103,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	return pte;
+	return __pte(pte_val(pte) | _PAGE_HUGE);
 }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 426a902816c5..c4a72c7a8c83 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -53,6 +53,9 @@
 #ifndef _PAGE_NA
 #define _PAGE_NA 0
 #endif
+#ifndef _PAGE_HUGE
+#define _PAGE_HUGE 0
+#endif
 
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
@@ -61,6 +64,9 @@
 #define _PMD_SIZE	0
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
+#ifndef _PMD_USER
+#define _PMD_USER	0
+#endif
 #ifndef _PAGE_KERNEL_RO
 #define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_RO)
 #endif
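
The zero fallbacks above follow the usual pte-common.h pattern: generic
code can OR in the 8xx-only bits unconditionally, and the operation
compiles away on other platforms. A standalone illustration (the
pmd_mkuser helper is hypothetical, not from the patch):

#include <stdio.h>

#ifndef _PMD_USER
#define _PMD_USER 0	/* fallback, as in pte-common.h */
#endif

/* Hypothetical helper: no #ifdef needed at the call site. */
static unsigned int pmd_mkuser(unsigned int pmd)
{
	return pmd | _PMD_USER;	/* compile-time no-op where _PMD_USER is 0 */
}

int main(void)
{
	printf("0x%x\n", pmd_mkuser(0x1));	/* prints 0x1 with the fallback */
	return 0;
}
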
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 642680389b7e..c3b831bb8bad 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -52,11 +52,7 @@
  * Value for the bits that have fixed value in RPN entries.
  * Also used for tagging DAR for DTLBerror.
  */
-#ifdef CONFIG_PPC_16K_PAGES
-#define RPN_PATTERN	(0x00f0 | MD_SPS16K)
-#else
 #define RPN_PATTERN	0x00f0
-#endif
 
 #define PAGE_SHIFT_512K		19
 #define PAGE_SHIFT_8M		23
@@ -358,31 +354,23 @@ _ENTRY(ITLBMiss_cmp)
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
 	mtcr	r12
 #endif
-	/* Insert the APG into the TWC from the Linux PTE. */
-	rlwimi	r11, r10, 0, 25, 26
 	/* Load the MI_TWC with the attributes for this "segment." */
 	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */
 
-#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
-	rlwimi	r10, r11, 1, MI_SPS16K
-#endif
 #ifdef CONFIG_SWAP
 	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
 	and	r11, r11, r10
 	rlwimi	r10, r11, 0, _PAGE_PRESENT
 #endif
-	li	r11, RPN_PATTERN
+	li	r11, RPN_PATTERN | 0x200
 	/* The Linux PTE won't go exactly into the MMU TLB.
-	 * Software indicator bits 20-23 and 28 must be clear.
-	 * Software indicator bits 24, 25, 26, and 27 must be
+	 * Software indicator bits 20 and 23 must be clear.
+	 * Software indicator bits 22, 24, 25, 26, and 27 must be
 	 * set.  All other Linux PTE bits control the behavior
 	 * of the MMU.
 	 */
-#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
-	rlwimi	r10, r11, 0, 0x0ff0	/* Set 24-27, clear 20-23 */
-#else
-	rlwimi	r10, r11, 0, 0x0ff8	/* Set 24-27, clear 20-23,28 */
-#endif
+	rlwimi	r11, r10, 4, 0x0400	/* Copy _PAGE_EXEC into bit 21 */
+	rlwimi	r10, r11, 0, 0x0ff0	/* Set 22, 24-27, clear 20,23 */
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
@@ -419,7 +407,6 @@ _ENTRY(itlb_miss_perf)
 	rlwinm	r10, r11, 0, ~HUGEPD_SHIFT_MASK
 #endif
 	lwz	r10, 0(r10)	/* Get the pte */
-	rlwinm	r11, r11, 0, 0xf
 	b	4b
 
 20:	/* 512k pages */
@@ -428,7 +415,6 @@ _ENTRY(itlb_miss_perf)
 	/* Add level 2 base */
 	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
 	lwz	r10, 0(r10)	/* Get the pte */
-	rlwinm	r11, r11, 0, 0xf
 	b	4b
 #endif
 
@@ -479,20 +465,15 @@ _ENTRY(DTLBMiss_jmp)
 4:
 	mtcr	r12
 
-	/* Insert the Guarded flag and APG into the TWC from the Linux PTE.
-	 * It is bit 26-27 of both the Linux PTE and the TWC (at least
+	/* Insert the Guarded flag into the TWC from the Linux PTE.
+	 * It is bit 27 of both the Linux PTE and the TWC (at least
 	 * I got that right :-).  It will be better when we can put
 	 * this into the Linux pgd/pmd and load it in the operation
 	 * above.
 	 */
-	rlwimi	r11, r10, 0, 26, 27
+	rlwimi	r11, r10, 0, _PAGE_GUARDED
 	mtspr	SPRN_MD_TWC, r11
 
-	/* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
-	 * In 16k pages mode, SPS is always 1 */
-#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
-	rlwimi	r10, r11, 1, MD_SPS16K
-#endif
 	/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
 	 * We also need to know if the insn is a load/store, so:
 	 * Clear _PAGE_PRESENT and load that which will
@@ -508,17 +489,12 @@ _ENTRY(DTLBMiss_jmp)
 	rlwimi	r10, r11, 0, _PAGE_PRESENT
 #endif
 	/* The Linux PTE won't go exactly into the MMU TLB.
-	 * Software indicator bits 22 and 28 must be clear.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set.  All other Linux PTE bits control the behavior
 	 * of the MMU.
 	 */
 	li	r11, RPN_PATTERN
-#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
 	rlwimi	r10, r11, 0, 24, 27	/* Set 24-27 */
-#else
-	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
-#endif
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
 	/* Restore registers */
@@ -552,7 +528,6 @@ _ENTRY(dtlb_miss_perf)
 	rlwinm	r10, r11, 0, ~HUGEPD_SHIFT_MASK
 #endif
 	lwz	r10, 0(r10)	/* Get the pte */
-	rlwinm	r11, r11, 0, 0xf
 	b	4b
 
 20:	/* 512k pages */
@@ -561,7 +536,6 @@ _ENTRY(dtlb_miss_perf)
 	/* Add level 2 base */
 	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
 	lwz	r10, 0(r10)	/* Get the pte */
-	rlwinm	r11, r11, 0, 0xf
 	b	4b
 #endif
 
@@ -712,7 +686,7 @@ _ENTRY(dtlb_miss_exit_3)
 ITLBMissLinear:
 	mtcr	r12
 	/* Set 8M byte page and mark it valid */
-	li	r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC
+	li	r11, MI_PS8MEG | MI_SVALID
 	mtspr	SPRN_MI_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
 	ori	r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
@@ -994,7 +968,7 @@ initial_mmu:
 	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
 	ori	r8, r8, MI_EVALID	/* Mark it valid */
 	mtspr	SPRN_MI_EPN, r8
-	li	r8, MI_PS8MEG | (2 << 5)	/* Set 8M byte page, APG 2 */
+	li	r8, MI_PS8MEG /* Set 8M byte page */
 	ori	r8, r8, MI_SVALID	/* Make it valid */
 	mtspr	SPRN_MI_TWC, r8
 	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
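
The two rlwimi lines added to the ITLB path above do all of the PTE
repacking. As a reading aid, here is a C model of rotate-then-insert-
under-mask — a sketch assuming standard rlwimi semantics and the bit
values shown earlier in this patch, not the kernel's code:

#include <stdio.h>

typedef unsigned int u32;

#define _PAGE_EXEC	0x0040
#define RPN_PATTERN	0x00f0

/* Model of rlwimi rA,rS,SH,MASK: rotate rS left by SH, then insert
 * the result into rA under MASK. */
static u32 rlwimi(u32 ra, u32 rs, unsigned int sh, u32 mask)
{
	u32 rot = sh ? (rs << sh) | (rs >> (32 - sh)) : rs;

	return (ra & ~mask) | (rot & mask);
}

static u32 itlb_rpn_bits(u32 pte)
{
	u32 r11 = RPN_PATTERN | 0x200;

	/* Copy _PAGE_EXEC (0x40) up into PP bit 21 (mask 0x0400). */
	r11 = rlwimi(r11, pte, 4, 0x0400);
	/* Merge the pattern into the PTE: set bits 22 and 24-27,
	 * clear bits 20 and 23 (PowerPC bit numbering). */
	return rlwimi(pte, r11, 0, 0x0ff0);
}

int main(void)
{
	printf("exec pte -> 0x%08x\n", itlb_rpn_bits(_PAGE_EXEC | 1));
	printf("noexec   -> 0x%08x\n", itlb_rpn_bits(1));
	return 0;
}
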
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0eadf9f199de..186ee8a08f6c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -97,7 +97,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 			*hpdp = __hugepd(__pa(new) |
 					 (shift_to_mmu_psize(pshift) << 2));
 #elif defined(CONFIG_PPC_8xx)
-			*hpdp = __hugepd(__pa(new) |
+			*hpdp = __hugepd(__pa(new) | _PMD_USER |
 					 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
 					  _PMD_PAGE_512K) | _PMD_PRESENT);
 #else
-- 
2.13.3

* [PATCH 8/8] powerpc/8xx: Use L1 entry APG to handle _PAGE_ACCESSED for CONFIG_SWAP
  2018-01-12 12:45 [PATCH 1/8] powerpc/8xx: Remove CPU6 ERRATA Workaround Christophe Leroy
                   ` (5 preceding siblings ...)
  2018-01-12 12:45 ` [PATCH 7/8] powerpc/8xx: Remove _PAGE_USER and handle user access at PMD level Christophe Leroy
@ 2018-01-12 12:45 ` Christophe Leroy
  2018-01-17 13:30 ` [1/8] powerpc/8xx: Remove CPU6 ERRATA Workaround Michael Ellerman
  7 siblings, 0 replies; 9+ messages in thread
From: Christophe Leroy @ 2018-01-12 12:45 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Scott Wood
  Cc: linux-kernel, linuxppc-dev

When CONFIG_SWAP is set, the TLB miss handlers also have to take the
_PAGE_ACCESSED flag into account. At the moment this is done by ANDing
_PAGE_ACCESSED into _PAGE_PRESENT, which takes 3 instructions.

This patch uses the APG to handle _PAGE_ACCESSED instead: the
_PAGE_ACCESSED bit is simply copied into the APG field, reducing the
operation to a single instruction (as sketched after the diffstat
below).

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/include/asm/mmu-8xx.h | 34 +++++++++++++++++++++++-----
 arch/powerpc/kernel/head_8xx.S     | 45 +++++++++++++++-----------------------
 arch/powerpc/mm/8xx_mmu.c          |  2 +-
 3 files changed, 47 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index ae68f6c848d3..ee5591fe6efc 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -34,12 +34,20 @@
  * respectively NA for All or X for Supervisor and no access for User.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => No user => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
+ * When that bit is not set access is done iaw "all user"
+ * which means no access iaw page rules.
+ * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
+ * 0x => No access => 11 (all accesses performed as user iaw page definition)
+ * 10 => No user => 01 (all accesses performed according to page definition)
+ * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
  * We define all 16 groups so that all other bits of APG can take any value
  */
+#ifdef CONFIG_SWAP
+#define MI_APG_INIT	0xf4f4f4f4
+#else
 #define MI_APG_INIT	0x44444444
+#endif
 
 /* The effective page number register.  When read, contains the information
  * about the last instruction TLB miss.  When MI_RPN is written, bits in
@@ -107,12 +115,20 @@
  * Supervisor and no access for user and NA for ALL.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => No user => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
+ * When that bit is not set access is done iaw "all user"
+ * which means no access iaw page rules.
+ * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
+ * 0x => No access => 11 (all accesses performed as user iaw page definition)
+ * 10 => No user => 01 (all accesses performed according to page definition)
+ * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
  * We define all 16 groups so that all other bits of APG can take any value
  */
+#ifdef CONFIG_SWAP
+#define MD_APG_INIT	0xf4f4f4f4
+#else
 #define MD_APG_INIT	0x44444444
+#endif
 
 /* The effective page number register.  When read, contains the information
  * about the last instruction TLB miss.  When MD_RPN is written, bits in
@@ -164,6 +180,12 @@
  */
 #define SPRN_M_TW	799
 
+/* APGs */
+#define M_APG0		0x00000000
+#define M_APG1		0x00000020
+#define M_APG2		0x00000040
+#define M_APG3		0x00000060
+
 #ifndef __ASSEMBLY__
 typedef struct {
 	unsigned int id;
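
To see how 0xf4f4f4f4 encodes the table in the comments above, here is a
small standalone decoder — a sketch, not part of the patch, assuming
group 0 occupies the two most significant bits of the Mx_AP layout:

#include <stdio.h>

int main(void)
{
	unsigned int apg = 0xf4f4f4f4;
	int g;

	/* For each group index, bit 1 mirrors _PAGE_ACCESSED and bit 0
	 * mirrors _PMD_USER, as described in the comment above. */
	for (g = 0; g < 16; g++) {
		unsigned int code = (apg >> (30 - 2 * g)) & 3;

		printf("group %2d (%s, %s) -> %d%d\n", g,
		       (g & 2) ? "accessed" : "not accessed",
		       (g & 1) ? "user" : "kernel",
		       (code >> 1) & 1, code & 1);
	}
	return 0;
}
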
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index c3b831bb8bad..d8670a37d70c 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -354,14 +354,13 @@ _ENTRY(ITLBMiss_cmp)
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
 	mtcr	r12
 #endif
-	/* Load the MI_TWC with the attributes for this "segment." */
-	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */
 
 #ifdef CONFIG_SWAP
-	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
-	and	r11, r11, r10
-	rlwimi	r10, r11, 0, _PAGE_PRESENT
+	rlwinm	r11, r10, 31, _PAGE_ACCESSED >> 1
 #endif
+	/* Load the MI_TWC with the attributes for this "segment." */
+	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */
+
 	li	r11, RPN_PATTERN | 0x200
 	/* The Linux PTE won't go exactly into the MMU TLB.
 	 * Software indicator bits 20 and 23 must be clear.
@@ -472,22 +471,14 @@ _ENTRY(DTLBMiss_jmp)
 	 * above.
 	 */
 	rlwimi	r11, r10, 0, _PAGE_GUARDED
-	mtspr	SPRN_MD_TWC, r11
-
-	/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
-	 * We also need to know if the insn is a load/store, so:
-	 * Clear _PAGE_PRESENT and load that which will
-	 * trap into DTLB Error with store bit set accordinly.
-	 */
-	/* PRESENT=0x1, ACCESSED=0x20
-	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
-	 * r10 = (r10 & ~PRESENT) | r11;
-	 */
 #ifdef CONFIG_SWAP
-	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
-	and	r11, r11, r10
-	rlwimi	r10, r11, 0, _PAGE_PRESENT
+	/* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0
+	 * on that bit will represent a Non Access group
+	 */
+	rlwinm	r11, r10, 31, _PAGE_ACCESSED >> 1
 #endif
+	mtspr	SPRN_MD_TWC, r11
+
 	/* The Linux PTE won't go exactly into the MMU TLB.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set.  All other Linux PTE bits control the behavior
@@ -647,8 +638,8 @@ InstructionBreakpoint:
  */
 DTLBMissIMMR:
 	mtcr	r12
-	/* Set 512k byte guarded page and mark it valid */
-	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID
+	/* Set 512k byte guarded page and mark it valid and accessed */
+	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2
 	mtspr	SPRN_MD_TWC, r10
 	mfspr	r10, SPRN_IMMR			/* Get current IMMR */
 	rlwinm	r10, r10, 0, 0xfff80000		/* Get 512 kbytes boundary */
@@ -666,8 +657,8 @@ _ENTRY(dtlb_miss_exit_2)
 
 DTLBMissLinear:
 	mtcr	r12
-	/* Set 8M byte page and mark it valid */
-	li	r11, MD_PS8MEG | MD_SVALID
+	/* Set 8M byte page and mark it valid and accessed */
+	li	r11, MD_PS8MEG | MD_SVALID | M_APG2
 	mtspr	SPRN_MD_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
 	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
@@ -685,8 +676,8 @@ _ENTRY(dtlb_miss_exit_3)
 #ifndef CONFIG_PIN_TLB_TEXT
 ITLBMissLinear:
 	mtcr	r12
-	/* Set 8M byte page and mark it valid */
-	li	r11, MI_PS8MEG | MI_SVALID
+	/* Set 8M byte page and mark it valid,accessed */
+	li	r11, MI_PS8MEG | MI_SVALID | M_APG2
 	mtspr	SPRN_MI_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
 	ori	r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
@@ -969,7 +960,7 @@ initial_mmu:
 	ori	r8, r8, MI_EVALID	/* Mark it valid */
 	mtspr	SPRN_MI_EPN, r8
 	li	r8, MI_PS8MEG /* Set 8M byte page */
-	ori	r8, r8, MI_SVALID	/* Make it valid */
+	ori	r8, r8, MI_SVALID | M_APG2	/* Make it valid, APG 2 */
 	mtspr	SPRN_MI_TWC, r8
 	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
 	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
@@ -996,7 +987,7 @@ initial_mmu:
 	ori	r8, r8, MD_EVALID	/* Mark it valid */
 	mtspr	SPRN_MD_EPN, r8
 	li	r8, MD_PS512K | MD_GUARDED	/* Set 512k byte page */
-	ori	r8, r8, MD_SVALID	/* Make it valid */
+	ori	r8, r8, MD_SVALID | M_APG2	/* Make it valid and accessed */
 	mtspr	SPRN_MD_TWC, r8
 	mr	r8, r9			/* Create paddr for TLB */
 	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 5d53684c2ebd..cf77d755246d 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -79,7 +79,7 @@ void __init MMU_init_hw(void)
 	for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
 		mtspr(SPRN_MD_CTR, ctr | (i << 8));
 		mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
-		mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
+		mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2);
 		mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
 		addr += LARGE_PAGE_SIZE_8M;
 		mem -= LARGE_PAGE_SIZE_8M;
-- 
2.13.3

* Re: [1/8] powerpc/8xx: Remove CPU6 ERRATA Workaround
  2018-01-12 12:45 [PATCH 1/8] powerpc/8xx: Remove CPU6 ERRATA Workaround Christophe Leroy
                   ` (6 preceding siblings ...)
  2018-01-12 12:45 ` [PATCH 8/8] powerpc/8xx: Use L1 entry APG to handle _PAGE_ACCESSED for CONFIG_SWAP Christophe Leroy
@ 2018-01-17 13:30 ` Michael Ellerman
  7 siblings, 0 replies; 9+ messages in thread
From: Michael Ellerman @ 2018-01-17 13:30 UTC (permalink / raw)
  To: Christophe Leroy, Benjamin Herrenschmidt, Paul Mackerras, Scott Wood
  Cc: linuxppc-dev, linux-kernel

On Fri, 2018-01-12 at 12:45:19 UTC, Christophe Leroy wrote:
> CPU6 ERRATA affects only MPC860 revisions prior to C.0. Manufacturing
> of those revisions was stopped in 1999-2000.
> Therefore, it has been almost 20 years since this ERRATA has been
> fixed in the silicon.
> 
> This patch removes the workaround for that ERRATA.
> 
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>

Series applied to powerpc next, thanks.

https://git.kernel.org/powerpc/c/2a45addd21de25f41c8f21a6f08f09

cheers

Thread overview: 9+ messages
2018-01-12 12:45 [PATCH 1/8] powerpc/8xx: Remove CPU6 ERRATA Workaround Christophe Leroy
2018-01-12 12:45 ` [PATCH 2/8] powerpc/8xx: remove EXCEPTION_PROLOG/EPILOG_0 and change r3 to r12 Christophe Leroy
2018-01-12 12:45 ` [PATCH 3/8] powerpc/8xx: Only perform perf counting when perf is in use Christophe Leroy
2018-01-12 12:45 ` [PATCH 4/8] powerpc/8xx: remove unused _PAGE_WRITETHRU Christophe Leroy
2018-01-12 12:45 ` [PATCH 5/8] powerpc/mm: extend _PAGE_PRIVILEGED to all CPUs Christophe Leroy
2018-01-12 12:45 ` [PATCH 6/8] powerpc/mm: Introduce _PAGE_NA Christophe Leroy
2018-01-12 12:45 ` [PATCH 7/8] powerpc/8xx: Remove _PAGE_USER and handle user access at PMD level Christophe Leroy
2018-01-12 12:45 ` [PATCH 8/8] powerpc/8xx: Use L1 entry APG to handle _PAGE_ACCESSED for CONFIG_SWAP Christophe Leroy
2018-01-17 13:30 ` [1/8] powerpc/8xx: Remove CPU6 ERRATA Workaround Michael Ellerman
