* [PATCH v2 0/4] Introduce PMSAv8 memory protection unit
@ 2018-02-16 14:14 Vladimir Murzin
  2018-02-16 14:14 ` [PATCH v2 1/4] ARM: NOMMU: Move PMSAv7 MPU under its own namespace Vladimir Murzin
                   ` (5 more replies)
  0 siblings, 6 replies; 9+ messages in thread
From: Vladimir Murzin @ 2018-02-16 14:14 UTC (permalink / raw)
  To: linux-arm-kernel

Hi,

This series adds support for the PMSAv8 MPU defined by the ARMv8R/M
architecture.

Changelog:

     v1 -> v2
        - Included missed patch.
	- Folded "ARM: NOMMU: Make _stext and _end meet PMSAv8 alignment
	  restrictions" into "ARM: NOMMU: Support PMSAv8 MPU" since it
	  allows us to refer to PMSAv8_MINALIGN in linker scripts.
	- Improved and tested XIP support

    RFC -> v1
        - Rebased on v4.16-rc1
	- Added Tested-by for v7M bits from Andras

Thanks!

Vladimir Murzin (4):
  ARM: NOMMU: Move PMSAv7 MPU under its own namespace
  ARM: NOMMU: Reorganise __setup_mpu
  ARM: NOMMU: Postpone MPU activation till __after_proc_init
  ARM: NOMMU: Support PMSAv8 MPU

 arch/arm/include/asm/mpu.h        | 112 +++++++++-----
 arch/arm/include/asm/v7m.h        |  14 +-
 arch/arm/kernel/asm-offsets.c     |   8 +-
 arch/arm/kernel/head-nommu.S      | 289 ++++++++++++++++++++++++++++-------
 arch/arm/kernel/vmlinux-xip.lds.S |   4 +
 arch/arm/kernel/vmlinux.lds.S     |   7 +
 arch/arm/mm/Makefile              |   2 +-
 arch/arm/mm/nommu.c               |  32 ++++
 arch/arm/mm/pmsa-v7.c             |  59 +++-----
 arch/arm/mm/pmsa-v8.c             | 307 ++++++++++++++++++++++++++++++++++++++
 10 files changed, 701 insertions(+), 133 deletions(-)
 create mode 100644 arch/arm/mm/pmsa-v8.c

-- 
2.0.0

* [PATCH v2 1/4] ARM: NOMMU: Move PMSAv7 MPU under its own namespace
  2018-02-16 14:14 [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
@ 2018-02-16 14:14 ` Vladimir Murzin
  2018-02-16 14:14 ` [PATCH v2 2/4] ARM: NOMMU: Reorganise __setup_mpu Vladimir Murzin
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 9+ messages in thread
From: Vladimir Murzin @ 2018-02-16 14:14 UTC (permalink / raw)
  To: linux-arm-kernel

We are going to support a different MPU whose programming model is not
compatible with PMSAv7, so move the PMSAv7 MPU code under its own namespace.

Tested-by: Szemző András <sza@esh.hu>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
---
 arch/arm/include/asm/mpu.h    | 62 +++++++++++++++-----------------
 arch/arm/include/asm/v7m.h    |  6 ++--
 arch/arm/kernel/asm-offsets.c |  6 ++--
 arch/arm/kernel/head-nommu.S  | 84 ++++++++++++++++++++++++-------------------
 arch/arm/mm/nommu.c           | 26 ++++++++++++++
 arch/arm/mm/pmsa-v7.c         | 59 +++++++++++++-----------------
 6 files changed, 133 insertions(+), 110 deletions(-)

diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index 6d1491c..fbde275 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -14,50 +14,50 @@
 #define MMFR0_PMSAv7		(3 << 4)
 
 /* MPU D/I Size Register fields */
-#define MPU_RSR_SZ		1
-#define MPU_RSR_EN		0
-#define MPU_RSR_SD		8
+#define PMSAv7_RSR_SZ		1
+#define PMSAv7_RSR_EN		0
+#define PMSAv7_RSR_SD		8
 
 /* Number of subregions (SD) */
-#define MPU_NR_SUBREGS		8
-#define MPU_MIN_SUBREG_SIZE	256
+#define PMSAv7_NR_SUBREGS	8
+#define PMSAv7_MIN_SUBREG_SIZE	256
 
 /* The D/I RSR value for an enabled region spanning the whole of memory */
-#define MPU_RSR_ALL_MEM		63
+#define PMSAv7_RSR_ALL_MEM	63
 
 /* Individual bits in the DR/IR ACR */
-#define MPU_ACR_XN		(1 << 12)
-#define MPU_ACR_SHARED		(1 << 2)
+#define PMSAv7_ACR_XN		(1 << 12)
+#define PMSAv7_ACR_SHARED	(1 << 2)
 
 /* C, B and TEX[2:0] bits only have semantic meanings when grouped */
-#define MPU_RGN_CACHEABLE	0xB
-#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
-#define MPU_RGN_STRONGLY_ORDERED 0
+#define PMSAv7_RGN_CACHEABLE		0xB
+#define PMSAv7_RGN_SHARED_CACHEABLE	(PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
+#define PMSAv7_RGN_STRONGLY_ORDERED	0
 
 /* Main region should only be shared for SMP */
 #ifdef CONFIG_SMP
-#define MPU_RGN_NORMAL		(MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#define PMSAv7_RGN_NORMAL	(PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
 #else
-#define MPU_RGN_NORMAL		MPU_RGN_CACHEABLE
+#define PMSAv7_RGN_NORMAL	PMSAv7_RGN_CACHEABLE
 #endif
 
 /* Access permission bits of ACR (only define those that we use)*/
-#define MPU_AP_PL1RO_PL0NA	(0x5 << 8)
-#define MPU_AP_PL1RW_PL0RW	(0x3 << 8)
-#define MPU_AP_PL1RW_PL0R0	(0x2 << 8)
-#define MPU_AP_PL1RW_PL0NA	(0x1 << 8)
+#define PMSAv7_AP_PL1RO_PL0NA	(0x5 << 8)
+#define PMSAv7_AP_PL1RW_PL0RW	(0x3 << 8)
+#define PMSAv7_AP_PL1RW_PL0R0	(0x2 << 8)
+#define PMSAv7_AP_PL1RW_PL0NA	(0x1 << 8)
 
 /* For minimal static MPU region configurations */
-#define MPU_PROBE_REGION	0
-#define MPU_BG_REGION		1
-#define MPU_RAM_REGION		2
-#define MPU_ROM_REGION		3
+#define PMSAv7_PROBE_REGION	0
+#define PMSAv7_BG_REGION	1
+#define PMSAv7_RAM_REGION	2
+#define PMSAv7_ROM_REGION	3
 
 /* Maximum number of regions Linux is interested in */
-#define MPU_MAX_REGIONS		16
+#define MPU_MAX_REGIONS	16
 
-#define MPU_DATA_SIDE		0
-#define MPU_INSTR_SIDE		1
+#define PMSAv7_DATA_SIDE	0
+#define PMSAv7_INSTR_SIDE	1
 
 #ifndef __ASSEMBLY__
 
@@ -75,16 +75,12 @@ struct mpu_rgn_info {
 extern struct mpu_rgn_info mpu_rgn_info;
 
 #ifdef CONFIG_ARM_MPU
-
-extern void __init adjust_lowmem_bounds_mpu(void);
-extern void __init mpu_setup(void);
-
+extern void __init pmsav7_adjust_lowmem_bounds(void);
+extern void __init pmsav7_setup(void);
 #else
-
-static inline void adjust_lowmem_bounds_mpu(void) {}
-static inline void mpu_setup(void) {}
-
-#endif /* !CONFIG_ARM_MPU */
+static inline void pmsav7_adjust_lowmem_bounds(void) {}
+static inline void pmsav7_setup(void) {}
+#endif
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 634e771..aba49e0 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -64,9 +64,9 @@
 #define MPU_CTRL_ENABLE		1
 #define MPU_CTRL_PRIVDEFENA	(1 << 2)
 
-#define MPU_RNR			0x98
-#define MPU_RBAR		0x9c
-#define MPU_RASR		0xa0
+#define PMSAv7_RNR		0x98
+#define PMSAv7_RBAR		0x9c
+#define PMSAv7_RASR		0xa0
 
 /* Cache operations */
 #define	V7M_SCB_ICIALLU		0x250	/* I-cache invalidate all to PoU */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index f369ece..250a985 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -194,9 +194,9 @@ int main(void)
   DEFINE(MPU_RNG_INFO_USED,	offsetof(struct mpu_rgn_info, used));
 
   DEFINE(MPU_RNG_SIZE,		sizeof(struct mpu_rgn));
-  DEFINE(MPU_RGN_DRBAR,		offsetof(struct mpu_rgn, drbar));
-  DEFINE(MPU_RGN_DRSR,		offsetof(struct mpu_rgn, drsr));
-  DEFINE(MPU_RGN_DRACR,		offsetof(struct mpu_rgn, dracr));
+  DEFINE(MPU_RGN_DRBAR,	offsetof(struct mpu_rgn, drbar));
+  DEFINE(MPU_RGN_DRSR,	offsetof(struct mpu_rgn, drsr));
+  DEFINE(MPU_RGN_DRACR,	offsetof(struct mpu_rgn, dracr));
 #endif
   return 0; 
 }
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2e38f85..0d17187 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,14 +68,6 @@ ENTRY(stext)
 	beq	__error_p				@ yes, error 'p'
 
 #ifdef CONFIG_ARM_MPU
-	/* Calculate the size of a region covering just the kernel */
-	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
-	ldr     r6, =(_end)			@ Cover whole kernel
-	sub	r6, r6, r5			@ Minimum size of region to map
-	clz	r6, r6				@ Region size must be 2^N...
-	rsb	r6, r6, #31			@ ...so round up region size
-	lsl	r6, r6, #MPU_RSR_SZ		@ Put size in right field
-	orr	r6, r6, #(1 << MPU_RSR_EN)	@ Set region enabled bit
 	bl	__setup_mpu
 #endif
 
@@ -110,8 +102,6 @@ ENTRY(secondary_startup)
 	ldr	r7, __secondary_data
 
 #ifdef CONFIG_ARM_MPU
-	/* Use MPU region info supplied by __cpu_up */
-	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info
 	bl      __secondary_setup_mpu		@ Initialize the MPU
 #endif
 
@@ -184,7 +174,7 @@ ENDPROC(__after_proc_init)
 .endm
 
 /* Setup a single MPU region, either D or I side (D-side for unified) */
-.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused
+.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
 	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
 	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
 	mcr	p15, 0, \sr, c6, c1, (2 + \side)		@ I/DRSR
@@ -192,14 +182,14 @@ ENDPROC(__after_proc_init)
 #else
 .macro set_region_nr tmp, rgnr, base
 	mov	\tmp, \rgnr
-	str     \tmp, [\base, #MPU_RNR]
+	str     \tmp, [\base, #PMSAv7_RNR]
 .endm
 
 .macro setup_region bar, acr, sr, unused, base
 	lsl     \acr, \acr, #16
 	orr     \acr, \acr, \sr
-	str     \bar, [\base, #MPU_RBAR]
-	str     \acr, [\base, #MPU_RASR]
+	str     \bar, [\base, #PMSAv7_RBAR]
+	str     \acr, [\base, #PMSAv7_RASR]
 .endm
 
 #endif
@@ -210,7 +200,7 @@ ENDPROC(__after_proc_init)
  * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
  * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
  *
- * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
+ * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
 */
 
 ENTRY(__setup_mpu)
@@ -223,7 +213,20 @@ AR_CLASS(mrc	p15, 0, r0, c0, c1, 4)		@ Read ID_MMFR0
 M_CLASS(ldr	r0, [r12, 0x50])
 	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
 	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
-	bxne	lr
+	beq	__setup_pmsa_v7
+
+	ret	lr
+ENDPROC(__setup_mpu)
+
+ENTRY(__setup_pmsa_v7)
+	/* Calculate the size of a region covering just the kernel */
+	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr     r6, =(_end)			@ Cover whole kernel
+	sub	r6, r6, r5			@ Minimum size of region to map
+	clz	r6, r6				@ Region size must be 2^N...
+	rsb	r6, r6, #31			@ ...so round up region size
+	lsl	r6, r6, #PMSAv7_RSR_SZ		@ Put size in right field
+	orr	r6, r6, #(1 << PMSAv7_RSR_EN)	@ Set region enabled bit
 
 	/* Determine whether the D/I-side memory map is unified. We set the
 	 * flags here and continue to use them for the rest of this function */
@@ -234,47 +237,47 @@ M_CLASS(ldr    r0, [r12, #MPU_TYPE])
 	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
 
 	/* Setup second region first to free up r6 */
-	set_region_nr r0, #MPU_RAM_REGION, r12
+	set_region_nr r0, #PMSAv7_RAM_REGION, r12
 	isb
 	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
 	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
-	ldr	r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+	ldr	r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
 
-	setup_region r0, r5, r6, MPU_DATA_SIDE, r12	@ PHYS_OFFSET, shared, enabled
+	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ PHYS_OFFSET, shared, enabled
 	beq	1f					@ Memory-map not unified
-	setup_region r0, r5, r6, MPU_INSTR_SIDE, r12	@ PHYS_OFFSET, shared, enabled
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ PHYS_OFFSET, shared, enabled
 1:	isb
 
 	/* First/background region */
-	set_region_nr r0, #MPU_BG_REGION, r12
+	set_region_nr r0, #PMSAv7_BG_REGION, r12
 	isb
 	/* Execute Never,  strongly ordered, inaccessible to PL0, rw PL1  */
 	mov	r0, #0				@ BG region starts at 0x0
-	ldr	r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
-	mov	r6, #MPU_RSR_ALL_MEM		@ 4GB region, enabled
+	ldr	r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
+	mov	r6, #PMSAv7_RSR_ALL_MEM		@ 4GB region, enabled
 
-	setup_region r0, r5, r6, MPU_DATA_SIDE, r12	@ 0x0, BG region, enabled
+	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ 0x0, BG region, enabled
 	beq	2f					@ Memory-map not unified
-	setup_region r0, r5, r6, MPU_INSTR_SIDE r12	@ 0x0, BG region, enabled
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ 0x0, BG region, enabled
 2:	isb
 
 #ifdef CONFIG_XIP_KERNEL
-	set_region_nr r0, #MPU_ROM_REGION, r12
+	set_region_nr r0, #PMSAv7_ROM_REGION, r12
 	isb
 
-	ldr	r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL)
+	ldr	r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
 
 	ldr	r0, =CONFIG_XIP_PHYS_ADDR		@ ROM start
 	ldr     r6, =(_exiprom)				@ ROM end
 	sub	r6, r6, r0				@ Minimum size of region to map
 	clz	r6, r6					@ Region size must be 2^N...
 	rsb	r6, r6, #31				@ ...so round up region size
-	lsl	r6, r6, #MPU_RSR_SZ			@ Put size in right field
-	orr	r6, r6, #(1 << MPU_RSR_EN)		@ Set region enabled bit
+	lsl	r6, r6, #PMSAv7_RSR_SZ			@ Put size in right field
+	orr	r6, r6, #(1 << PMSAv7_RSR_EN)		@ Set region enabled bit
 
-	setup_region r0, r5, r6, MPU_DATA_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
+	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
 	beq	3f					@ Memory-map not unified
-	setup_region r0, r5, r6, MPU_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
 3:	isb
 #endif
 
@@ -291,7 +294,7 @@ M_CLASS(str	r0, [r12, #MPU_CTRL])
 	isb
 
 	ret	lr
-ENDPROC(__setup_mpu)
+ENDPROC(__setup_pmsa_v7)
 
 #ifdef CONFIG_SMP
 /*
@@ -299,12 +302,21 @@ ENDPROC(__setup_mpu)
  */
 
 ENTRY(__secondary_setup_mpu)
+	/* Use MPU region info supplied by __cpu_up */
+	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info
+
 	/* Probe for v7 PMSA compliance */
 	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
 	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
 	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
-	bne	__error_p
+	beq	__secondary_setup_pmsa_v7
+ 	b	__error_p
+ENDPROC(__secondary_setup_mpu)
 
+/*
+ * r6: pointer at mpu_rgn_info
+ */
+ENTRY(__secondary_setup_pmsa_v7)
 	/* Determine whether the D/I-side memory map is unified. We set the
 	 * flags here and continue to use them for the rest of this function */
 	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
@@ -328,9 +340,9 @@ ENTRY(__secondary_setup_mpu)
 	ldr	r6, [r3, #MPU_RGN_DRSR]
 	ldr	r5, [r3, #MPU_RGN_DRACR]
 
-	setup_region r0, r5, r6, MPU_DATA_SIDE
+	setup_region r0, r5, r6, PMSAv7_DATA_SIDE
 	beq	2f
-	setup_region r0, r5, r6, MPU_INSTR_SIDE
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
 2:	isb
 
 	mrc	p15, 0, r0, c0, c0, 4		@ Reevaluate the MPUIR
@@ -345,7 +357,7 @@ ENTRY(__secondary_setup_mpu)
 	isb
 
 	ret	lr
-ENDPROC(__secondary_setup_mpu)
+ENDPROC(__secondary_setup_pmsa_v7)
 
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_ARM_MPU */
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 7c08796..edbaa47 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -99,6 +99,32 @@ void __init arm_mm_memblock_reserve(void)
 	memblock_reserve(0, 1);
 }
 
+static void __init adjust_lowmem_bounds_mpu(void)
+{
+	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
+
+	switch (pmsa) {
+	case MMFR0_PMSAv7:
+		pmsav7_adjust_lowmem_bounds();
+		break;
+	default:
+		break;
+	}
+}
+
+static void __init mpu_setup(void)
+{
+	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
+
+	switch (pmsa) {
+	case MMFR0_PMSAv7:
+		pmsav7_setup();
+		break;
+	default:
+		break;
+	}
+}
+
 void __init adjust_lowmem_bounds(void)
 {
 	phys_addr_t end;
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index e2853bf..699fa2e 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -102,7 +102,7 @@ static inline u32 irbar_read(void)
 
 static inline void rgnr_write(u32 v)
 {
-	writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR);
+	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
 }
 
 /* Data-side / unified region attributes */
@@ -110,28 +110,28 @@ static inline void rgnr_write(u32 v)
 /* Region access control register */
 static inline void dracr_write(u32 v)
 {
-	u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0);
+	u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);
 
-	writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR);
+	writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
 }
 
 /* Region size register */
 static inline void drsr_write(u32 v)
 {
-	u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16);
+	u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);
 
-	writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR);
+	writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
 }
 
 /* Region base address register */
 static inline void drbar_write(u32 v)
 {
-	writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR);
+	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
 }
 
 static inline u32 drbar_read(void)
 {
-	return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR);
+	return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
 }
 
 /* ARMv7-M only supports a unified MPU, so I-side operations are nop */
@@ -143,11 +143,6 @@ static inline unsigned long irbar_read(void) {return 0;}
 
 #endif
 
-static int __init mpu_present(void)
-{
-	return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
-}
-
 static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
 {
 	unsigned long  subreg, bslots, sslots;
@@ -161,7 +156,7 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r
 
 	bdiff = base - abase;
 	sdiff = p2size - asize;
-	subreg = p2size / MPU_NR_SUBREGS;
+	subreg = p2size / PMSAv7_NR_SUBREGS;
 
 	if ((bdiff % subreg) || (sdiff % subreg))
 		return false;
@@ -172,17 +167,17 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r
 	if (bslots || sslots) {
 		int i;
 
-		if (subreg < MPU_MIN_SUBREG_SIZE)
+		if (subreg < PMSAv7_MIN_SUBREG_SIZE)
 			return false;
 
-		if (bslots + sslots > MPU_NR_SUBREGS)
+		if (bslots + sslots > PMSAv7_NR_SUBREGS)
 			return false;
 
 		for (i = 0; i < bslots; i++)
 			_set_bit(i, &region->subreg);
 
 		for (i = 1; i <= sslots; i++)
-			_set_bit(MPU_NR_SUBREGS - i, &region->subreg);
+			_set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
 	}
 
 	region->base = abase;
@@ -233,7 +228,7 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
 }
 
 /* MPU initialisation functions */
-void __init adjust_lowmem_bounds_mpu(void)
+void __init pmsav7_adjust_lowmem_bounds(void)
 {
 	phys_addr_t  specified_mem_size = 0, total_mem_size = 0;
 	struct memblock_region *reg;
@@ -243,10 +238,7 @@ void __init adjust_lowmem_bounds_mpu(void)
 	unsigned int mem_max_regions;
 	int num, i;
 
-	if (!mpu_present())
-		return;
-
-	/* Free-up MPU_PROBE_REGION */
+	/* Free-up PMSAv7_PROBE_REGION */
 	mpu_min_region_order = __mpu_min_region_order();
 
 	/* How many regions are supported */
@@ -301,12 +293,12 @@ void __init adjust_lowmem_bounds_mpu(void)
 	num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);
 
 	for (i = 0; i < num; i++) {
-		unsigned long  subreg = mem[i].size / MPU_NR_SUBREGS;
+		unsigned long  subreg = mem[i].size / PMSAv7_NR_SUBREGS;
 
 		total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);
 
 		pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
-			 &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg);
+			 &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
 	}
 
 	if (total_mem_size != specified_mem_size) {
@@ -349,7 +341,7 @@ static int __init __mpu_min_region_order(void)
 	u32 drbar_result, irbar_result;
 
 	/* We've kept a region free for this probing */
-	rgnr_write(MPU_PROBE_REGION);
+	rgnr_write(PMSAv7_PROBE_REGION);
 	isb();
 	/*
 	 * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
@@ -388,8 +380,8 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
 		return -ENOMEM;
 
 	/* Writing N to bits 5:1 (RSR_SZ)  specifies region size 2^N+1 */
-	size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
-	size_data |= subregions << MPU_RSR_SD;
+	size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
+	size_data |= subregions << PMSAv7_RSR_SD;
 
 	if (need_flush)
 		flush_cache_all();
@@ -424,18 +416,15 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
 /*
 * Set up default MPU regions, doing nothing if there is no MPU
 */
-void __init mpu_setup(void)
+void __init pmsav7_setup(void)
 {
 	int i, region = 0, err = 0;
 
-	if (!mpu_present())
-		return;
-
 	/* Setup MPU (order is important) */
 
 	/* Background */
 	err |= mpu_setup_region(region++, 0, 32,
-				MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW,
+				PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
 				0, false);
 
 #ifdef CONFIG_XIP_KERNEL
@@ -448,13 +437,13 @@ void __init mpu_setup(void)
                  * with BG region (which is uncachable), thus we need
                  * to clean and invalidate cache.
 		 */
-		bool need_flush = region == MPU_RAM_REGION;
+		bool need_flush = region == PMSAv7_RAM_REGION;
 
 		if (!xip[i].size)
 			continue;
 
 		err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
-					MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL,
+					PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
 					xip[i].subreg, need_flush);
 	}
 #endif
@@ -465,14 +454,14 @@ void __init mpu_setup(void)
 			continue;
 
 		err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
-					MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL,
+					PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
 					mem[i].subreg, false);
 	}
 
 	/* Vectors */
 #ifndef CONFIG_CPU_V7M
 	err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
-				MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL,
+				PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
 				0, false);
 #endif
 	if (err) {
-- 
2.0.0

* [PATCH v2 2/4] ARM: NOMMU: Reorganise __setup_mpu
  2018-02-16 14:14 [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
  2018-02-16 14:14 ` [PATCH v2 1/4] ARM: NOMMU: Move PMSAv7 MPU under its own namespace Vladimir Murzin
@ 2018-02-16 14:14 ` Vladimir Murzin
  2018-02-16 14:14 ` [PATCH v2 3/4] ARM: NOMMU: Postpone MPU activation till __after_proc_init Vladimir Murzin
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 9+ messages in thread
From: Vladimir Murzin @ 2018-02-16 14:14 UTC (permalink / raw)
  To: linux-arm-kernel

Currently, we have mixed code placement between .head.text and .text,
depending on the configuration we are building:

_text                           M       R(UP)   R(SMP)
======================================================
 __setup_mpu                    __HEAD  __HEAD  text
 __after_proc_init              __HEAD  __HEAD  text
 __mmap_switched                text    text    text

We are going to support another variant of the MPU which differs from
PMSAv7 in that overlapping MPU regions are not allowed, so this patch makes
the boundaries between these sections precise and consistent:

_text                           M       R(UP)   R(SMP)
======================================================
 __setup_mpu                    __HEAD  __HEAD  __HEAD
 __after_proc_init              text    text    text
 __mmap_switched                text    text    text

Additionally, it paves the way to postponing MPU activation till
__after_proc_init, where we set SCTLR anyway and can return directly to
__mmap_switched.
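
For reference, a minimal sketch of the resulting entry sequence in stext
(this is the pattern from the hunk below, not new code): the return address
is loaded into lr up front, so __after_proc_init's closing "ret lr"
continues straight into __mmap_switched:

	ldr	lr, =__mmap_switched	@ set return address first
	b	__after_proc_init	@ ends with "ret lr", which now
					@ lands in __mmap_switched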

Tested-by: Szemző András <sza@esh.hu>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
---
 arch/arm/kernel/head-nommu.S | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 0d17187..aaa25a6 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -75,8 +75,8 @@ ENTRY(stext)
 	ldr	r12, [r10, #PROCINFO_INITFUNC]
 	add	r12, r12, r10
 	ret	r12
-1:	bl	__after_proc_init
-	b	__mmap_switched
+1:	ldr	lr, =__mmap_switched
+	b	__after_proc_init
 ENDPROC(stext)
 
 #ifdef CONFIG_SMP
@@ -123,6 +123,7 @@ __secondary_data:
 /*
  * Set the Control Register and Read the process ID.
  */
+	.text
 __after_proc_init:
 #ifdef CONFIG_CPU_CP15
 	/*
@@ -202,6 +203,7 @@ ENDPROC(__after_proc_init)
  *
  * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
 */
+	__HEAD
 
 ENTRY(__setup_mpu)
 
@@ -301,6 +303,7 @@ ENDPROC(__setup_pmsa_v7)
  * r6: pointer at mpu_rgn_info
  */
 
+	.text
 ENTRY(__secondary_setup_mpu)
 	/* Use MPU region info supplied by __cpu_up */
 	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info
-- 
2.0.0

* [PATCH v2 3/4] ARM: NOMMU: Postpone MPU activation till __after_proc_init
  2018-02-16 14:14 [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
  2018-02-16 14:14 ` [PATCH v2 1/4] ARM: NOMMU: Move PMSAv7 MPU under its own namespace Vladimir Murzin
  2018-02-16 14:14 ` [PATCH v2 2/4] ARM: NOMMU: Reorganise __setup_mpu Vladimir Murzin
@ 2018-02-16 14:14 ` Vladimir Murzin
  2018-02-16 14:14 ` [PATCH v2 4/4] ARM: NOMMU: Support PMSAv8 MPU Vladimir Murzin
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 9+ messages in thread
From: Vladimir Murzin @ 2018-02-16 14:14 UTC (permalink / raw)
  To: linux-arm-kernel

This patch postpones MPU activation till __after_proc_init (which is
placed in the .text section) rather than doing it in __setup_mpu. This
allows us to ignore the used-only-once .head.text section while programming
the PMSAv8 MPU (for PMSAv7 it stays covered anyway).
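As a rough sketch of where activation ends up (AR-class path; in the patch
below the instructions carry -eq suffixes, conditional on the MMFR0 PMSA
check), the MPU enable is folded into the SCTLR write that
__after_proc_init performs anyway:

	bic	r0, r0, #CR_BR			@ disable the 'default mem-map'
	orr	r0, r0, #CR_M			@ set SCTLR.M (MPU on)
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	isb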

Tested-by: Szemző András <sza@esh.hu>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
---
 arch/arm/kernel/head-nommu.S | 45 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 23 deletions(-)

diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index aaa25a6..482936a 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -125,11 +125,24 @@ __secondary_data:
  */
 	.text
 __after_proc_init:
+#ifdef CONFIG_ARM_MPU
+M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
+M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
+M_CLASS(ldr	r3, [r12, 0x50])
+AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
+	and	r3, r3, #(MMFR0_PMSA)           @ PMSA field
+	teq	r3, #(MMFR0_PMSAv7)             @ PMSA v7
+#endif
 #ifdef CONFIG_CPU_CP15
 	/*
 	 * CP15 system control register value returned in r0 from
 	 * the CPU init function.
 	 */
+
+#ifdef CONFIG_ARM_MPU
+	biceq	r0, r0, #CR_BR			@ Disable the 'default mem-map'
+	orreq	r0, r0, #CR_M			@ Set SCTLR.M (MPU on)
+#endif
 #if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
 	orr	r0, r0, #CR_A
 #else
@@ -145,7 +158,15 @@ __after_proc_init:
 	bic	r0, r0, #CR_I
 #endif
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+	isb
 #elif defined (CONFIG_CPU_V7M)
+#ifdef CONFIG_ARM_MPU
+	ldreq	r3, [r12, MPU_CTRL]
+	biceq	r3, #MPU_CTRL_PRIVDEFENA
+	orreq	r3, #MPU_CTRL_ENABLE
+	streq	r3, [r12, MPU_CTRL]
+	isb
+#endif
 	/* For V7M systems we want to modify the CCR similarly to the SCTLR */
 #ifdef CONFIG_CPU_DCACHE_DISABLE
 	bic	r0, r0, #V7M_SCB_CCR_DC
@@ -156,9 +177,7 @@ __after_proc_init:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
 	bic	r0, r0, #V7M_SCB_CCR_IC
 #endif
-	movw	r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
-	movt	r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
-	str	r0, [r3]
+	str	r0, [r12, V7M_SCB_CCR]
 #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
 	ret	lr
 ENDPROC(__after_proc_init)
@@ -282,19 +301,6 @@ M_CLASS(ldr    r0, [r12, #MPU_TYPE])
 	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
 3:	isb
 #endif
-
-	/* Enable the MPU */
-AR_CLASS(mrc	p15, 0, r0, c1, c0, 0)		@ Read SCTLR
-AR_CLASS(bic	r0, r0, #CR_BR)			@ Disable the 'default mem-map'
-AR_CLASS(orr	r0, r0, #CR_M)			@ Set SCTRL.M (MPU on)
-AR_CLASS(mcr	p15, 0, r0, c1, c0, 0)		@ Enable MPU
-
-M_CLASS(ldr	r0, [r12, #MPU_CTRL])
-M_CLASS(bic	r0, #MPU_CTRL_PRIVDEFENA)
-M_CLASS(orr	r0, #MPU_CTRL_ENABLE)
-M_CLASS(str	r0, [r12, #MPU_CTRL])
-	isb
-
 	ret	lr
 ENDPROC(__setup_pmsa_v7)
 
@@ -352,13 +358,6 @@ ENTRY(__secondary_setup_pmsa_v7)
 	cmp	r4, #0
 	bgt	1b
 
-	/* Enable the MPU */
-	mrc	p15, 0, r0, c1, c0, 0		@ Read SCTLR
-	bic	r0, r0, #CR_BR			@ Disable the 'default mem-map'
-	orr	r0, r0, #CR_M			@ Set SCTRL.M (MPU on)
-	mcr	p15, 0, r0, c1, c0, 0		@ Enable MPU
-	isb
-
 	ret	lr
 ENDPROC(__secondary_setup_pmsa_v7)
 
-- 
2.0.0

* [PATCH v2 4/4] ARM: NOMMU: Support PMSAv8 MPU
  2018-02-16 14:14 [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
                   ` (2 preceding siblings ...)
  2018-02-16 14:14 ` [PATCH v2 3/4] ARM: NOMMU: Postpone MPU activation till __after_proc_init Vladimir Murzin
@ 2018-02-16 14:14 ` Vladimir Murzin
  2018-03-08 13:36 ` [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
  2018-03-27 13:25 ` Alexandre Torgue
  5 siblings, 0 replies; 9+ messages in thread
From: Vladimir Murzin @ 2018-02-16 14:14 UTC (permalink / raw)
  To: linux-arm-kernel

The ARMv8R/M architecture defines a new memory protection scheme, PMSAv8,
which is not compatible with PMSAv7.

Key differences from PMSAv7 are:
 - Region geometry is defined by base and limit addresses
 - Addresses need to be either 32- or 64-byte aligned
 - No region priority, since overlapping regions are not allowed
 - It is unified, i.e. no distinction between data/instruction regions
 - Memory attributes are controlled via MAIR

This patch implements support for the PMSAv8 MPU defined by the ARMv8R/M
architecture.
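
To illustrate the new geometry, here is a minimal sketch of programming a
single AR-class region, lifted from the kernel-region setup in the hunk
below: a region is a base/limit pair (no power-of-two sizing), the limit is
inclusive and PMSAv8_MINALIGN-aligned, and the memory type comes from a
MAIR index encoded into the limit register:

	ldr	r5, =KERNEL_START
	ldr	r6, =KERNEL_END
	sub	r6, r6, #1				@ limit is inclusive
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)		@ 32/64-byte aligned
	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
	mcr	p15, 0, r5, c6, c8, 4			@ PRBAR1: base + AP/XN
	mcr	p15, 0, r6, c6, c8, 5			@ PRLAR1: limit, attr, EN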

Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
---
 arch/arm/include/asm/mpu.h        |  52 ++++++-
 arch/arm/include/asm/v7m.h        |   8 +
 arch/arm/kernel/asm-offsets.c     |   2 +
 arch/arm/kernel/head-nommu.S      | 163 ++++++++++++++++++++
 arch/arm/kernel/vmlinux-xip.lds.S |   4 +
 arch/arm/kernel/vmlinux.lds.S     |   7 +
 arch/arm/mm/Makefile              |   2 +-
 arch/arm/mm/nommu.c               |   6 +
 arch/arm/mm/pmsa-v8.c             | 307 ++++++++++++++++++++++++++++++++++++++
 9 files changed, 547 insertions(+), 4 deletions(-)
 create mode 100644 arch/arm/mm/pmsa-v8.c

diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index fbde275..5e088c8 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -12,6 +12,7 @@
 /* ID_MMFR0 data relevant to MPU */
 #define MMFR0_PMSA		(0xF << 4)
 #define MMFR0_PMSAv7		(3 << 4)
+#define MMFR0_PMSAv8		(4 << 4)
 
 /* MPU D/I Size Register fields */
 #define PMSAv7_RSR_SZ		1
@@ -47,12 +48,43 @@
 #define PMSAv7_AP_PL1RW_PL0R0	(0x2 << 8)
 #define PMSAv7_AP_PL1RW_PL0NA	(0x1 << 8)
 
+#define PMSAv8_BAR_XN		1
+
+#define PMSAv8_LAR_EN		1
+#define PMSAv8_LAR_IDX(n)	(((n) & 0x7) << 1)
+
+
+#define PMSAv8_AP_PL1RW_PL0NA	(0 << 1)
+#define PMSAv8_AP_PL1RW_PL0RW	(1 << 1)
+#define PMSAv8_AP_PL1RO_PL0RO	(3 << 1)
+
+#ifdef CONFIG_SMP
+#define PMSAv8_RGN_SHARED	(3 << 3) /* inner shareable */
+#else
+#define PMSAv8_RGN_SHARED	(0 << 3)
+#endif
+
+#define PMSAv8_RGN_DEVICE_nGnRnE	0
+#define PMSAv8_RGN_NORMAL		1
+
+#define PMSAv8_MAIR(attr, mt)	((attr) << ((mt) * 8))
+
+#ifdef CONFIG_CPU_V7M
+#define PMSAv8_MINALIGN		32
+#else
+#define PMSAv8_MINALIGN		64
+#endif
+
 /* For minimal static MPU region configurations */
 #define PMSAv7_PROBE_REGION	0
 #define PMSAv7_BG_REGION	1
 #define PMSAv7_RAM_REGION	2
 #define PMSAv7_ROM_REGION	3
 
+/* Fixed for PMSAv8 only */
+#define PMSAv8_XIP_REGION	0
+#define PMSAv8_KERNEL_REGION	1
+
 /* Maximum number of regions Linux is interested in */
 #define MPU_MAX_REGIONS	16
 
@@ -63,9 +95,18 @@
 
 struct mpu_rgn {
 	/* Assume same attributes for d/i-side  */
-	u32 drbar;
-	u32 drsr;
-	u32 dracr;
+	union {
+		u32 drbar;   /* PMSAv7 */
+		u32 prbar;   /* PMSAv8 */
+	};
+	union {
+		u32 drsr;   /* PMSAv7 */
+		u32 prlar;  /* PMSAv8 */
+	};
+	union {
+		u32 dracr;  /* PMSAv7 */
+		u32 unused; /* not used in PMSAv8 */
+	};
 };
 
 struct mpu_rgn_info {
@@ -76,10 +117,15 @@ extern struct mpu_rgn_info mpu_rgn_info;
 
 #ifdef CONFIG_ARM_MPU
 extern void __init pmsav7_adjust_lowmem_bounds(void);
+extern void __init pmsav8_adjust_lowmem_bounds(void);
+
 extern void __init pmsav7_setup(void);
+extern void __init pmsav8_setup(void);
 #else
 static inline void pmsav7_adjust_lowmem_bounds(void) {}
+static inline void pmsav8_adjust_lowmem_bounds(void) {}
 static inline void pmsav7_setup(void) {}
+static inline void pmsav8_setup(void) {}
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index aba49e0..187ccf6 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -68,6 +68,14 @@
 #define PMSAv7_RBAR		0x9c
 #define PMSAv7_RASR		0xa0
 
+#define PMSAv8_RNR		0x98
+#define PMSAv8_RBAR		0x9c
+#define PMSAv8_RLAR		0xa0
+#define PMSAv8_RBAR_A(n)	(PMSAv8_RBAR + 8*(n))
+#define PMSAv8_RLAR_A(n)	(PMSAv8_RLAR + 8*(n))
+#define PMSAv8_MAIR0		0xc0
+#define PMSAv8_MAIR1		0xc4
+
 /* Cache operations */
 #define	V7M_SCB_ICIALLU		0x250	/* I-cache invalidate all to PoU */
 #define	V7M_SCB_ICIMVAU		0x258	/* I-cache invalidate by MVA to PoU */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 250a985..27c5381 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -197,6 +197,8 @@ int main(void)
   DEFINE(MPU_RGN_DRBAR,	offsetof(struct mpu_rgn, drbar));
   DEFINE(MPU_RGN_DRSR,	offsetof(struct mpu_rgn, drsr));
   DEFINE(MPU_RGN_DRACR,	offsetof(struct mpu_rgn, dracr));
+  DEFINE(MPU_RGN_PRBAR,	offsetof(struct mpu_rgn, prbar));
+  DEFINE(MPU_RGN_PRLAR,	offsetof(struct mpu_rgn, prlar));
 #endif
   return 0; 
 }
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 482936a..84c7fa2 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -132,6 +132,25 @@ M_CLASS(ldr	r3, [r12, 0x50])
 AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
 	and	r3, r3, #(MMFR0_PMSA)           @ PMSA field
 	teq	r3, #(MMFR0_PMSAv7)             @ PMSA v7
+	beq	1f
+	teq	r3, #(MMFR0_PMSAv8)		@ PMSA v8
+	/*
+	 * Memory region attributes for PMSAv8:
+	 *
+	 *   n = AttrIndx[2:0]
+	 *                      n       MAIR
+	 *   DEVICE_nGnRnE      000     00000000
+	 *   NORMAL             001     11111111
+	 */
+	ldreq	r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
+		     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
+AR_CLASS(mcreq	p15, 0, r3, c10, c2, 0)		@ MAIR 0
+M_CLASS(streq	r3, [r12, #PMSAv8_MAIR0])
+	moveq	r3, #0
+AR_CLASS(mcreq	p15, 0, r3, c10, c2, 1)		@ MAIR 1
+M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])
+
+1:
 #endif
 #ifdef CONFIG_CPU_CP15
 	/*
@@ -235,6 +254,8 @@ M_CLASS(ldr	r0, [r12, 0x50])
 	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
 	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
 	beq	__setup_pmsa_v7
+	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
+	beq	__setup_pmsa_v8
 
 	ret	lr
 ENDPROC(__setup_mpu)
@@ -304,6 +325,119 @@ M_CLASS(ldr    r0, [r12, #MPU_TYPE])
 	ret	lr
 ENDPROC(__setup_pmsa_v7)
 
+ENTRY(__setup_pmsa_v8)
+	mov	r0, #0
+AR_CLASS(mcr	p15, 0, r0, c6, c2, 1)		@ PRSEL
+M_CLASS(str	r0, [r12, #PMSAv8_RNR])
+	isb
+
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r5, =CONFIG_XIP_PHYS_ADDR		@ ROM start
+	ldr     r6, =(_exiprom)				@ ROM end
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c8, 0)			@ PRBAR0
+AR_CLASS(mcr	p15, 0, r6, c6, c8, 1)			@ PRLAR0
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(0)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(0)])
+#endif
+
+	ldr	r5, =KERNEL_START
+	ldr	r6, =KERNEL_END
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c8, 4)			@ PRBAR1
+AR_CLASS(mcr	p15, 0, r6, c6, c8, 5)			@ PRLAR1
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(1)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(1)])
+
+	/* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r6, =KERNEL_START
+	ldr	r5, =CONFIG_XIP_PHYS_ADDR
+	cmp	r6, r5
+	movcs	r6, r5
+#else
+	ldr	r6, =KERNEL_START
+#endif
+	cmp	r6, #0
+	beq	1f
+
+	mov	r5, #0
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c9, 0)			@ PRBAR2
+AR_CLASS(mcr	p15, 0, r6, c6, c9, 1)			@ PRLAR2
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(2)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(2)])
+
+1:
+	/* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r5, =KERNEL_END
+	ldr	r6, =(_exiprom)
+	cmp	r5, r6
+	movcc	r5, r6
+#else
+	ldr	r5, =KERNEL_END
+#endif
+	mov	r6, #0xffffffff
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c9, 4)			@ PRBAR3
+AR_CLASS(mcr	p15, 0, r6, c6, c9, 5)			@ PRLAR3
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(3)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(3)])
+
+#ifdef CONFIG_XIP_KERNEL
+	/* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
+	ldr	r5, =(_exiprom)
+	ldr	r6, =KERNEL_END
+	cmp	r5, r6
+	movcs	r5, r6
+
+	ldr	r6, =KERNEL_START
+	ldr	r0, =CONFIG_XIP_PHYS_ADDR
+	cmp	r6, r0
+	movcc	r6, r0
+
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+#ifdef CONFIG_CPU_V7M
+	/* There is no alias for n == 4 */
+	mov	r0, #4
+	str	r0, [r12, #PMSAv8_RNR]			@ PRSEL
+	isb
+
+	str	r5, [r12, #PMSAv8_RBAR_A(0)]
+	str	r6, [r12, #PMSAv8_RLAR_A(0)]
+#else
+	mcr	p15, 0, r5, c6, c10, 1			@ PRBAR4
+	mcr	p15, 0, r6, c6, c10, 2			@ PRLAR4
+#endif
+#endif
+	ret	lr
+ENDPROC(__setup_pmsa_v8)
+
 #ifdef CONFIG_SMP
 /*
  * r6: pointer at mpu_rgn_info
@@ -319,6 +453,8 @@ ENTRY(__secondary_setup_mpu)
 	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
 	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
 	beq	__secondary_setup_pmsa_v7
+	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
+	beq	__secondary_setup_pmsa_v8
  	b	__error_p
 ENDPROC(__secondary_setup_mpu)
 
@@ -361,6 +497,33 @@ ENTRY(__secondary_setup_pmsa_v7)
 	ret	lr
 ENDPROC(__secondary_setup_pmsa_v7)
 
+ENTRY(__secondary_setup_pmsa_v8)
+	ldr	r4, [r6, #MPU_RNG_INFO_USED]
+#ifndef CONFIG_XIP_KERNEL
+	add	r4, r4, #1
+#endif
+	mov	r5, #MPU_RNG_SIZE
+	add	r3, r6, #MPU_RNG_INFO_RNGS
+	mla	r3, r4, r5, r3
+
+1:
+	sub	r3, r3, #MPU_RNG_SIZE
+	sub	r4, r4, #1
+
+	mcr	p15, 0, r4, c6, c2, 1		@ PRSEL
+	isb
+
+	ldr	r5, [r3, #MPU_RGN_PRBAR]
+	ldr	r6, [r3, #MPU_RGN_PRLAR]
+
+	mcr	p15, 0, r5, c6, c3, 0		@ PRBAR
+	mcr	p15, 0, r6, c6, c3, 1           @ PRLAR
+
+	cmp	r4, #0
+	bgt	1b
+
+	ret	lr
+ENDPROC(__secondary_setup_pmsa_v8)
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_ARM_MPU */
 #include "head-common.S"
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 12b8759..acc6361 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -13,6 +13,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/mpu.h>
 #include <asm/page.h>
 
 #define PROC_INFO							\
@@ -292,6 +293,9 @@ SECTIONS
 #endif
 
 	BSS_SECTION(0, 0, 8)
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	_end = .;
 
 	STABS_DEBUG
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 84a1ae3..0d6c469 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,6 +12,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/mpu.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -102,6 +103,9 @@ SECTIONS
 	. = ALIGN(1<<SECTION_SHIFT);
 #endif
 
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			IDMAP_TEXT
@@ -295,6 +299,9 @@ SECTIONS
 #endif
 
 	BSS_SECTION(0, 0, 0)
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	_end = .;
 
 	STABS_DEBUG
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9dbb849..d19b209 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
-obj-$(CONFIG_ARM_MPU)		+= pmsa-v7.o
+obj-$(CONFIG_ARM_MPU)		+= pmsa-v7.o pmsa-v8.o
 endif
 
 obj-$(CONFIG_ARM_PTDUMP_CORE)	+= dump.o
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index edbaa47..5dd6c58 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -107,6 +107,9 @@ static void __init adjust_lowmem_bounds_mpu(void)
 	case MMFR0_PMSAv7:
 		pmsav7_adjust_lowmem_bounds();
 		break;
+	case MMFR0_PMSAv8:
+		pmsav8_adjust_lowmem_bounds();
+		break;
 	default:
 		break;
 	}
@@ -120,6 +123,9 @@ static void __init mpu_setup(void)
 	case MMFR0_PMSAv7:
 		pmsav7_setup();
 		break;
+	case MMFR0_PMSAv8:
+		pmsav8_setup();
+		break;
 	default:
 		break;
 	}
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
new file mode 100644
index 0000000..617a83d
--- /dev/null
+++ b/arch/arm/mm/pmsa-v8.c
@@ -0,0 +1,307 @@
+/*
+ * Based on linux/arch/arm/pmsa-v7.c
+ *
+ * ARM PMSAv8 supporting functions.
+ */
+
+#include <linux/memblock.h>
+#include <linux/range.h>
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/mpu.h>
+
+#include <asm/memory.h>
+#include <asm/sections.h>
+
+#include "mm.h"
+
+#ifndef CONFIG_CPU_V7M
+
+#define PRSEL	__ACCESS_CP15(c6, 0, c2, 1)
+#define PRBAR	__ACCESS_CP15(c6, 0, c3, 0)
+#define PRLAR	__ACCESS_CP15(c6, 0, c3, 1)
+
+static inline u32 prlar_read(void)
+{
+	return read_sysreg(PRLAR);
+}
+
+static inline u32 prbar_read(void)
+{
+	return read_sysreg(PRBAR);
+}
+
+static inline void prsel_write(u32 v)
+{
+	write_sysreg(v, PRSEL);
+}
+
+static inline void prbar_write(u32 v)
+{
+	write_sysreg(v, PRBAR);
+}
+
+static inline void prlar_write(u32 v)
+{
+	write_sysreg(v, PRLAR);
+}
+#else
+
+static inline u32 prlar_read(void)
+{
+	return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR);
+}
+
+static inline u32 prbar_read(void)
+{
+	return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR);
+}
+
+static inline void prsel_write(u32 v)
+{
+	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR);
+}
+
+static inline void prbar_write(u32 v)
+{
+	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR);
+}
+
+static inline void prlar_write(u32 v)
+{
+	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR);
+}
+
+#endif
+
+static struct range __initdata io[MPU_MAX_REGIONS];
+static struct range __initdata mem[MPU_MAX_REGIONS];
+
+static unsigned int __initdata mpu_max_regions;
+
+static __init bool is_region_fixed(int number)
+{
+	switch (number) {
+	case PMSAv8_XIP_REGION:
+	case PMSAv8_KERNEL_REGION:
+		return true;
+	default:
+		return false;
+	}
+}
+
+void __init pmsav8_adjust_lowmem_bounds(void)
+{
+	phys_addr_t mem_end;
+	struct memblock_region *reg;
+	bool first = true;
+
+	for_each_memblock(memory, reg) {
+		if (first) {
+			phys_addr_t phys_offset = PHYS_OFFSET;
+
+			/*
+			 * Initially only use memory contiguous from
+			 * PHYS_OFFSET */
+			if (reg->base != phys_offset)
+				panic("First memory bank must be contiguous from PHYS_OFFSET");
+			mem_end = reg->base + reg->size;
+			first = false;
+		} else {
+			/*
+			 * memblock auto merges contiguous blocks, remove
+			 * all blocks afterwards in one go (we can't remove
+			 * blocks separately while iterating)
+			 */
+			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
+				  &mem_end, &reg->base);
+			memblock_remove(reg->base, 0 - reg->base);
+			break;
+		}
+	}
+}
+
+static int __init __mpu_max_regions(void)
+{
+	static int max_regions;
+	u32 mpuir;
+
+	if (max_regions)
+		return max_regions;
+
+	mpuir = read_cpuid_mputype();
+
+	max_regions  = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
+
+	return max_regions;
+}
+
+static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar)
+{
+	if (number > mpu_max_regions ||
+	    number >= MPU_MAX_REGIONS)
+		return -ENOENT;
+
+	dsb();
+	prsel_write(number);
+	isb();
+	prbar_write(bar);
+	prlar_write(lar);
+
+	mpu_rgn_info.rgns[number].prbar = bar;
+	mpu_rgn_info.rgns[number].prlar = lar;
+
+	mpu_rgn_info.used++;
+
+	return 0;
+}
+
+static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start, phys_addr_t end)
+{
+	u32 bar, lar;
+
+	if (is_region_fixed(number))
+		return -EINVAL;
+
+	bar = start;
+	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+	bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
+	lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+	return __pmsav8_setup_region(number, bar, lar);
+}
+
+static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start, phys_addr_t end)
+{
+	u32 bar, lar;
+
+	if (is_region_fixed(number))
+		return -EINVAL;
+
+	bar = start;
+	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+	bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
+	lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
+
+	return __pmsav8_setup_region(number, bar, lar);
+}
+
+static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start, phys_addr_t end)
+{
+	u32 bar, lar;
+
+	if (!is_region_fixed(number))
+		return -EINVAL;
+
+	bar = start;
+	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+	bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
+	lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+	prsel_write(number);
+	isb();
+
+	if (prbar_read() != bar || prlar_read() != lar)
+		return -EINVAL;
+
+	/* Reserved region was set up early, we just need a record for secondaries */
+	mpu_rgn_info.rgns[number].prbar = bar;
+	mpu_rgn_info.rgns[number].prlar = lar;
+
+	mpu_rgn_info.used++;
+
+	return 0;
+}
+
+#ifndef CONFIG_CPU_V7M
+static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start, phys_addr_t end)
+{
+	u32 bar, lar;
+
+	if (number == PMSAv8_KERNEL_REGION)
+		return -EINVAL;
+
+	bar = start;
+	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+	bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
+	lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+	return __pmsav8_setup_region(number, bar, lar);
+}
+#endif
+
+void __init pmsav8_setup(void)
+{
+	int i, err = 0;
+	int region = PMSAv8_KERNEL_REGION;
+
+	/* How many regions are supported ? */
+	mpu_max_regions = __mpu_max_regions();
+
+	/* RAM: single chunk of memory */
+	add_range(mem,  ARRAY_SIZE(mem), 0,  memblock.memory.regions[0].base,
+		  memblock.memory.regions[0].base + memblock.memory.regions[0].size);
+
+	/* IO: cover full 4G range */
+	add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff);
+
+	/* RAM and IO: exclude kernel */
+	subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));
+	subtract_range(io, ARRAY_SIZE(io),  __pa(KERNEL_START), __pa(KERNEL_END));
+
+#ifdef CONFIG_XIP_KERNEL
+	/* RAM and IO: exclude xip */
+	subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+	subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+#endif
+
+#ifndef CONFIG_CPU_V7M
+	/* RAM and IO: exclude vectors */
+	subtract_range(mem, ARRAY_SIZE(mem),  vectors_base, vectors_base + 2 * PAGE_SIZE);
+	subtract_range(io, ARRAY_SIZE(io),  vectors_base, vectors_base + 2 * PAGE_SIZE);
+#endif
+	/* IO: exclude RAM */
+	for (i = 0; i < ARRAY_SIZE(mem); i++)
+		subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end);
+
+	/* Now program MPU */
+
+#ifdef CONFIG_XIP_KERNEL
+	/* ROM */
+	err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+#endif
+	/* Kernel */
+	err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));
+
+	/* IO */
+	for (i = 0; i < ARRAY_SIZE(io); i++) {
+		if (!io[i].end)
+			continue;
+
+		err |= pmsav8_setup_io(region++, io[i].start, io[i].end);
+	}
+
+	/* RAM */
+	for (i = 0; i < ARRAY_SIZE(mem); i++) {
+		if (!mem[i].end)
+			continue;
+
+		err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end);
+	}
+
+	/* Vectors */
+#ifndef CONFIG_CPU_V7M
+	err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE);
+#endif
+	if (err)
+		pr_warn("MPU region initialization failure! %d\n", err);
+	else
+		pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n",
+			mpu_rgn_info.used, mpu_max_regions);
+}
-- 
2.0.0

* [PATCH v2 0/4] Introduce PMSAv8 memory protection unit
  2018-02-16 14:14 [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
                   ` (3 preceding siblings ...)
  2018-02-16 14:14 ` [PATCH v2 4/4] ARM: NOMMU: Support PMSAv8 MPU Vladimir Murzin
@ 2018-03-08 13:36 ` Vladimir Murzin
  2018-03-27  9:06   ` Vladimir Murzin
  2018-03-27 13:25 ` Alexandre Torgue
  5 siblings, 1 reply; 9+ messages in thread
From: Vladimir Murzin @ 2018-03-08 13:36 UTC (permalink / raw)
  To: linux-arm-kernel

On 16/02/18 14:14, Vladimir Murzin wrote:
> Hi,
> 
> This series adds support for the PMSAv8 MPU defined by the ARMv8R/M
> architecture.
> 
> Changelog:
> 
>      v1 -> v2
>         - Included missed patch.
> 	- Folded "ARM: NOMMU: Make _stext and _end meet PMSAv8 alignment
> 	  restrictions" into "ARM: NOMMU: Support PMSAv8 MPU" since it
> 	  allows us to refer to PMSAv8_MINALIGN in linker scripts.
> 	- Improved and tested XIP support
> 
>     RFC -> v1
>         - Rebased on v4.16-rc1
> 	- Added Tested-by for v7M bits from Andras
> 
> Thanks!

OK for patch tracker?

Vladimir

> 
> Vladimir Murzin (4):
>   ARM: NOMMU: Move PMSAv7 MPU under its own namespace
>   ARM: NOMMU: Reorganise __setup_mpu
>   ARM: NOMMU: Postpone MPU activation till __after_proc_init
>   ARM: NOMMU: Support PMSAv8 MPU
> 
>  arch/arm/include/asm/mpu.h        | 112 +++++++++-----
>  arch/arm/include/asm/v7m.h        |  14 +-
>  arch/arm/kernel/asm-offsets.c     |   8 +-
>  arch/arm/kernel/head-nommu.S      | 289 ++++++++++++++++++++++++++++-------
>  arch/arm/kernel/vmlinux-xip.lds.S |   4 +
>  arch/arm/kernel/vmlinux.lds.S     |   7 +
>  arch/arm/mm/Makefile              |   2 +-
>  arch/arm/mm/nommu.c               |  32 ++++
>  arch/arm/mm/pmsa-v7.c             |  59 +++-----
>  arch/arm/mm/pmsa-v8.c             | 307 ++++++++++++++++++++++++++++++++++++++
>  10 files changed, 701 insertions(+), 133 deletions(-)
>  create mode 100644 arch/arm/mm/pmsa-v8.c
> 

* [PATCH v2 0/4] Introduce PMSAv8 memory protection unit
  2018-03-08 13:36 ` [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
@ 2018-03-27  9:06   ` Vladimir Murzin
  2018-03-27 14:03     ` Arnd Bergmann
  0 siblings, 1 reply; 9+ messages in thread
From: Vladimir Murzin @ 2018-03-27  9:06 UTC (permalink / raw)
  To: linux-arm-kernel

On 08/03/18 13:36, Vladimir Murzin wrote:
> On 16/02/18 14:14, Vladimir Murzin wrote:
>> Hi,
>>
>> This series adds support for the PMSAv8 MPU defined by the ARMv8R/M
>> architecture.
>>
>> Changelog:
>>
>>      v1 -> v2
>>      - Included missed patch.
>> 	- Folded "ARM: NOMMU: Make _stext and _end meet PMSAv8 alignment
>> 	  restrictions" into "ARM: NOMMU: Support PMSAv8 MPU" since it
>> 	  allows us to refer to PMSAv8_MINALIGN in linker scripts.
>> 	- Improved and tested XIP support
>>
>>     RFC -> v1
>>      - Rebased on v4.16-rc1
>> 	- Added Tested-by for v7M bits from Andras
>>
>> Thanks!
> 
> OK for patch tracker?

The patches have been on the list for a month and a half without much
attention except from the v7M folks (thanks to them for testing!) and I'm
afraid it will be even harder to make progress with them in the near future.

I'm wondering if the ARM SoC team could pick up the patches?

Thanks
Vladimir

> 
> Vladimir
> 
>>
>> Vladimir Murzin (4):
>>   ARM: NOMMU: Move PMSAv7 MPU under its own namespace
>>   ARM: NOMMU: Reorganise __setup_mpu
>>   ARM: NOMMU: Postpone MPU activation till __after_proc_init
>>   ARM: NOMMU: Support PMSAv8 MPU
>>
>>  arch/arm/include/asm/mpu.h        | 112 +++++++++-----
>>  arch/arm/include/asm/v7m.h        |  14 +-
>>  arch/arm/kernel/asm-offsets.c     |   8 +-
>>  arch/arm/kernel/head-nommu.S      | 289 ++++++++++++++++++++++++++++-------
>>  arch/arm/kernel/vmlinux-xip.lds.S |   4 +
>>  arch/arm/kernel/vmlinux.lds.S     |   7 +
>>  arch/arm/mm/Makefile              |   2 +-
>>  arch/arm/mm/nommu.c               |  32 ++++
>>  arch/arm/mm/pmsa-v7.c             |  59 +++-----
>>  arch/arm/mm/pmsa-v8.c             | 307 ++++++++++++++++++++++++++++++++++++++
>>  10 files changed, 701 insertions(+), 133 deletions(-)
>>  create mode 100644 arch/arm/mm/pmsa-v8.c
>>
> 
> 

* [PATCH v2 0/4] Introduce PMSAv8 memory protection unit
  2018-02-16 14:14 [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
                   ` (4 preceding siblings ...)
  2018-03-08 13:36 ` [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
@ 2018-03-27 13:25 ` Alexandre Torgue
  5 siblings, 0 replies; 9+ messages in thread
From: Alexandre Torgue @ 2018-03-27 13:25 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Vlad,

On 02/16/2018 03:14 PM, Vladimir Murzin wrote:
> Hi,
> 
> This series adds support for the PMSAv8 MPU defined by the ARMv8R/M
> architecture.
> 
> Changelog:
> 
>       v1 -> v2
>          - Included missed patch.
> 	- Folded "ARM: NOMMU: Make _stext and _end meet PMSAv8 alignment
> 	  restrictions" into "ARM: NOMMU: Support PMSAv8 MPU" since it
> 	  allows us to refer to PMSAv8_MINALIGN in linker scripts.
> 	- Improved and tested XIP support
> 
>      RFC -> v1
>          - Rebased on v4.16-rc1
> 	- Added Tested-by for v7M bits from Andras
> 
> Thanks!
> 
> Vladimir Murzin (4):
>    ARM: NOMMU: Move PMSAv7 MPU under its own namespace
>    ARM: NOMMU: Reorganise __setup_mpu
>    ARM: NOMMU: Postpone MPU activation till __after_proc_init
>    ARM: NOMMU: Support PMSAv8 MPU
> 
>   arch/arm/include/asm/mpu.h        | 112 +++++++++-----
>   arch/arm/include/asm/v7m.h        |  14 +-
>   arch/arm/kernel/asm-offsets.c     |   8 +-
>   arch/arm/kernel/head-nommu.S      | 289 ++++++++++++++++++++++++++++-------
>   arch/arm/kernel/vmlinux-xip.lds.S |   4 +
>   arch/arm/kernel/vmlinux.lds.S     |   7 +
>   arch/arm/mm/Makefile              |   2 +-
>   arch/arm/mm/nommu.c               |  32 ++++
>   arch/arm/mm/pmsa-v7.c             |  59 +++-----
>   arch/arm/mm/pmsa-v8.c             | 307 ++++++++++++++++++++++++++++++++++++++
>   10 files changed, 701 insertions(+), 133 deletions(-)
>   create mode 100644 arch/arm/mm/pmsa-v8.c
> 

Just tested on an stm32f769-disco board on the stm32-next branch. I don't
see any regressions.

Tested-by: Alexandre TORGUE <alexandre.torgue@st.com>

Thanks

Alex

* [PATCH v2 0/4] Introduce PMSAv8 memory protection unit
  2018-03-27  9:06   ` Vladimir Murzin
@ 2018-03-27 14:03     ` Arnd Bergmann
  0 siblings, 0 replies; 9+ messages in thread
From: Arnd Bergmann @ 2018-03-27 14:03 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Mar 27, 2018 at 11:06 AM, Vladimir Murzin
<vladimir.murzin@arm.com> wrote:
> On 08/03/18 13:36, Vladimir Murzin wrote:
>> On 16/02/18 14:14, Vladimir Murzin wrote:
>>> Hi,
>>>
>>> This series adds support for the PMSAv8 MPU defined by the ARMv8R/M
>>> architecture.
>>>
>>> Changelog:
>>>
>>>      v1 -> v2
>>>      - Included missed patch.
>>>      - Folded "ARM: NOMMU: Make _stext and _end meet PMSAv8 alignment
>>>        restrictions" into "ARM: NOMMU: Support PMSAv8 MPU" since it
>>>        allows us to refer to PMSAv8_MINALIGN in linker scripts.
>>>      - Improved and tested XIP support
>>>
>>>     RFC -> v1
>>>      - Rebased on v4.16-rc1
>>>      - Added Tested-by for v7M bits from Andras
>>>
>>> Thanks!
>>
>> OK for patch tracker?
>
> The patches were on the list for a month and half without much attention
> except v7M folks (thanks them for testing!) and I'm afraid it would be even
> harder to make progress with them in nearby feature.
>
> I'm wondering if ARM SoC team can pick the patches?

This is really core code that should go through Russell's tree.

      Arnd

Thread overview: 9 messages
2018-02-16 14:14 [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
2018-02-16 14:14 ` [PATCH v2 1/4] ARM: NOMMU: Move PMSAv7 MPU under its own namespace Vladimir Murzin
2018-02-16 14:14 ` [PATCH v2 2/4] ARM: NOMMU: Reorganise __setup_mpu Vladimir Murzin
2018-02-16 14:14 ` [PATCH v2 3/4] ARM: NOMMU: Postpone MPU activation till __after_proc_init Vladimir Murzin
2018-02-16 14:14 ` [PATCH v2 4/4] ARM: NOMMU: Support PMSAv8 MPU Vladimir Murzin
2018-03-08 13:36 ` [PATCH v2 0/4] Introduce PMSAv8 memory protection unit Vladimir Murzin
2018-03-27  9:06   ` Vladimir Murzin
2018-03-27 14:03     ` Arnd Bergmann
2018-03-27 13:25 ` Alexandre Torgue
