* [PATCH 0/4] MIPS Read Inhibit/eXecute Inhibit support.
From: David Daney @ 2010-02-05 23:26 UTC (permalink / raw)
  To: Ralf Baechle, linux-mips

This patch set adds execute and read inhibit support.  By default,
glibc-based toolchains create the mappings for the data areas of a
program and its shared libraries with PROT_EXEC cleared.  With this
patch set applied, a SIGSEGV is correctly delivered if an attempt is
made to execute from those data areas.
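
For illustration, here is a minimal user-space check (not part of this
series; the stub encoding and the cacheflush() caveat are my own
sketch) that should die with SIGSEGV once XI enforcement is active:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* jr $31 ; nop: a minimal MIPS return sequence */
	static const unsigned int stub[2] = { 0x03e00008, 0x00000000 };
	/* PROT_EXEC deliberately omitted, as for program data areas */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memcpy(p, stub, sizeof(stub));
	/* a real test would also cacheflush() the buffer first */
	((void (*)(void))p)();	/* expect SIGSEGV with XI enabled */
	printf("executed from a mapping without PROT_EXEC\n");
	return 0;
}

Without RI/XI support the indirect call above silently succeeds, since
the hardware cannot tell execute permission apart from read permission.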

We have been running this patch for close to a year.  So far it seems
to work well, so I ported it to the HEAD for your enjoyment.

I will reply with the four patches.

David Daney (4):
   MIPS: Use 64-bit stores to c0_entrylo on 64-bit kernels.
   MIPS: Add accessor functions and bit definitions for c0_PageGrain
   MIPS: Add TLBP to uasm.
   MIPS: Implement Read Inhibit/eXecute Inhibit

  arch/mips/Kconfig                    |    7 ++
  arch/mips/include/asm/mipsregs.h     |   11 +++
  arch/mips/include/asm/pgtable-64.h   |    4 +
  arch/mips/include/asm/pgtable-bits.h |   59 ++++++++++++-
  arch/mips/include/asm/pgtable.h      |   39 ++++++++-
  arch/mips/include/asm/uasm.h         |    1 +
  arch/mips/mm/cache.c                 |   11 +++
  arch/mips/mm/fault.c                 |   23 +++++
  arch/mips/mm/init.c                  |    2 +-
  arch/mips/mm/tlb-r4k.c               |   15 +++-
  arch/mips/mm/tlbex.c                 |  165 ++++++++++++++++++++++++++++-----
  arch/mips/mm/uasm.c                  |    5 +-
  12 files changed, 308 insertions(+), 34 deletions(-)

* [PATCH 1/4] MIPS: Use 64-bit stores to c0_entrylo on 64-bit kernels.
From: David Daney @ 2010-02-05 23:27 UTC (permalink / raw)
  To: linux-mips, ralf; +Cc: David Daney

64-bit CPUs have 64-bit c0_entrylo{0,1} registers.  We should use the
64-bit dmtc0 instruction to set them.  This becomes important if we
want to set the RI and XI bits present in some processors.
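
For reference, UASM_i_MTC0() picks the width-appropriate instruction at
build time; the pattern in uasm.h is roughly the following sketch
(exact macro arguments may differ):

#ifdef CONFIG_64BIT
# define UASM_i_MTC0(buf, val, reg...) uasm_i_dmtc0(buf, val, reg)
#else
# define UASM_i_MTC0(buf, val, reg...) uasm_i_mtc0(buf, val, reg)
#endif

so 64-bit kernels emit dmtc0 while 32-bit kernels keep using mtc0.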

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
---
 arch/mips/mm/tlbex.c |   20 ++++++++++----------
 1 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 2c68849..35431e1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -460,14 +460,14 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
 	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
-	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 	/* convert to entrylo1 */
 	if (small_sequence)
 		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 	else
 		UASM_i_ADDU(p, pte, pte, tmp);
 
-	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
 static __cpuinit void build_huge_handler_tail(u32 **p,
@@ -686,18 +686,18 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
 
 		/* The pte entries are pre-shifted */
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	}
 #else
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
@@ -706,14 +706,14 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 		build_tlb_probe_entry(p);
 	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
 	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
-	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
 	if (r45k_bvahwbug())
 		uasm_i_mfc0(p, tmp, C0_INDEX);
 	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
-	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 #endif
 }
 
-- 
1.6.0.6

* [PATCH 2/4] MIPS: Add accessor functions and bit definitions for c0_PageGrain
From: David Daney @ 2010-02-05 23:27 UTC (permalink / raw)
  To: linux-mips, ralf; +Cc: David Daney

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
---
 arch/mips/include/asm/mipsregs.h |   11 +++++++++++
 1 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index b30819c..9893758 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -251,6 +251,14 @@
 #define PL_256M		28
 
 /*
+ * PageGrain bits
+ */
+#define PG_RIE		(_ULCAST_(1) <<  31)
+#define PG_XIE		(_ULCAST_(1) <<  30)
+#define PG_ELPA		(_ULCAST_(1) <<  29)
+#define PG_ESP		(_ULCAST_(1) <<  28)
+
+/*
  * R4x00 interrupt enable / cause bits
  */
 #define IE_SW0          (_ULCAST_(1) <<  8)
@@ -840,6 +848,9 @@ do {									\
 #define read_c0_pagemask()	__read_32bit_c0_register($5, 0)
 #define write_c0_pagemask(val)	__write_32bit_c0_register($5, 0, val)
 
+#define read_c0_pagegrain()	__read_32bit_c0_register($5, 1)
+#define write_c0_pagegrain(val)	__write_32bit_c0_register($5, 1, val)
+
 #define read_c0_wired()		__read_32bit_c0_register($6, 0)
 #define write_c0_wired(val)	__write_32bit_c0_register($6, 0, val)
 
-- 
1.6.0.6

* [PATCH 3/4] MIPS: Add TLBP to uasm.
From: David Daney @ 2010-02-05 23:27 UTC (permalink / raw)
  To: linux-mips, ralf; +Cc: David Daney

The soon to follow Read Inhibit/eXecute Inhibit patch needs TLBP
support in uasm.
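
With this in place, a generated handler can read a TLB entry back after
a probe; roughly (hypothetical buffer name, shown only for the call
pattern, and some cores need a hazard barrier between the two
instructions):

	u32 *p = handler_buf;	/* hypothetical code buffer */

	uasm_i_tlbp(&p);	/* probe for the faulting entry */
	uasm_i_tlbr(&p);	/* read it back into EntryHi/EntryLo{0,1} */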

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
---
 arch/mips/include/asm/uasm.h |    1 +
 arch/mips/mm/uasm.c          |    5 ++++-
 2 files changed, 5 insertions(+), 1 deletions(-)

diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 3d153ed..b18588b 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -95,6 +95,7 @@ Ip_u2u1u3(_srl);
 Ip_u3u1u2(_subu);
 Ip_u2s3u1(_sw);
 Ip_0(_tlbp);
+Ip_0(_tlbr);
 Ip_0(_tlbwi);
 Ip_0(_tlbwr);
 Ip_u3u1u2(_xor);
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index e3ca0f7..8f4f14d 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,7 +63,8 @@ enum opcode {
 	insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
 	insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
 	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
-	insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, insn_dins
+	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
+	insn_dins
 };
 
 struct insn {
@@ -128,6 +129,7 @@ static struct insn insn_table[] __cpuinitdata = {
 	{ insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
 	{ insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
+	{ insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
 	{ insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
 	{ insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
 	{ insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
@@ -381,6 +383,7 @@ I_u2u1u3(_srl)
 I_u3u1u2(_subu)
 I_u2s3u1(_sw)
 I_0(_tlbp)
+I_0(_tlbr)
 I_0(_tlbwi)
 I_0(_tlbwr)
 I_u3u1u2(_xor)
-- 
1.6.0.6

* [PATCH 4/4] MIPS: Implement Read Inhibit/eXecute Inhibit
From: David Daney @ 2010-02-05 23:27 UTC (permalink / raw)
  To: linux-mips, ralf; +Cc: David Daney

The SmartMIPS ASE specifies how Read Inhibit (RI) and eXecute Inhibit
(XI) bits in the page tables work.  The upper two bits of EntryLo{0,1}
are RI and XI when the feature is enabled in the PageGrain register.
SmartMIPS only covers 32-bit systems.  Cavium Octeon+ extends this to
64-bit systems by continuing to place the RI and XI bits at the top of
EntryLo even when EntryLo is 64 bits wide.
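
Schematically, a 64-bit EntryLo with this extension looks as follows
(field widths not to scale):

	 63 62 61 .......... 6  5..3   2  1  0
	+--+--+---------------+------+--+--+--+
	|RI|XI|      PFN      |  C   | D| V| G|
	+--+--+---------------+------+--+--+--+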

This patch only targets Octeon+, but should be trivial to adapt for
any 32-bit SmartMIPS system.

Because we need to carry the RI and XI bits in the PTE, the layout of
the PTE is changed.  There is a two-instruction overhead in the TLB
refill hot path to get the EntryLo bits into the proper position.
Also, the TLB load exception handler has to probe the TLB to check
whether RI or XI caused the exception.
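
The two-instruction sequence mentioned above is the generated
equivalent of the pte_to_entrylo() helper added below; with this PTE
layout it comes out as (schematic only, register choice arbitrary):

	dsrl	k0, k0, 6	# strip software bits; XI -> bit 0, RI -> bit 1
	drotr	k0, k0, 2	# rotate RI/XI into bits 63:62, rest into place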

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
---
 arch/mips/Kconfig                    |    7 ++
 arch/mips/include/asm/pgtable-64.h   |    4 +
 arch/mips/include/asm/pgtable-bits.h |   59 ++++++++++++++-
 arch/mips/include/asm/pgtable.h      |   39 +++++++++-
 arch/mips/mm/cache.c                 |   11 +++
 arch/mips/mm/fault.c                 |   23 ++++++
 arch/mips/mm/init.c                  |    2 +-
 arch/mips/mm/tlb-r4k.c               |   15 +++-
 arch/mips/mm/tlbex.c                 |  145 ++++++++++++++++++++++++++++++----
 9 files changed, 282 insertions(+), 23 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ed8d5b5..a79c424 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1484,6 +1484,13 @@ config 64BIT
 
 endchoice
 
+config USE_RI_XI_PAGE_BITS
+	bool "Use Read Inhibit (RI) and eXecute Inhibit (XI) page bits"
+	depends on CPU_CAVIUM_OCTEON
+	help
+	  This option enables the kernel to enforce PROT_EXEC and
+	  PROT_READ memory protection in mapped memory.
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 2c1d194..9606d26 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -215,6 +215,10 @@ static inline void pud_clear(pud_t *pudp)
 #ifdef CONFIG_CPU_VR41XX
 #define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
+#elif defined(_PAGE_NO_EXEC)
+/* The NO_READ and NO_EXEC bits add an extra two bits */
+#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
 #else
 #define pte_pfn(x)		((unsigned long)((x).pte >> PAGE_SHIFT))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 1073e6d..637d4f6 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -50,7 +50,48 @@
 #define _CACHE_SHIFT                3
 #define _CACHE_MASK                 (7<<3)
 
-#else
+#elif defined(CONFIG_USE_RI_XI_PAGE_BITS)
+
+/*
+ * When using the RI/XI bit support, we have 14 bits of flags below
+ * the physical address. The RI/XI bits are placed such that an SRL 6
+ * can strip off the software bits, then a ROTR 2 can move the RI/XI
+ * into bits [63:62]. This also limits the physical address to 56 bits,
+ * which is more than we need right now. Octeon CSRs use 48 bits.
+ */
+#define _PAGE_PRESENT               (1<<0)  /* implemented in software */
+#define _PAGE_WRITE                 (1<<2)  /* implemented in software */
+#define _PAGE_ACCESSED              (1<<3)  /* implemented in software */
+#define _PAGE_MODIFIED              (1<<4)  /* implemented in software */
+#define _PAGE_FILE                  (1<<4)  /* set:pagecache unset:swap */
+#define _PAGE_HUGE                  (1<<5)  /* huge tlb page */
+#define _PAGE_NO_EXEC               (1<<6)  /* Page cannot be executed */
+#define _PAGE_NO_READ               (1<<7)  /* Page cannot be read */
+#define _PAGE_GLOBAL                (1<<8)
+#define _PAGE_VALID                 (1<<9)
+#define _PAGE_SILENT_READ           (1<<9)  /* synonym                 */
+#define _PAGE_DIRTY                 (1<<10) /* The MIPS dirty bit      */
+#define _PAGE_SILENT_WRITE          (1<<10)
+#define _CACHE_SHIFT                11
+#define _CACHE_MASK                 (7<<_CACHE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * pte_to_entrylo converts a page table entry (PTE) into a Mips
+ * entrylo0/1 value.
+ */
+static inline uint64_t pte_to_entrylo(unsigned long pte_val)
+{
+	/*
+	 * C has no way to express that this is a DSRL 6 followed by a
+	 * ROTR 2.  Luckily in the fast path this is done in
+	 * assembly.
+	 */
+	return (pte_val >> 8) | ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << 56);
+}
+#endif
+
+#else /* !CONFIG_USE_RI_XI_PAGE_BITS */
 
 #define _PAGE_PRESENT               (1<<0)  /* implemented in software */
 #define _PAGE_READ                  (1<<1)  /* implemented in software */
@@ -82,6 +123,18 @@
 #define _CACHE_MASK                 (7<<9)
 
 #endif
+
+#ifndef __ASSEMBLY__
+/*
+ * pte_to_entrylo converts a page table entry (PTE) into a Mips
+ * entrylo0/1 value.
+ */
+static inline uint64_t pte_to_entrylo(unsigned long pte_val)
+{
+	return pte_val >> 6;
+}
+#endif
+
 #endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */
 
 
@@ -130,7 +183,11 @@
 
 #endif
 
+#ifdef _PAGE_NO_READ
+#define __READABLE	(_PAGE_SILENT_READ | _PAGE_ACCESSED)
+#else
 #define __READABLE	(_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
+#endif
 #define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
 
 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 02335fd..9c2e5c9 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -21,6 +21,31 @@
 struct mm_struct;
 struct vm_area_struct;
 
+#ifdef _PAGE_NO_READ
+#define PAGE_BASE_FLAGS (_PAGE_PRESENT | _page_cachable_default)
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
+#define PAGE_SHARED	__pgprot(PAGE_BASE_FLAGS | _PAGE_WRITE)
+#define PAGE_COPY __pgprot(PAGE_BASE_FLAGS | _PAGE_NO_EXEC)
+#define PAGE_READONLY __pgprot(PAGE_BASE_FLAGS)
+#define PAGE_KERNEL __pgprot(PAGE_BASE_FLAGS | __READABLE | __WRITEABLE | _PAGE_GLOBAL)
+#define __P000	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __P001	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P010	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __P011	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __P100	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ)
+#define __P101	__pgprot(_PAGE_PRESENT)
+#define __P110	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ)
+#define __P111	__pgprot(_PAGE_PRESENT)
+
+#define __S000	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ)
+#define __S001	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC)
+#define __S010	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ)
+#define __S011	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
+#define __S100	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ)
+#define __S101	__pgprot(_PAGE_PRESENT)
+#define __S110	__pgprot(_PAGE_PRESENT | _PAGE_WRITE  | _PAGE_NO_READ)
+#define __S111	__pgprot(_PAGE_PRESENT | _PAGE_WRITE)
+#else
 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
 				 _page_cachable_default)
@@ -36,9 +61,10 @@ struct vm_area_struct;
 			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
 
 /*
- * MIPS can't do page protection for execute, and considers that the same like
- * read. Also, write permissions imply read permissions. This is the closest
- * we can get by reasonable means..
+ * If _PAGE_NO_EXEC is not defined, we can't do page protection for
+ * execute, and consider it to be the same as read. Also, write
+ * permissions imply read permissions. This is the closest we can get
+ * by reasonable means..
  */
 
 /*
@@ -63,6 +89,8 @@ struct vm_area_struct;
 #define __S110 __pgprot(0)
 #define __S111 __pgprot(0)
 
+#endif
+
 extern unsigned long _page_cachable_default;
 
 /*
@@ -298,8 +326,13 @@ static inline pte_t pte_mkdirty(pte_t pte)
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_ACCESSED;
+#ifdef _PAGE_NO_READ
+	if (!(pte_val(pte) & _PAGE_NO_READ))
+		pte_val(pte) |= _PAGE_SILENT_READ;
+#else
 	if (pte_val(pte) & _PAGE_READ)
 		pte_val(pte) |= _PAGE_SILENT_READ;
+#endif
 	return pte;
 }
 
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index e716caf..31d7f0f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -137,6 +137,16 @@ EXPORT_SYMBOL_GPL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
+#ifdef _PAGE_NO_READ
+	/*
+	 * It was statically initialized with everything but the
+	 * _page_cachable_default bits.
+	 */
+	int i;
+	for (i = 0; i < 16; i++)
+		protection_map[i] = __pgprot(pgprot_val(protection_map[i]) |
+					_page_cachable_default);
+#else
 	protection_map[0] = PAGE_NONE;
 	protection_map[1] = PAGE_READONLY;
 	protection_map[2] = PAGE_COPY;
@@ -153,6 +163,7 @@ static inline void setup_protection_map(void)
 	protection_map[13] = PAGE_READONLY;
 	protection_map[14] = PAGE_SHARED;
 	protection_map[15] = PAGE_SHARED;
+#endif
 }
 
 void __cpuinit cpu_cache_init(void)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e97a7a2..24990be 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -99,8 +99,31 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
+#ifdef _PAGE_NO_READ
+		if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
+#if 0
+			pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
+				  raw_smp_processor_id(),
+				  current->comm, current->pid,
+				  field, address, write,
+				  field, regs->cp0_epc);
+#endif
+			goto bad_area;
+		}
+		if (!(vma->vm_flags & VM_READ)) {
+#if 0
+			pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
+				  raw_smp_processor_id(),
+				  current->comm, current->pid,
+				  field, address, write,
+				  field, regs->cp0_epc);
+#endif
+			goto bad_area;
+		}
+#else
 		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
 			goto bad_area;
+#endif
 	}
 
 	/*
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3c5b7de..9a8e9f1 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -143,7 +143,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 	entrylo = pte.pte_high;
 #else
-	entrylo = pte_val(pte) >> 6;
+	entrylo = pte_to_entrylo(pte.pte);
 #endif
 
 	ENTER_CRITICAL(flags);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index e551559..c9ae7a8 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -303,7 +303,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		unsigned long lo;
 		write_c0_pagemask(PM_HUGE_MASK);
 		ptep = (pte_t *)pmdp;
-		lo = pte_val(*ptep) >> 6;
+		lo = pte_to_entrylo(pte_val(*ptep));
 		write_c0_entrylo0(lo);
 		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
 
@@ -323,8 +323,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		ptep++;
 		write_c0_entrylo1(ptep->pte_high);
 #else
-		write_c0_entrylo0(pte_val(*ptep++) >> 6);
-		write_c0_entrylo1(pte_val(*ptep) >> 6);
+		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
 #endif
 		mtc0_tlbw_hazard();
 		if (idx < 0)
@@ -439,6 +439,15 @@ void __cpuinit tlb_init(void)
 	    current_cpu_type() == CPU_R12000 ||
 	    current_cpu_type() == CPU_R14000)
 		write_c0_framemask(0);
+
+#ifdef _PAGE_NO_READ
+	/*
+	 * Enable the no read, no exec bits, and enable large physical
+	 * address support (ELPA).
+	 */
+	write_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
+#endif
+
 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
 
         /* From this point on the ARC firmware is dead.  */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 35431e1..3ee26aa 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -76,6 +76,8 @@ enum label_id {
 	label_vmalloc_done,
 	label_tlbw_hazard,
 	label_split,
+	label_tlbl_goaround1,
+	label_tlbl_goaround2,
 	label_nopage_tlbl,
 	label_nopage_tlbs,
 	label_nopage_tlbm,
@@ -92,6 +94,8 @@ UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
@@ -397,6 +401,28 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
+
+static __cpuinit void build_restore_pagemask(u32 **p,
+					     struct uasm_reloc **r,
+					     unsigned int tmp,
+					     enum label_id lid)
+{
+	/* Reset default page size */
+	if (PM_DEFAULT_MASK >> 16) {
+		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+		uasm_il_b(p, r, lid);
+		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	} else if (PM_DEFAULT_MASK) {
+		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+		uasm_il_b(p, r, lid);
+		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	} else {
+		uasm_il_b(p, r, lid);
+		uasm_i_mtc0(p, 0, C0_PAGEMASK);
+	}
+}
+
 static __cpuinit void build_huge_tlb_write_entry(u32 **p,
 						 struct uasm_label **l,
 						 struct uasm_reloc **r,
@@ -410,20 +436,7 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
 
 	build_tlb_write_entry(p, l, r, wmode);
 
-	/* Reset default page size */
-	if (PM_DEFAULT_MASK >> 16) {
-		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
-		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-		uasm_il_b(p, r, label_leave);
-		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
-	} else if (PM_DEFAULT_MASK) {
-		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-		uasm_il_b(p, r, label_leave);
-		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
-	} else {
-		uasm_il_b(p, r, label_leave);
-		uasm_i_mtc0(p, 0, C0_PAGEMASK);
-	}
+	build_restore_pagemask(p, r, tmp, label_leave);
 }
 
 /*
@@ -459,7 +472,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	if (!small_sequence)
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
-	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
+	build_convert_pte_to_entrylo(p, pte);
 	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 	/* convert to entrylo1 */
 	if (small_sequence)
@@ -674,6 +687,19 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+								  unsigned int reg)
+{
+#ifdef _PAGE_NO_READ
+	uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+	uasm_i_drotr(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+#elif defined(CONFIG_64BIT_PHYS_ADDR)
+	uasm_i_dsrl(p, reg, reg, 6);
+#else
+	UASM_i_SRL(p, reg, reg, 6);
+#endif
+}
+
 static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 					unsigned int ptep)
 {
@@ -685,9 +711,17 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+#ifdef _PAGE_NO_READ
+		uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+		uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+		uasm_i_drotr(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		uasm_i_drotr(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+#else
 		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
+#endif
 		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
@@ -704,6 +738,15 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
+#ifdef _PAGE_NO_READ
+	uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+	uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+	uasm_i_drotr(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	if (r4k_250MHZhwbug())
+		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+	uasm_i_drotr(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+#else
 	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
 	if (r4k_250MHZhwbug())
 		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
@@ -711,6 +754,7 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
 	if (r45k_bvahwbug())
 		uasm_i_mfc0(p, tmp, C0_INDEX);
+#endif
 	if (r4k_250MHZhwbug())
 		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
@@ -989,9 +1033,14 @@ static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
+#ifdef _PAGE_NO_READ
+	uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+	uasm_il_beqz(p, r, pte, lid);
+#else
 	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 	uasm_il_bnez(p, r, pte, lid);
+#endif
 	iPTE_LW(p, pte, ptr);
 }
 
@@ -1279,6 +1328,36 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
+
+#ifdef _PAGE_NO_READ
+	/*
+	 * If the page is not _PAGE_VALID, RI or XI could not have
+	 * triggered it.  Skip the expensive test.
+	 */
+	uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+	uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+
+	uasm_i_nop(&p);
+	uasm_i_tlbr(&p);
+	/* Examine entrylo 0 or 1 based on ptr. */
+	uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+	uasm_i_beqz(&p, K0, 8);
+
+	UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
+	UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+	/*
+	 * If the entryLo (now in K0) is valid (bit 1), RI or XI must
+	 * have triggered it.
+	 */
+	uasm_i_andi(&p, K0, K0, 2);
+	uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+
+	uasm_l_tlbl_goaround1(&l, p);
+	/* Reload the PTE value */
+	iPTE_LW(&p, K0, K1);
+
+#endif
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
@@ -1291,6 +1370,42 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	iPTE_LW(&p, K0, K1);
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
+
+#ifdef _PAGE_NO_READ
+	/*
+	 * If the page is not _PAGE_VALID, RI or XI could not have
+	 * triggered it.  Skip the expensive test.
+	 */
+	uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+	uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+
+	uasm_i_nop(&p);
+	uasm_i_tlbr(&p);
+	/* Examine entrylo 0 or 1 based on ptr. */
+	uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+	uasm_i_beqz(&p, K0, 8);
+
+	UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
+	UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+	/*
+	 * If the entryLo (now in K0) is valid (bit 1), RI or XI must
+	 * have triggered it.
+	 */
+	uasm_i_andi(&p, K0, K0, 2);
+	uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+	/* Reload the PTE value */
+	iPTE_LW(&p, K0, K1);
+
+	/*
+	 * We clobbered C0_PAGEMASK, restore it.  On the other branch
+	 * it is restored in build_huge_tlb_write_entry.
+	 */
+	build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+	uasm_l_tlbl_goaround2(&l, p);
+
+#endif
+
 	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
 	build_huge_handler_tail(&p, &r, &l, K0, K1);
 #endif
-- 
1.6.0.6

* Re: [PATCH 3/4] MIPS: Add TLBP to uasm.
From: Sergei Shtylyov @ 2010-02-08 10:58 UTC (permalink / raw)
  To: David Daney; +Cc: linux-mips, ralf

Hello.

David Daney wrote:

> The soon to follow Read Inhibit/eXecute Inhibit patch needs TLBP
>   

  But you're adding TLBR support, not TLBP?

> support in uasm.
>
> Signed-off-by: David Daney <ddaney@caviumnetworks.com>
> ---
>  arch/mips/include/asm/uasm.h |    1 +
>  arch/mips/mm/uasm.c          |    5 ++++-
>  2 files changed, 5 insertions(+), 1 deletions(-)
>
> diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
> index 3d153ed..b18588b 100644
> --- a/arch/mips/include/asm/uasm.h
> +++ b/arch/mips/include/asm/uasm.h
> @@ -95,6 +95,7 @@ Ip_u2u1u3(_srl);
>  Ip_u3u1u2(_subu);
>  Ip_u2s3u1(_sw);
>  Ip_0(_tlbp);
> +Ip_0(_tlbr);
>  Ip_0(_tlbwi);
>  Ip_0(_tlbwr);
>  Ip_u3u1u2(_xor);
> diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
> index e3ca0f7..8f4f14d 100644
> --- a/arch/mips/mm/uasm.c
> +++ b/arch/mips/mm/uasm.c
> @@ -63,7 +63,8 @@ enum opcode {
>  	insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
>  	insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
>  	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
> -	insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, insn_dins
> +	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
> +	insn_dins
>  };
>  
>  struct insn {
> @@ -128,6 +129,7 @@ static struct insn insn_table[] __cpuinitdata = {
>  	{ insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
>  	{ insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
>  	{ insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
> +	{ insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
>  	{ insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
>  	{ insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
>  	{ insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
> @@ -381,6 +383,7 @@ I_u2u1u3(_srl)
>  I_u3u1u2(_subu)
>  I_u2s3u1(_sw)
>  I_0(_tlbp)
> +I_0(_tlbr)
>  I_0(_tlbwi)
>  I_0(_tlbwr)
>  I_u3u1u2(_xor)
>   

WBR, Sergei

* Re: [PATCH 3/4] MIPS: Add TLBP to uasm.
From: David Daney @ 2010-02-08 17:19 UTC (permalink / raw)
  To: Sergei Shtylyov; +Cc: linux-mips, ralf

Sergei Shtylyov wrote:
> Hello.
> 
> David Daney wrote:
> 
>> The soon to follow Read Inhibit/eXecute Inhibit patch needs TLBP
>>   
> 
>  But you're adding TLBR support, not TLBP?
> 

Right.

I am making more changes to this patch set and will correct that.

Thanks,
David Daney
