* [PATCH 1/6] powerpc/mm: Tweak PTE bit combination definitions (v3)
@ 2009-03-20  5:34 Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 2/6] powerpc/mm: Merge various PTE bits and accessors " Benjamin Herrenschmidt
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: Benjamin Herrenschmidt @ 2009-03-20  5:34 UTC (permalink / raw)
  To: linuxppc-dev

This patch tweaks the way some PTE bit combinations are defined, in such a
way that the 32-bit and 64-bit variants become almost identical. That will
make it easier to bring in a new common pte-* file for the upcoming
Book3E support.

The combinations of bits defining access to kernel pages are now clearly
separated from the combinations used by userspace and the core VM. The
resulting generated code should remain identical unless I made a mistake.
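
As an illustration, here is roughly how the protections end up being composed
after this change. This is a condensed sketch of the definitions in the diff
below, not new code:

    /* user / core VM protections are built around _PAGE_USER */
    #define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
    #define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)

    /* kernel protections are built around _PAGE_KERNEL_RO/_PAGE_KERNEL_RW,
     * which each pte-*.h can override (hash64 currently aliases RO to RW)
     */
    #define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
    #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)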

Note: While at it, I removed a nonsensical statement related to CONFIG_KGDB
in ppc_mmu_32.c which could cause kernel mappings to be user-accessible when
that option is enabled. Probably something that bit-rotted over time.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

v2: Fix mixup with next patch
v3: Fix loss of PAGE_KERNEL_EXEC which broke modules on ppc64

 arch/powerpc/include/asm/fixmap.h        |    2 -
 arch/powerpc/include/asm/pgtable-ppc32.h |   39 ++++++++++++-------------
 arch/powerpc/include/asm/pgtable-ppc64.h |   46 +++++++++++++++++++-----------
 arch/powerpc/include/asm/pgtable.h       |    4 ++
 arch/powerpc/include/asm/pte-8xx.h       |    3 +
 arch/powerpc/include/asm/pte-hash32.h    |    1 
 arch/powerpc/include/asm/pte-hash64-4k.h |    3 -
 arch/powerpc/include/asm/pte-hash64.h    |   47 +++++++++++++++++--------------
 arch/powerpc/mm/fsl_booke_mmu.c          |    2 -
 arch/powerpc/mm/pgtable_32.c             |    4 +-
 arch/powerpc/mm/ppc_mmu_32.c             |   10 +-----
 arch/powerpc/sysdev/cpm_common.c         |    2 -
 12 files changed, 88 insertions(+), 75 deletions(-)

--- linux-work.orig/arch/powerpc/include/asm/pgtable-ppc64.h	2009-03-20 15:46:06.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pgtable-ppc64.h	2009-03-20 15:49:51.000000000 +1100
@@ -81,11 +81,6 @@
  */
 #include <asm/pte-hash64.h>
 
-/* To make some generic powerpc code happy */
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC		0
-#endif
-
 /* Some other useful definitions */
 #define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
 #define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
@@ -96,28 +91,44 @@
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
                          _PAGE_ACCESSED | _PAGE_SPECIAL)
 
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
 
 
-/* __pgprot defined in arch/powerpc/include/asm/page.h */
-#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ */
+#define PAGE_NONE	__pgprot(_PAGE_BASE)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
-#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
-			       _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
 
-#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
-#define HAVE_PAGE_AGP
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
+
+/* Protection bits for use by pte_pgprot() */
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
+			 _PAGE_NO_CACHE | _PAGE_WRITETHRU |		\
+			 _PAGE_4K_PFN | _PAGE_USER | _PAGE_RW |		\
+			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
+
 
 /* We always have _PAGE_SPECIAL on 64 bit */
 #define __HAVE_ARCH_PTE_SPECIAL
 
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
 
 /*
  * POWER4 and newer have per page execute protection, older chips can only
@@ -395,7 +406,8 @@ static inline void pte_clear(struct mm_s
 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+		 _PAGE_EXEC | _PAGE_HWEXEC);
 	unsigned long old, tmp;
 
 	__asm__ __volatile__(
Index: linux-work/arch/powerpc/include/asm/pte-hash64-4k.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pte-hash64-4k.h	2009-03-20 15:46:06.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pte-hash64-4k.h	2009-03-20 15:47:40.000000000 +1100
@@ -8,9 +8,6 @@
 #define _PAGE_F_GIX     _PAGE_GROUP_IX
 #define _PAGE_SPECIAL	0x10000 /* software: special page */
 
-/* There is no 4K PFN hack on 4K pages */
-#define _PAGE_4K_PFN	0
-
 /* PTE flags to conserve for HPTE identification */
 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
 			 _PAGE_SECONDARY | _PAGE_GROUP_IX)
Index: linux-work/arch/powerpc/include/asm/pte-hash64.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pte-hash64.h	2009-03-20 15:46:06.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pte-hash64.h	2009-03-20 15:47:40.000000000 +1100
@@ -6,36 +6,41 @@
  * Common bits between 4K and 64K pages in a linux-style PTE.
  * These match the bits in the (hardware-defined) PowerPC PTE as closely
  * as possible. Additional bits may be defined in pgtable-hash64-*.h
+ *
+ * Note: We only support user read/write permissions. Supervisor always
+ * have full read/write to pages above PAGE_OFFSET (pages below that
+ * always use the user access permissions).
+ *
+ * We could create separate kernel read-only if we used the 3 PP bits
+ * combinations that newer processors provide but we currently don't.
  */
-#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
-#define _PAGE_USER	0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
-#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED	0x0008
-#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY	0x0080 /* C: page changed */
-#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
-#define _PAGE_RW	0x0200 /* software: user write access allowed */
-#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
+#define _PAGE_PRESENT		0x0001 /* software: pte contains a translation */
+#define _PAGE_USER		0x0002 /* matches one of the PP bits */
+#define _PAGE_FILE		0x0002 /* (!present only) software: pte holds file offset */
+#define _PAGE_EXEC		0x0004 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED		0x0008
+#define _PAGE_COHERENT		0x0010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE		0x0020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU		0x0040 /* W: cache write-through */
+#define _PAGE_DIRTY		0x0080 /* C: page changed */
+#define _PAGE_ACCESSED		0x0100 /* R: page referenced */
+#define _PAGE_RW		0x0200 /* software: user write access allowed */
+#define _PAGE_BUSY		0x0800 /* software: PTE & hash are busy */
+
+/* No separate kernel read-only */
+#define _PAGE_KERNEL_RW		(_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+#define _PAGE_KERNEL_RO		 _PAGE_KERNEL_RW
 
 /* Strong Access Ordering */
-#define _PAGE_SAO	(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+#define _PAGE_SAO		(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
 
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-
-#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE		0
 
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
 
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
-			 _PAGE_NO_CACHE | _PAGE_WRITETHRU |		\
-			 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER |		\
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-
 
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/pte-hash64-64k.h>
Index: linux-work/arch/powerpc/include/asm/pgtable-ppc32.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pgtable-ppc32.h	2009-03-20 15:47:39.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pgtable-ppc32.h	2009-03-20 15:47:40.000000000 +1100
@@ -144,6 +144,13 @@ extern int icache_44x_need_flush;
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
 
+#ifndef _PAGE_KERNEL_RO
+#define _PAGE_KERNEL_RO	0
+#endif
+#ifndef _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RW	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#endif
+
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
 /* Location of the PFN in the PTE. Most platforms use the same as _PAGE_SHIFT
@@ -186,30 +193,25 @@ extern int icache_44x_need_flush;
 #else
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #endif
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
 
-#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
-#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
-#define _PAGE_KERNEL_NC	(_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)
-
-#ifdef CONFIG_PPC_STD_MMU
-/* On standard PPC MMU, no user access implies kernel read/write access,
- * so to write-protect kernel memory we must turn on user access */
-#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
-#else
-#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
-#endif
-
-#define _PAGE_IO	(_PAGE_KERNEL_NC | _PAGE_GUARDED)
-#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
 
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
 	defined(CONFIG_KPROBES)
 /* We want the debuggers to be able to set breakpoints anywhere, so
  * don't write protect the kernel text */
-#define _PAGE_RAM_TEXT	_PAGE_RAM
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
 #else
-#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
 #endif
 
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
@@ -220,9 +222,6 @@ extern int icache_44x_need_flush;
 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
 
-#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
-#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)
-
 /*
  * The PowerPC can only do execute protection on a segment (256MB) basis,
  * not on a page basis.  So we consider execute permission the same as read.
Index: linux-work/arch/powerpc/include/asm/pte-8xx.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pte-8xx.h	2009-03-20 15:46:06.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pte-8xx.h	2009-03-20 15:47:40.000000000 +1100
@@ -59,6 +59,9 @@
 /* Until my rework is finished, 8xx still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES	1
 
+/* We need to add _PAGE_SHARED to kernel pages */
+#define _PAGE_KERNEL_RO	(_PAGE_SHARED)
+#define _PAGE_KERNEL_RW	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
 
 #endif /* __KERNEL__ */
 #endif /*  _ASM_POWERPC_PTE_8xx_H */
Index: linux-work/arch/powerpc/include/asm/pte-hash32.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pte-hash32.h	2009-03-20 15:46:06.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pte-hash32.h	2009-03-20 15:47:40.000000000 +1100
@@ -44,6 +44,5 @@
 /* Hash table based platforms need atomic updates of the linux PTE */
 #define PTE_ATOMIC_UPDATES	1
 
-
 #endif /* __KERNEL__ */
 #endif /*  _ASM_POWERPC_PTE_HASH32_H */
Index: linux-work/arch/powerpc/mm/pgtable_32.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/pgtable_32.c	2009-03-20 15:46:00.000000000 +1100
+++ linux-work/arch/powerpc/mm/pgtable_32.c	2009-03-20 15:47:40.000000000 +1100
@@ -164,7 +164,7 @@ __ioremap_caller(phys_addr_t addr, unsig
 
 	/* Make sure we have the base flags */
 	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= _PAGE_KERNEL;
+		flags |= PAGE_KERNEL;
 
 	/* Non-cacheable page cannot be coherent */
 	if (flags & _PAGE_NO_CACHE)
@@ -296,7 +296,7 @@ void __init mapin_ram(void)
 	p = memstart_addr + s;
 	for (; s < total_lowmem; s += PAGE_SIZE) {
 		ktext = ((char *) v >= _stext && (char *) v < etext);
-		f = ktext ?_PAGE_RAM_TEXT : _PAGE_RAM;
+		f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
 		map_page(v, p, f);
 #ifdef CONFIG_PPC_STD_MMU_32
 		if (ktext)
Index: linux-work/arch/powerpc/mm/ppc_mmu_32.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/ppc_mmu_32.c	2009-03-20 15:46:00.000000000 +1100
+++ linux-work/arch/powerpc/mm/ppc_mmu_32.c	2009-03-20 15:47:40.000000000 +1100
@@ -74,9 +74,6 @@ unsigned long p_mapped_by_bats(phys_addr
 
 unsigned long __init mmu_mapin_ram(void)
 {
-#ifdef CONFIG_POWER4
-	return 0;
-#else
 	unsigned long tot, bl, done;
 	unsigned long max_size = (256<<20);
 
@@ -95,7 +92,7 @@ unsigned long __init mmu_mapin_ram(void)
 			break;
 	}
 
-	setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM);
+	setbat(2, PAGE_OFFSET, 0, bl, PAGE_KERNEL_X);
 	done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
 	if ((done < tot) && !bat_addrs[3].limit) {
 		/* use BAT3 to cover a bit more */
@@ -103,12 +100,11 @@ unsigned long __init mmu_mapin_ram(void)
 		for (bl = 128<<10; bl < max_size; bl <<= 1)
 			if (bl * 2 > tot)
 				break;
-		setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM);
+		setbat(3, PAGE_OFFSET+done, done, bl, PAGE_KERNEL_X);
 		done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
 	}
 
 	return done;
-#endif
 }
 
 /*
@@ -136,9 +132,7 @@ void __init setbat(int index, unsigned l
 		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
 		bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
 		bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
-#ifndef CONFIG_KGDB /* want user access for breakpoints */
 		if (flags & _PAGE_USER)
-#endif
 			bat[1].batu |= 1; 	/* Vp = 1 */
 		if (flags & _PAGE_GUARDED) {
 			/* G bit must be zero in IBATs */
Index: linux-work/arch/powerpc/sysdev/cpm_common.c
===================================================================
--- linux-work.orig/arch/powerpc/sysdev/cpm_common.c	2009-03-20 15:46:00.000000000 +1100
+++ linux-work/arch/powerpc/sysdev/cpm_common.c	2009-03-20 15:47:40.000000000 +1100
@@ -56,7 +56,7 @@ void __init udbg_init_cpm(void)
 {
 	if (cpm_udbg_txdesc) {
 #ifdef CONFIG_CPM2
-		setbat(1, 0xf0000000, 0xf0000000, 1024*1024, _PAGE_IO);
+		setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);
 #endif
 		udbg_putc = udbg_putc_cpm;
 	}
Index: linux-work/arch/powerpc/include/asm/fixmap.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/fixmap.h	2009-03-20 15:46:00.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/fixmap.h	2009-03-20 15:47:40.000000000 +1100
@@ -61,7 +61,7 @@ extern void __set_fixmap (enum fixed_add
  * Some hardware wants to get fixmapped without caching.
  */
 #define set_fixmap_nocache(idx, phys) \
-		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+		__set_fixmap(idx, phys, PAGE_KERNEL_NCG)
 
 #define clear_fixmap(idx) \
 		__set_fixmap(idx, 0, __pgprot(0))
Index: linux-work/arch/powerpc/include/asm/pgtable.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pgtable.h	2009-03-20 15:46:00.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pgtable.h	2009-03-20 15:47:40.000000000 +1100
@@ -25,6 +25,10 @@ static inline void assert_pte_locked(str
 #  include <asm/pgtable-ppc32.h>
 #endif
 
+/* Special mapping for AGP */
+#define PAGE_AGP	(PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
 #ifndef __ASSEMBLY__
 
 /* Insert a PTE, top-level function is out of line. It uses an inline
Index: linux-work/arch/powerpc/mm/fsl_booke_mmu.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/fsl_booke_mmu.c	2009-03-20 15:46:00.000000000 +1100
+++ linux-work/arch/powerpc/mm/fsl_booke_mmu.c	2009-03-20 15:47:40.000000000 +1100
@@ -162,7 +162,7 @@ unsigned long __init mmu_mapin_ram(void)
 	phys_addr_t phys = memstart_addr;
 
 	while (cam[tlbcam_index] && tlbcam_index < ARRAY_SIZE(cam)) {
-		settlbcam(tlbcam_index, virt, phys, cam[tlbcam_index], _PAGE_KERNEL, 0);
+		settlbcam(tlbcam_index, virt, phys, cam[tlbcam_index], PAGE_KERNEL_X, 0);
 		virt += cam[tlbcam_index];
 		phys += cam[tlbcam_index];
 		tlbcam_index++;


* [PATCH 2/6] powerpc/mm: Merge various PTE bits and accessors definitions (v3)
  2009-03-20  5:34 [PATCH 1/6] powerpc/mm: Tweak PTE bit combination definitions (v3) Benjamin Herrenschmidt
@ 2009-03-20  5:34 ` Benjamin Herrenschmidt
  2009-03-20  9:11   ` Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 3/6] powerpc/mm: Rename arch/powerpc/kernel/mmap.c to mmap_64.c Benjamin Herrenschmidt
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 7+ messages in thread
From: Benjamin Herrenschmidt @ 2009-03-20  5:34 UTC (permalink / raw)
  To: linuxppc-dev

Now that they are almost identical, we can merge some of the definitions
related to the PTE format into common files.

This creates a new pte-common.h, which is included by both the 32-bit and
64-bit headers right after the CPU-specific pte-*.h file. It defines some
bits to "default" values if they haven't been defined already, and then
provides generic definitions of most of the bit combinations based on
these, which are exposed to the rest of the kernel.
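
The "default if undefined" trick is what lets the combinations be shared; a
condensed sketch of the pattern (the full list is in the new pte-common.h in
the diff below):

    /* bits a given CPU family doesn't provide default to 0 (or a fallback) */
    #ifndef _PAGE_COHERENT
    #define _PAGE_COHERENT  0
    #endif
    #ifndef _PAGE_KERNEL_RW
    #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
    #endif

    /* ...so the generic combinations can use them unconditionally */
    #define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
    #define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)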

I also moved most of the "small" PTE bit accessors and modification
helpers (pte_mk*) into the common pgtable.h. The remaining accessors stay
in their separate files.
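
Since every bit is now defined everywhere (possibly as 0), the generic
helpers can mask bits that only exist on some families and let the compiler
fold the dead parts away; for example (taken from the diff below):

    static inline pte_t pte_wrprotect(pte_t pte) {
        /* _PAGE_HWWRITE is 0 on platforms without it, so this
         * reduces to clearing _PAGE_RW there */
        pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }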

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

v2: Fix mixup with previous patch
v3: Rebase due to change in previous patch and fix pfn_pte() for 32-bit
    platforms with >32 bit physical address space

 arch/powerpc/include/asm/pgtable-ppc32.h |  204 -------------------------------
 arch/powerpc/include/asm/pgtable-ppc64.h |  132 --------------------
 arch/powerpc/include/asm/pgtable.h       |   54 +++++++-
 arch/powerpc/include/asm/pte-common.h    |  180 +++++++++++++++++++++++++++
 4 files changed, 233 insertions(+), 337 deletions(-)

--- linux-work.orig/arch/powerpc/include/asm/pgtable-ppc32.h	2009-03-20 15:47:40.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pgtable-ppc32.h	2009-03-20 15:50:39.000000000 +1100
@@ -97,174 +97,11 @@ extern int icache_44x_need_flush;
 #include <asm/pte-hash32.h>
 #endif
 
-/* If _PAGE_SPECIAL is defined, then we advertise our support for it */
-#ifdef _PAGE_SPECIAL
-#define __HAVE_ARCH_PTE_SPECIAL
-#endif
-
-/*
- * Some bits are only used on some cpu families... Make sure that all
- * the undefined gets defined as 0
- */
-#ifndef _PAGE_HASHPTE
-#define _PAGE_HASHPTE	0
-#endif
-#ifndef _PTE_NONE_MASK
-#define _PTE_NONE_MASK 0
-#endif
-#ifndef _PAGE_SHARED
-#define _PAGE_SHARED	0
-#endif
-#ifndef _PAGE_HWWRITE
-#define _PAGE_HWWRITE	0
-#endif
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC	0
-#endif
-#ifndef _PAGE_EXEC
-#define _PAGE_EXEC	0
-#endif
-#ifndef _PAGE_ENDIAN
-#define _PAGE_ENDIAN	0
-#endif
-#ifndef _PAGE_COHERENT
-#define _PAGE_COHERENT	0
-#endif
-#ifndef _PAGE_WRITETHRU
-#define _PAGE_WRITETHRU	0
-#endif
-#ifndef _PAGE_SPECIAL
-#define _PAGE_SPECIAL	0
-#endif
-#ifndef _PMD_PRESENT_MASK
-#define _PMD_PRESENT_MASK	_PMD_PRESENT
-#endif
-#ifndef _PMD_SIZE
-#define _PMD_SIZE	0
-#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
-#endif
-
-#ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO	0
-#endif
-#ifndef _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RW	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
-#endif
-
-#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
-
-/* Location of the PFN in the PTE. Most platforms use the same as _PAGE_SHIFT
- * here (ie, naturally aligned). Platform who don't just pre-define the
- * value so we don't override it here
- */
-#ifndef PTE_RPN_SHIFT
-#define PTE_RPN_SHIFT	(PAGE_SHIFT)
-#endif
-
-#ifdef CONFIG_PTE_64BIT
-#define PTE_RPN_MAX	(1ULL << (64 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK	(~((1ULL<<PTE_RPN_SHIFT)-1))
-#else
-#define PTE_RPN_MAX	(1UL << (32 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
-#endif
-
-/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
- * pgprot changes
- */
-#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-                         _PAGE_ACCESSED | _PAGE_SPECIAL)
-
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
-			 _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
-			 _PAGE_EXEC | _PAGE_HWEXEC)
-
-/*
- * We define 2 sets of base prot bits, one for basic pages (ie,
- * cacheable kernel and user pages) and one for non cacheable
- * pages. We always set _PAGE_COHERENT when SMP is enabled or
- * the processor might need it for DMA coherency.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-#else
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
-#endif
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-/* Permission masks used for kernel mappings */
-#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
-#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
-
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-	defined(CONFIG_KPROBES)
-/* We want the debuggers to be able to set breakpoints anywhere, so
- * don't write protect the kernel text */
-#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
-#endif
-
-#define PAGE_NONE	__pgprot(_PAGE_BASE)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-
-/*
- * The PowerPC can only do execute protection on a segment (256MB) basis,
- * not on a page basis.  So we consider execute permission the same as read.
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY_X
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY_X
-#define __P100	PAGE_READONLY
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY_X
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED_X
-#define __S100	PAGE_READONLY
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED
-#define __S111	PAGE_SHARED_X
+/* And here we include common definitions */
+#include <asm/pte-common.h>
 
 #ifndef __ASSEMBLY__
-/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
- * kernel without large page PMD support */
-extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 
-/*
- * Conversions between PTE values and page frame numbers.
- */
-
-#define pte_pfn(x)		(pte_val(x) >> PTE_RPN_SHIFT)
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
-
-#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |\
-					pgprot_val(prot))
-#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#endif /* __ASSEMBLY__ */
-
-#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
-#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
 #define pte_clear(mm, addr, ptep) \
 	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
 
@@ -273,43 +110,6 @@ extern unsigned long bad_call_to_PMD_PAG
 #define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
 #define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)
 
-#ifndef __ASSEMBLY__
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
-	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-
-static inline pte_t pte_mkwrite(pte_t pte) {
-	pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
-	pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
-	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
-	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
-	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
-}
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
-	return pte;
-}
-
 /*
  * When flushing the tlb entry for a page, we also need to flush the hash
  * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
Index: linux-work/arch/powerpc/include/asm/pgtable-ppc64.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pgtable-ppc64.h	2009-03-20 15:49:51.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pgtable-ppc64.h	2009-03-20 15:52:59.000000000 +1100
@@ -80,82 +80,8 @@
  * Include the PTE bits definitions
  */
 #include <asm/pte-hash64.h>
+#include <asm/pte-common.h>
 
-/* Some other useful definitions */
-#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
-
-/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
- * pgprot changes
- */
-#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-                         _PAGE_ACCESSED | _PAGE_SPECIAL)
-
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
-
-
-/* Permission masks used to generate the __P and __S table,
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- */
-#define PAGE_NONE	__pgprot(_PAGE_BASE)
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-
-/* Permission masks used for kernel mappings */
-#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
-#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
-
-/* Protection bits for use by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
-			 _PAGE_NO_CACHE | _PAGE_WRITETHRU |		\
-			 _PAGE_4K_PFN | _PAGE_USER | _PAGE_RW |		\
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-
-
-/* We always have _PAGE_SPECIAL on 64 bit */
-#define __HAVE_ARCH_PTE_SPECIAL
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
-
-/*
- * POWER4 and newer have per page execute protection, older chips can only
- * do this on a segment (256MB) basis.
- *
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- *
- * Note due to the way vm flags are laid out, the bits are XWR
- */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_X
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY_X
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_X
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED_X
-#define __S111	PAGE_SHARED_X
 
 #ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -196,34 +122,8 @@
 #endif /* __real_pte */
 
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * mk_pte takes a (struct page *) as input
- */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
-	pte_t pte;
-
-
-	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
-	return pte;
-}
-
-#define pte_modify(_pte, newprot) \
-  (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
-
-#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
-#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
-
 /* pte_clear moved to later in this file */
 
-#define pte_pfn(x)		((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
-
 #define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
 #define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
 
@@ -271,36 +171,6 @@ static inline pte_t pfn_pte(unsigned lon
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
-static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_RW); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
-	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
-	pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
-	pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
-	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
-	return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
-	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
-	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
-}
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
Index: linux-work/arch/powerpc/include/asm/pgtable.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pgtable.h	2009-03-20 15:47:40.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pgtable.h	2009-03-20 15:55:06.000000000 +1100
@@ -25,12 +25,58 @@ static inline void assert_pte_locked(str
 #  include <asm/pgtable-ppc32.h>
 #endif
 
-/* Special mapping for AGP */
-#define PAGE_AGP	(PAGE_KERNEL_NC)
-#define HAVE_PAGE_AGP
-
 #ifndef __ASSEMBLY__
 
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
+static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
+static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
+static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
+	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+		     pgprot_val(pgprot)); }
+static inline unsigned long pte_pfn(pte_t pte)	{
+	return pte_val(pte) >> PTE_RPN_SHIFT; }
+
+/* Keep these as a macros to avoid include dependency mess */
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte) {
+	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) {
+	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte) {
+	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) {
+	pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+	pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) {
+	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) {
+	return pte; }
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
+
+
 /* Insert a PTE, top-level function is out of line. It uses an inline
  * low level function in the respective pgtable-* files
  */
Index: linux-work/arch/powerpc/include/asm/pte-common.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-work/arch/powerpc/include/asm/pte-common.h	2009-03-20 15:51:48.000000000 +1100
@@ -0,0 +1,180 @@
+/* Included from asm/pgtable-*.h only ! */
+
+/*
+ * Some bits are only used on some cpu families... Make sure that all
+ * the undefined gets a sensible default
+ */
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE	0
+#endif
+#ifndef _PAGE_SHARED
+#define _PAGE_SHARED	0
+#endif
+#ifndef _PAGE_HWWRITE
+#define _PAGE_HWWRITE	0
+#endif
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC	0
+#endif
+#ifndef _PAGE_EXEC
+#define _PAGE_EXEC	0
+#endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN	0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT	0
+#endif
+#ifndef _PAGE_WRITETHRU
+#define _PAGE_WRITETHRU	0
+#endif
+#ifndef _PAGE_SPECIAL
+#define _PAGE_SPECIAL	0
+#endif
+#ifndef _PAGE_4K_PFN
+#define _PAGE_4K_PFN		0
+#endif
+#ifndef _PAGE_PSIZE
+#define _PAGE_PSIZE		0
+#endif
+#ifndef _PMD_PRESENT_MASK
+#define _PMD_PRESENT_MASK	_PMD_PRESENT
+#endif
+#ifndef _PMD_SIZE
+#define _PMD_SIZE	0
+#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
+#endif
+#ifndef _PAGE_KERNEL_RO
+#define _PAGE_KERNEL_RO	0
+#endif
+#ifndef _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RW	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#endif
+#ifndef _PAGE_HPTEFLAGS
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK	_PAGE_HPTEFLAGS
+#endif
+
+/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
+ * kernel without large page PMD support
+ */
+#ifndef __ASSEMBLY__
+extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
+#endif /* __ASSEMBLY__ */
+
+/* Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * as _PAGE_SHIFT here (ie, naturally aligned).
+ * Platform who don't just pre-define the value so we don't override it here
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT	(PAGE_SHIFT)
+#endif
+
+/* The mask convered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs
+ */
+#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#define PTE_RPN_MAX	(1ULL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1ULL<<PTE_RPN_SHIFT)-1))
+#else
+#define PTE_RPN_MAX	(1UL << (32 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
+#endif
+
+/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+                         _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/* Mask of bits returned by pte_pgprot() */
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
+			 _PAGE_USER | _PAGE_ACCESSED | \
+			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+			 _PAGE_EXEC | _PAGE_HWEXEC)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE	(_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now (we could make write-only
+ * pages on BookE but we don't bother for now). Execute permission control is
+ * possible on platforms that define _PAGE_EXEC
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define PAGE_NONE	__pgprot(_PAGE_BASE)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY_X
+#define __P101	PAGE_READONLY_X
+#define __P110	PAGE_COPY_X
+#define __P111	PAGE_COPY_X
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY_X
+#define __S101	PAGE_READONLY_X
+#define __S110	PAGE_SHARED_X
+#define __S111	PAGE_SHARED_X
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
+
+/* Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+	defined(CONFIG_KPROBES)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP		(PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
+/* Advertise support for _PAGE_SPECIAL */
+#ifdef _PAGE_SPECIAL
+#define __HAVE_ARCH_PTE_SPECIAL
+#endif
+


* [PATCH 3/6] powerpc/mm: Rename arch/powerpc/kernel/mmap.c to mmap_64.c
  2009-03-20  5:34 [PATCH 1/6] powerpc/mm: Tweak PTE bit combination definitions (v3) Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 2/6] powerpc/mm: Merge various PTE bits and accessors " Benjamin Herrenschmidt
@ 2009-03-20  5:34 ` Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 4/6] powerpc/mm: Fix printk type warning in mmu_context_nohash Benjamin Herrenschmidt
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Benjamin Herrenschmidt @ 2009-03-20  5:34 UTC (permalink / raw)
  To: linuxppc-dev

This file is only useful on 64-bit, so we name it accordingly.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

 arch/powerpc/mm/Makefile  |    2 
 arch/powerpc/mm/mmap.c    |  109 ----------------------------------------------
 arch/powerpc/mm/mmap_64.c |  109 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 110 insertions(+), 110 deletions(-)

--- linux-work.orig/arch/powerpc/mm/Makefile	2009-03-10 16:08:34.000000000 +1100
+++ linux-work/arch/powerpc/mm/Makefile	2009-03-10 16:23:44.000000000 +1100
@@ -14,7 +14,7 @@ obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_cont
 hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o \
-				   mmap.o $(hash-y)
+				   mmap_64.o $(hash-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
 				   tlb_hash$(CONFIG_WORD_SIZE).o \
Index: linux-work/arch/powerpc/mm/mmap.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/mmap.c	2009-03-10 16:08:18.000000000 +1100
+++ /dev/null	1970-01-01 00:00:00.000000000 +0000
@@ -1,109 +0,0 @@
-/*
- *  flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-#include <linux/random.h>
-#include <linux/sched.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave at least a ~128 MB hole on 32bit applications.
- *
- * On 64bit applications we randomise the stack by 1GB so we need to
- * space our mmap start address by a further 1GB, otherwise there is a
- * chance the mmap area will end up closer to the stack than our ulimit
- * requires.
- */
-#define MIN_GAP32 (128*1024*1024)
-#define MIN_GAP64 ((128 + 1024)*1024*1024UL)
-#define MIN_GAP ((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline int mmap_is_legacy(void)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-/*
- * Since get_random_int() returns the same value within a 1 jiffy window,
- * we will almost always get the same randomisation for the stack and mmap
- * region. This will mean the relative distance between stack and mmap will
- * be the same.
- *
- * To avoid this we can shift the randomness by 1 bit.
- */
-static unsigned long mmap_rnd(void)
-{
-	unsigned long rnd = 0;
-
-	if (current->flags & PF_RANDOMIZE) {
-		/* 8MB for 32bit, 1GB for 64bit */
-		if (is_32bit_task())
-			rnd = (long)(get_random_int() % (1<<(22-PAGE_SHIFT)));
-		else
-			rnd = (long)(get_random_int() % (1<<(29-PAGE_SHIFT)));
-	}
-	return (rnd << PAGE_SHIFT) * 2;
-}
-
-static inline unsigned long mmap_base(void)
-{
-	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
-
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-
-	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
-	/*
-	 * Fall back to the standard layout if the personality
-	 * bit is set, or if the expected stack growth is unlimited:
-	 */
-	if (mmap_is_legacy()) {
-		mm->mmap_base = TASK_UNMAPPED_BASE;
-		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
-	} else {
-		mm->mmap_base = mmap_base();
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
-	}
-}
Index: linux-work/arch/powerpc/mm/mmap_64.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-work/arch/powerpc/mm/mmap_64.c	2009-03-10 16:08:34.000000000 +1100
@@ -0,0 +1,109 @@
+/*
+ *  flexible mmap layout support
+ *
+ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ *
+ * Started by Ingo Molnar <mingo@elte.hu>
+ */
+
+#include <linux/personality.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+
+/*
+ * Top of mmap area (just below the process stack).
+ *
+ * Leave at least a ~128 MB hole on 32bit applications.
+ *
+ * On 64bit applications we randomise the stack by 1GB so we need to
+ * space our mmap start address by a further 1GB, otherwise there is a
+ * chance the mmap area will end up closer to the stack than our ulimit
+ * requires.
+ */
+#define MIN_GAP32 (128*1024*1024)
+#define MIN_GAP64 ((128 + 1024)*1024*1024UL)
+#define MIN_GAP ((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
+#define MAX_GAP (TASK_SIZE/6*5)
+
+static inline int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
+		return 1;
+
+	return sysctl_legacy_va_layout;
+}
+
+/*
+ * Since get_random_int() returns the same value within a 1 jiffy window,
+ * we will almost always get the same randomisation for the stack and mmap
+ * region. This will mean the relative distance between stack and mmap will
+ * be the same.
+ *
+ * To avoid this we can shift the randomness by 1 bit.
+ */
+static unsigned long mmap_rnd(void)
+{
+	unsigned long rnd = 0;
+
+	if (current->flags & PF_RANDOMIZE) {
+		/* 8MB for 32bit, 1GB for 64bit */
+		if (is_32bit_task())
+			rnd = (long)(get_random_int() % (1<<(22-PAGE_SHIFT)));
+		else
+			rnd = (long)(get_random_int() % (1<<(29-PAGE_SHIFT)));
+	}
+	return (rnd << PAGE_SHIFT) * 2;
+}
+
+static inline unsigned long mmap_base(void)
+{
+	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	/*
+	 * Fall back to the standard layout if the personality
+	 * bit is set, or if the expected stack growth is unlimited:
+	 */
+	if (mmap_is_legacy()) {
+		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		mm->mmap_base = mmap_base();
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	}
+}


* [PATCH 4/6] powerpc/mm: Fix printk type warning in mmu_context_nohash
  2009-03-20  5:34 [PATCH 1/6] powerpc/mm: Tweak PTE bit combination definitions (v3) Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 2/6] powerpc/mm: Merge various PTE bits and accessors " Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 3/6] powerpc/mm: Rename arch/powerpc/kernel/mmap.c to mmap_64.c Benjamin Herrenschmidt
@ 2009-03-20  5:34 ` Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 5/6] powerpc/mm: Add option for non-atomic PTE updates to ppc64 Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 6/6] powerpc/mm: Introduce early_init_mmu() on 64-bit Benjamin Herrenschmidt
  4 siblings, 0 replies; 7+ messages in thread
From: Benjamin Herrenschmidt @ 2009-03-20  5:34 UTC (permalink / raw)
  To: linuxppc-dev

We need to use %zu instead of %d when printing the result of sizeof().
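
For reference, sizeof() yields a size_t, so the matching printk format is
%zu; the fixed call (as it appears in the diff below) looks like:

    printk(KERN_INFO
           "MMU: Allocated %zu bytes of context maps for %d contexts\n",
           2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
           last_context - first_context + 1);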

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

 arch/powerpc/mm/mmu_context_nohash.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- linux-work.orig/arch/powerpc/mm/mmu_context_nohash.c	2009-03-11 11:51:05.000000000 +1100
+++ linux-work/arch/powerpc/mm/mmu_context_nohash.c	2009-03-11 11:52:55.000000000 +1100
@@ -380,7 +380,7 @@ void __init mmu_context_init(void)
 #endif
 
 	printk(KERN_INFO
-	       "MMU: Allocated %d bytes of context maps for %d contexts\n",
+	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
 	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
 	       last_context - first_context + 1);
 


* [PATCH 5/6] powerpc/mm: Add option for non-atomic PTE updates to ppc64
  2009-03-20  5:34 [PATCH 1/6] powerpc/mm: Tweak PTE bit combination definitions (v3) Benjamin Herrenschmidt
                   ` (2 preceding siblings ...)
  2009-03-20  5:34 ` [PATCH 4/6] powerpc/mm: Fix printk type warning in mmu_context_nohash Benjamin Herrenschmidt
@ 2009-03-20  5:34 ` Benjamin Herrenschmidt
  2009-03-20  5:34 ` [PATCH 6/6] powerpc/mm: Introduce early_init_mmu() on 64-bit Benjamin Herrenschmidt
  4 siblings, 0 replies; 7+ messages in thread
From: Benjamin Herrenschmidt @ 2009-03-20  5:34 UTC (permalink / raw)
  To: linuxppc-dev

ppc32 already has this option; add it to ppc64 as a preliminary step toward
64-bit Book3E support.
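
Condensed sketch of the idea: the ldarx/stdcx. sequences stay guarded by
PTE_ATOMIC_UPDATES, and when a platform doesn't define it the update becomes
a plain read-modify-write of the linux PTE, as the diff below does inline
inside pte_update() (the helper name here is purely illustrative):

    /* illustrative helper only -- the real patch open-codes this under
     * #ifndef PTE_ATOMIC_UPDATES inside pte_update() */
    static inline unsigned long pte_update_nonatomic(pte_t *ptep,
                                                     unsigned long clr)
    {
        unsigned long old = pte_val(*ptep);

        *ptep = __pte(old & ~clr);
        return old;
    }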

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

 arch/powerpc/include/asm/pgtable-ppc64.h |   12 +++++++++++-
 arch/powerpc/include/asm/pte-hash64.h    |    2 ++
 2 files changed, 13 insertions(+), 1 deletion(-)

--- linux-work.orig/arch/powerpc/include/asm/pgtable-ppc64.h	2009-03-10 15:43:26.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pgtable-ppc64.h	2009-03-10 16:08:30.000000000 +1100
@@ -178,6 +178,7 @@ static inline unsigned long pte_update(s
 				       pte_t *ptep, unsigned long clr,
 				       int huge)
 {
+#ifdef PTE_ATOMIC_UPDATES
 	unsigned long old, tmp;
 
 	__asm__ __volatile__(
@@ -190,7 +191,10 @@ static inline unsigned long pte_update(s
 	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
 	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
 	: "cc" );
-
+#else
+	unsigned long old = pte_val(*ptep);
+	*ptep = __pte(old & ~clr);
+#endif
 	/* huge pages use the old page table lock */
 	if (!huge)
 		assert_pte_locked(mm, addr);
@@ -278,6 +282,8 @@ static inline void __ptep_set_access_fla
 	unsigned long bits = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
 		 _PAGE_EXEC | _PAGE_HWEXEC);
+
+#ifdef PTE_ATOMIC_UPDATES
 	unsigned long old, tmp;
 
 	__asm__ __volatile__(
@@ -290,6 +296,10 @@ static inline void __ptep_set_access_fla
 	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
 	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
 	:"cc");
+#else
+	unsigned long old = pte_val(*ptep);
+	*ptep = __pte(old | bits);
+#endif
 }
 
 #define __HAVE_ARCH_PTE_SAME
Index: linux-work/arch/powerpc/include/asm/pte-hash64.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/pte-hash64.h	2009-03-10 15:49:50.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/pte-hash64.h	2009-03-10 15:49:55.000000000 +1100
@@ -41,6 +41,8 @@
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
 
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES	1
 
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/pte-hash64-64k.h>


* [PATCH 6/6] powerpc/mm: Introduce early_init_mmu() on 64-bit
  2009-03-20  5:34 [PATCH 1/6] powerpc/mm: Tweak PTE bit combination definitions (v3) Benjamin Herrenschmidt
                   ` (3 preceding siblings ...)
  2009-03-20  5:34 ` [PATCH 5/6] powerpc/mm: Add option for non-atomic PTE updates to ppc64 Benjamin Herrenschmidt
@ 2009-03-20  5:34 ` Benjamin Herrenschmidt
  4 siblings, 0 replies; 7+ messages in thread
From: Benjamin Herrenschmidt @ 2009-03-20  5:34 UTC (permalink / raw)
  To: linuxppc-dev

This moves some MMU-related init code out of setup_64.c and into
hash_utils_64.c, wrapping it in early_init_mmu() and early_init_mmu_secondary().
This will make it easier to plug in a new MMU type.
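
On the hash MMU side, the new hook essentially wraps what early_setup() used
to do inline; a condensed sketch based on the code removed from setup_64.c in
the diff below (the exact body lives in hash_utils_64.c):

    void __init early_init_mmu(void)
    {
        /* Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before stab/slb initialization as
         * this is currently where the page size encoding is obtained.
         */
        htab_initialize();

        /* Initialize stab / SLB management except on iSeries */
        if (cpu_has_feature(CPU_FTR_SLB))
                slb_initialize();
        else if (!firmware_has_feature(FW_FEATURE_ISERIES))
                stab_initialize(get_paca()->stab_real);
    }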

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

 arch/powerpc/include/asm/mmu-hash64.h |    2 -
 arch/powerpc/include/asm/mmu.h        |    4 +++
 arch/powerpc/kernel/setup_64.c        |   35 ++++-----------------------------
 arch/powerpc/mm/hash_utils_64.c       |   36 ++++++++++++++++++++++++++++++++--
 4 files changed, 43 insertions(+), 34 deletions(-)

--- linux-work.orig/arch/powerpc/include/asm/mmu-hash64.h	2009-03-11 13:43:26.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/mmu-hash64.h	2009-03-11 13:43:29.000000000 +1100
@@ -284,8 +284,6 @@ extern void add_gpage(unsigned long addr
 			  unsigned long number_of_pages);
 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
 
-extern void htab_initialize(void);
-extern void htab_initialize_secondary(void);
 extern void hpte_init_native(void);
 extern void hpte_init_lpar(void);
 extern void hpte_init_iSeries(void);
Index: linux-work/arch/powerpc/include/asm/mmu.h
===================================================================
--- linux-work.orig/arch/powerpc/include/asm/mmu.h	2009-03-11 13:43:39.000000000 +1100
+++ linux-work/arch/powerpc/include/asm/mmu.h	2009-03-11 13:48:40.000000000 +1100
@@ -56,6 +56,10 @@ static inline int mmu_has_feature(unsign
 
 extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 
+/* MMU initialization (64-bit only for now) */
+extern void early_init_mmu(void);
+extern void early_init_mmu_secondary(void);
+
 #endif /* !__ASSEMBLY__ */
 
 
Index: linux-work/arch/powerpc/kernel/setup_64.c
===================================================================
--- linux-work.orig/arch/powerpc/kernel/setup_64.c	2009-03-11 13:42:09.000000000 +1100
+++ linux-work/arch/powerpc/kernel/setup_64.c	2009-03-11 13:55:15.000000000 +1100
@@ -202,8 +202,6 @@ void __init early_setup(unsigned long dt
 
 	/* Fix up paca fields required for the boot cpu */
 	get_paca()->cpu_start = 1;
-	get_paca()->stab_real = __pa((u64)&initial_stab);
-	get_paca()->stab_addr = (u64)&initial_stab;
 
 	/* Probe the machine type */
 	probe_machine();
@@ -212,20 +210,8 @@ void __init early_setup(unsigned long dt
 
 	DBG("Found, Initializing memory management...\n");
 
-	/*
-	 * Initialize the MMU Hash table and create the linear mapping
-	 * of memory. Has to be done before stab/slb initialization as
-	 * this is currently where the page size encoding is obtained
-	 */
-	htab_initialize();
-
-	/*
-	 * Initialize stab / SLB management except on iSeries
-	 */
-	if (cpu_has_feature(CPU_FTR_SLB))
-		slb_initialize();
-	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		stab_initialize(get_paca()->stab_real);
+	/* Initialize the hash table or TLB handling */
+	early_init_mmu();
 
 	DBG(" <- early_setup()\n");
 }
@@ -233,22 +219,11 @@ void __init early_setup(unsigned long dt
 #ifdef CONFIG_SMP
 void early_setup_secondary(void)
 {
-	struct paca_struct *lpaca = get_paca();
-
 	/* Mark interrupts enabled in PACA */
-	lpaca->soft_enabled = 0;
-
-	/* Initialize hash table for that CPU */
-	htab_initialize_secondary();
+	get_paca()->soft_enabled = 0;
 
-	/* Initialize STAB/SLB. We use a virtual address as it works
-	 * in real mode on pSeries and we want a virutal address on
-	 * iSeries anyway
-	 */
-	if (cpu_has_feature(CPU_FTR_SLB))
-		slb_initialize();
-	else
-		stab_initialize(lpaca->stab_addr);
+	/* Initialize the hash table or TLB handling */
+	early_init_mmu_secondary();
 }
 
 #endif /* CONFIG_SMP */
Index: linux-work/arch/powerpc/mm/hash_utils_64.c
===================================================================
--- linux-work.orig/arch/powerpc/mm/hash_utils_64.c	2009-03-11 13:41:36.000000000 +1100
+++ linux-work/arch/powerpc/mm/hash_utils_64.c	2009-03-11 13:54:50.000000000 +1100
@@ -590,7 +590,7 @@ static void __init htab_finish_init(void
 	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
 }
 
-void __init htab_initialize(void)
+static void __init htab_initialize(void)
 {
 	unsigned long table;
 	unsigned long pteg_count;
@@ -732,11 +732,43 @@ void __init htab_initialize(void)
 #undef KB
 #undef MB
 
-void htab_initialize_secondary(void)
+void __init early_init_mmu(void)
 {
+	/* Setup initial STAB address in the PACA */
+	get_paca()->stab_real = __pa((u64)&initial_stab);
+	get_paca()->stab_addr = (u64)&initial_stab;
+
+	/* Initialize the MMU Hash table and create the linear mapping
+	 * of memory. Has to be done before stab/slb initialization as
+	 * this is currently where the page size encoding is obtained
+	 */
+	htab_initialize();
+
+	/* Initialize stab / SLB management except on iSeries
+	 */
+	if (cpu_has_feature(CPU_FTR_SLB))
+		slb_initialize();
+	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		stab_initialize(get_paca()->stab_real);
+}
+
+#ifdef CONFIG_SMP
+void __init early_init_mmu_secondary(void)
+{
+	/* Initialize hash table for that CPU */
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		mtspr(SPRN_SDR1, _SDR1);
+
+	/* Initialize STAB/SLB. We use a virtual address as it works
+	 * in real mode on pSeries and we want a virutal address on
+	 * iSeries anyway
+	 */
+	if (cpu_has_feature(CPU_FTR_SLB))
+		slb_initialize();
+	else
+		stab_initialize(get_paca()->stab_addr);
 }
+#endif /* CONFIG_SMP */
 
 /*
  * Called by asm hashtable.S for doing lazy icache flush

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/6] powerpc/mm: Merge various PTE bits and accessors definitions (v3)
  2009-03-20  5:34 ` [PATCH 2/6] powerpc/mm: Merge various PTE bits and accessors " Benjamin Herrenschmidt
@ 2009-03-20  9:11   ` Benjamin Herrenschmidt
  0 siblings, 0 replies; 7+ messages in thread
From: Benjamin Herrenschmidt @ 2009-03-20  9:11 UTC (permalink / raw)
  To: linuxppc-dev

On Fri, 2009-03-20 at 16:34 +1100, Benjamin Herrenschmidt wrote:

> Index: linux-work/arch/powerpc/include/asm/pgtable-ppc64.h
> ===================================================================
> --- linux-work.orig/arch/powerpc/include/asm/pgtable-ppc64.h	2009-03-20 15:49:51.000000000 +1100
> +++ linux-work/arch/powerpc/include/asm/pgtable-ppc64.h	2009-03-20 15:52:59.000000000 +1100
> @@ -80,82 +80,8 @@
>   * Include the PTE bits definitions
>   */
>  #include <asm/pte-hash64.h>
> +#include <asm/pte-common.h
                             ^

The one I committed to "test" doesn't have that obvious typo :-)

Cheers,
Ben.
 

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2009-03-20  9:11 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-03-20  5:34 [PATCH 1/6] powerpc/mm: Tweak PTE bit combination definitions (v3) Benjamin Herrenschmidt
2009-03-20  5:34 ` [PATCH 2/6] powerpc/mm: Merge various PTE bits and accessors " Benjamin Herrenschmidt
2009-03-20  9:11   ` Benjamin Herrenschmidt
2009-03-20  5:34 ` [PATCH 3/6] powerpc/mm: Rename arch/powerpc/kernel/mmap.c to mmap_64.c Benjamin Herrenschmidt
2009-03-20  5:34 ` [PATCH 4/6] powerpc/mm: Fix printk type warning in mmu_context_nohash Benjamin Herrenschmidt
2009-03-20  5:34 ` [PATCH 5/6] powerpc/mm: Add option for non-atomic PTE updates to ppc64 Benjamin Herrenschmidt
2009-03-20  5:34 ` [PATCH 6/6] powerpc/mm: Introduce early_init_mmu() on 64-bit Benjamin Herrenschmidt
