linux-m68k.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/5] Rewrite Motorola MMU page-table layout
@ 2020-01-29 10:39 Peter Zijlstra
  2020-01-29 10:39 ` [PATCH 1/5] m68k,mm: Restructure motorola mmu " Peter Zijlstra
                   ` (6 more replies)
  0 siblings, 7 replies; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 10:39 UTC (permalink / raw)
  To: Geert Uytterhoeven; +Cc: linux-m68k, linux-kernel, Will Deacon, Peter Zijlstra

Hi!

In order to facilitate Will's READ_ONCE() patches:

  https://lkml.kernel.org/r/20200123153341.19947-1-will@kernel.org

we need to fix m68k/motorola to not have a giant pmd_t. These patches do so and
are tested using ARAnyM/68040.

It would be very good if someone can either test or tell us what emulator to
use for 020/030.

Please consider.


^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH 1/5] m68k,mm: Restructure motorola mmu page-table layout
  2020-01-29 10:39 [PATCH 0/5] Rewrite Motorola MMU page-table layout Peter Zijlstra
@ 2020-01-29 10:39 ` Peter Zijlstra
  2020-01-29 10:39 ` [PATCH 2/5] m68k,mm: Improve kernel_page_table() Peter Zijlstra
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 10:39 UTC (permalink / raw)
  To: Geert Uytterhoeven; +Cc: linux-m68k, linux-kernel, Will Deacon, Peter Zijlstra

The Motorola 68xxx MMUs, 040 (and later) have a fixed 7,7,{5,6}
page-table setup, where the last depends on the page-size selected (8k
vs 4k resp.), and head.S selects 4K pages. For 030 (and earlier) we
explicitly program 7,7,6 and 4K pages in %tc.

However, the current code implements this mightily weird. What it does
is group 16 of those (6 bit) pte tables into one 4k page to not waste
space. The down-side is that that forces pmd_t to be a 16-tuple
pointing to consecutive pte tables.

This breaks the generic code which assumes READ_ONCE(*pmd) will be
word sized.

Therefore implement a straightforward 7,7,6 3 level page-table setup,
with the addition (for 020/030) of (partial) large-page support. For
now this increases the memory footprint for pte-tables 15 fold.

Tested with ARAnyM/68040 emulation.

Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/m68k/include/asm/motorola_pgtable.h |   15 +-----------
 arch/m68k/include/asm/page.h             |    6 ++---
 arch/m68k/include/asm/pgtable_mm.h       |   10 ++++----
 arch/m68k/mm/kmap.c                      |   36 +++++++++++++------------------
 arch/m68k/mm/motorola.c                  |   28 +++++++++++-------------
 5 files changed, 39 insertions(+), 56 deletions(-)

--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -108,13 +108,7 @@ static inline pte_t pte_modify(pte_t pte
 
 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
-	unsigned long *ptr = pmdp->pmd;
-	short i = 16;
-	while (--i >= 0) {
-		*ptr++ = ptbl;
-		ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
-	}
+	pmd_val(*pmdp) = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
 }
 
 static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
@@ -138,12 +132,7 @@ static inline void pud_set(pud_t *pudp,
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
 #define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
-#define pmd_clear(pmdp) ({			\
-	unsigned long *__ptr = pmdp->pmd;	\
-	short __i = 16;				\
-	while (--__i >= 0)			\
-		*__ptr++ = 0;			\
-})
+#define pmd_clear(pmdp)		({ pmd_val(*pmdp) = 0; })
 #define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))
 
 
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -22,9 +22,9 @@
  * These are used to make use of C type-checking..
  */
 #if !defined(CONFIG_MMU) || CONFIG_PGTABLE_LEVELS == 3
-typedef struct { unsigned long pmd[16]; } pmd_t;
-#define pmd_val(x)	((&x)->pmd[0])
-#define __pmd(x)	((pmd_t) { { (x) }, })
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x)	((&x)->pmd)
+#define __pmd(x)	((pmd_t) { (x) } )
 #endif
 
 typedef struct { unsigned long pte; } pte_t;
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -36,7 +36,7 @@
 
 /* PMD_SHIFT determines the size of the area a second-level page table can map */
 #if CONFIG_PGTABLE_LEVELS == 3
-#define PMD_SHIFT	22
+#define PMD_SHIFT	18
 #endif
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
@@ -67,8 +67,8 @@
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	1024
 #else
-#define PTRS_PER_PTE	1024
-#define PTRS_PER_PMD	8
+#define PTRS_PER_PTE	64
+#define PTRS_PER_PMD	128
 #define PTRS_PER_PGD	128
 #endif
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
@@ -76,8 +76,8 @@
 
 /* Virtual address region for use by kernel_map() */
 #ifdef CONFIG_SUN3
-#define KMAP_START     0x0DC00000
-#define KMAP_END       0x0E000000
+#define KMAP_START	0x0dc00000
+#define KMAP_END	0x0e000000
 #elif defined(CONFIG_COLDFIRE)
 #define KMAP_START	0xe0000000
 #define KMAP_END	0xf0000000
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -24,8 +24,6 @@
 
 #undef DEBUG
 
-#define PTRTREESIZE	(256*1024)
-
 /*
  * For 040/060 we can use the virtual memory area like other architectures,
  * but for 020/030 we want to use early termination page descriptors and we
@@ -50,7 +48,7 @@ static inline void free_io_area(void *ad
 
 #else
 
-#define IO_SIZE		(256*1024)
+#define IO_SIZE		PMD_SIZE
 
 static struct vm_struct *iolist;
 
@@ -81,14 +79,13 @@ static void __free_io_area(void *addr, u
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
-			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;
 
 			if (pmd_type == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = 0;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
-				continue;
+				pmd_clear(pmd_dir);
+				virtaddr += PMD_SIZE;
+				size -= PMD_SIZE;
+
 			} else if (pmd_type == 0)
 				continue;
 		}
@@ -249,7 +246,7 @@ void __iomem *__ioremap(unsigned long ph
 
 	while ((long)size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
@@ -263,10 +260,10 @@ void __iomem *__ioremap(unsigned long ph
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-			physaddr += PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
-			size -= PTRTREESIZE;
+			pmd_val(*pmd_dir) = physaddr;
+			physaddr += PMD_SIZE;
+			virtaddr += PMD_SIZE;
+			size -= PMD_SIZE;
 		} else
 #endif
 		{
@@ -367,13 +364,12 @@ void kernel_set_cachemode(void *addr, un
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+			unsigned long pmd = pmd_val(*pmd_dir);
 
-			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
-							 _CACHEMASK040) | cmode;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
+			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
+				virtaddr += PMD_SIZE;
+				size -= PMD_SIZE;
 				continue;
 			}
 		}
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -116,8 +116,6 @@ static pmd_t * __init kernel_ptr_table(v
 
 static void __init map_node(int node)
 {
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
 	unsigned long physaddr, virtaddr, size;
 	pgd_t *pgd_dir;
 	p4d_t *p4d_dir;
@@ -135,21 +133,21 @@ static void __init map_node(int node)
 
 	while (size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
 				virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
 		if (virtaddr && CPU_IS_020_OR_030) {
-			if (!(virtaddr & (ROOTTREESIZE-1)) &&
-			    size >= ROOTTREESIZE) {
+			if (!(virtaddr & (PGDIR_SIZE-1)) &&
+			    size >= PGDIR_SIZE) {
 #ifdef DEBUG
 				printk ("[very early term]");
 #endif
 				pgd_val(*pgd_dir) = physaddr;
-				size -= ROOTTREESIZE;
-				virtaddr += ROOTTREESIZE;
-				physaddr += ROOTTREESIZE;
+				size -= PGDIR_SIZE;
+				virtaddr += PGDIR_SIZE;
+				physaddr += PGDIR_SIZE;
 				continue;
 			}
 		}
@@ -169,8 +167,8 @@ static void __init map_node(int node)
 #ifdef DEBUG
 				printk ("[early term]");
 #endif
-				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-				physaddr += PTRTREESIZE;
+				pmd_val(*pmd_dir) = physaddr;
+				physaddr += PMD_SIZE;
 			} else {
 				int i;
 #ifdef DEBUG
@@ -178,15 +176,15 @@ static void __init map_node(int node)
 #endif
 				zero_pgtable = kernel_ptr_table();
 				pte_dir = (pte_t *)zero_pgtable;
-				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
-					_PAGE_TABLE | _PAGE_ACCESSED;
+				pmd_set(pmd_dir, pte_dir);
+
 				pte_val(*pte_dir++) = 0;
 				physaddr += PAGE_SIZE;
-				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
 					pte_val(*pte_dir++) = physaddr;
 			}
-			size -= PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
+			size -= PMD_SIZE;
+			virtaddr += PMD_SIZE;
 		} else {
 			if (!pmd_present(*pmd_dir)) {
 #ifdef DEBUG



^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH 2/5] m68k,mm: Improve kernel_page_table()
  2020-01-29 10:39 [PATCH 0/5] Rewrite Motorola MMU page-table layout Peter Zijlstra
  2020-01-29 10:39 ` [PATCH 1/5] m68k,mm: Restructure motorola mmu " Peter Zijlstra
@ 2020-01-29 10:39 ` Peter Zijlstra
  2020-01-29 10:39 ` [PATCH 3/5] m68k,mm: Use table allocator for pgtables Peter Zijlstra
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 10:39 UTC (permalink / raw)
  To: Geert Uytterhoeven; +Cc: linux-m68k, linux-kernel, Will Deacon, Peter Zijlstra

With the PTE-tables now only being 256 bytes, allocating a full page
for them is a giant waste. Start by improving the boot time allocator
such that init_mm initialization will at least have optimal memory
density.

Many thanks to Will Deacon for help with debugging and ferreting out
lost information on these dusty MMUs.

Notes:

 - _TABLE_MASK is reduced to account for the shorter (256 byte)
   alignment of pte-tables, per the manual, table entries should only
   ever have state in the low 4 bits (Used,WrProt,Desc1,Desc0) so it is
   still longer than strictly required. (Thanks Will!!!)

 - Also use kernel_page_table() for the 020/030 zero_pgtable case and
   consequently remove the zero_pgtable init hack (will fix up later).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/m68k/include/asm/motorola_pgtable.h |   13 ++++++
 arch/m68k/mm/init.c                      |    5 --
 arch/m68k/mm/motorola.c                  |   61 +++++++++++++++++--------------
 3 files changed, 46 insertions(+), 33 deletions(-)

--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -23,7 +23,18 @@
 #define _DESCTYPE_MASK	0x003
 
 #define _CACHEMASK040	(~0x060)
-#define _TABLE_MASK	(0xfffffe00)
+
+/*
+ * Currently set to the minimum alignment of table pointers (256 bytes).
+ * The hardware only uses the low 4 bits for state:
+ *
+ *    3 - Used
+ *    2 - Write Protected
 + *  0,1 - Descriptor Type
+ *
+ * and has the rest of the bits reserved.
+ */
+#define _TABLE_MASK	(0xffffff00)
 
 #define _PAGE_TABLE	(_PAGE_SHORT)
 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -42,7 +42,6 @@ EXPORT_SYMBOL(empty_zero_page);
 
 #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 extern void init_pointer_table(unsigned long ptable);
-extern pmd_t *zero_pgtable;
 #endif
 
 #ifdef CONFIG_MMU
@@ -135,10 +134,6 @@ static inline void init_pointer_tables(v
 		if (pud_present(*pud))
 			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
 	}
-
-	/* insert also pointer table that we used to unmap the zero page */
-	if (zero_pgtable)
-		init_pointer_table((unsigned long)zero_pgtable);
 #endif
 }
 
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -50,29 +50,37 @@ extern __initdata unsigned long m68k_ini
 
 extern unsigned long availmem;
 
+static pte_t *last_pte_table __initdata = NULL;
+
 static pte_t * __init kernel_page_table(void)
 {
-	pte_t *ptablep;
+	pte_t *pte_table = last_pte_table;
+
+	if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
+		pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!pte_table) {
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+					__func__, PAGE_SIZE, PAGE_SIZE);
+		}
+
+		clear_page(pte_table);
+		__flush_page_to_ram(pte_table);
+		flush_tlb_kernel_page(pte_table);
+		nocache_page(pte_table);
+
+		last_pte_table = pte_table;
+	}
 
-	ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!ptablep)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
-	clear_page(ptablep);
-	__flush_page_to_ram(ptablep);
-	flush_tlb_kernel_page(ptablep);
-	nocache_page(ptablep);
+	last_pte_table += PTRS_PER_PTE;
 
-	return ptablep;
+	return pte_table;
 }
 
-static pmd_t *last_pgtable __initdata = NULL;
-pmd_t *zero_pgtable __initdata = NULL;
+static pmd_t *last_pmd_table __initdata = NULL;
 
 static pmd_t * __init kernel_ptr_table(void)
 {
-	if (!last_pgtable) {
+	if (!last_pmd_table) {
 		unsigned long pmd, last;
 		int i;
 
@@ -91,27 +99,27 @@ static pmd_t * __init kernel_ptr_table(v
 				last = pmd;
 		}
 
-		last_pgtable = (pmd_t *)last;
+		last_pmd_table = (pmd_t *)last;
 #ifdef DEBUG
-		printk("kernel_ptr_init: %p\n", last_pgtable);
+		printk("kernel_ptr_init: %p\n", last_pmd_table);
 #endif
 	}
 
-	last_pgtable += PTRS_PER_PMD;
-	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
-		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
+	last_pmd_table += PTRS_PER_PMD;
+	if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
+		last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
 							   PAGE_SIZE);
-		if (!last_pgtable)
+		if (!last_pmd_table)
 			panic("%s: Failed to allocate %lu bytes align=%lx\n",
 			      __func__, PAGE_SIZE, PAGE_SIZE);
 
-		clear_page(last_pgtable);
-		__flush_page_to_ram(last_pgtable);
-		flush_tlb_kernel_page(last_pgtable);
-		nocache_page(last_pgtable);
+		clear_page(last_pmd_table);
+		__flush_page_to_ram(last_pmd_table);
+		flush_tlb_kernel_page(last_pmd_table);
+		nocache_page(last_pmd_table);
 	}
 
-	return last_pgtable;
+	return last_pmd_table;
 }
 
 static void __init map_node(int node)
@@ -174,8 +182,7 @@ static void __init map_node(int node)
 #ifdef DEBUG
 				printk ("[zero map]");
 #endif
-				zero_pgtable = kernel_ptr_table();
-				pte_dir = (pte_t *)zero_pgtable;
+				pte_dir = kernel_page_table();
 				pmd_set(pmd_dir, pte_dir);
 
 				pte_val(*pte_dir++) = 0;



^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH 3/5] m68k,mm: Use table allocator for pgtables
  2020-01-29 10:39 [PATCH 0/5] Rewrite Motorola MMU page-table layout Peter Zijlstra
  2020-01-29 10:39 ` [PATCH 1/5] m68k,mm: Restructure motorola mmu " Peter Zijlstra
  2020-01-29 10:39 ` [PATCH 2/5] m68k,mm: Improve kernel_page_table() Peter Zijlstra
@ 2020-01-29 10:39 ` Peter Zijlstra
  2020-01-29 12:11   ` Will Deacon
  2020-01-29 10:39 ` [PATCH 4/5] m68k,mm: Extend table allocator for multiple sizes Peter Zijlstra
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 10:39 UTC (permalink / raw)
  To: Geert Uytterhoeven; +Cc: linux-m68k, linux-kernel, Will Deacon, Peter Zijlstra

With the new page-table layout, using full (4k) pages for (256 byte)
pte-tables is immensely wasteful. Move the pte-tables over to the
same allocator already used for the (512 byte) higher level tables
(pgd/pmd).

This reduces the pte-table waste from 15x to 2x.

Due to no longer being bound to 16 consecutive tables, this might
actually already be more efficient than the old code for sparse
tables.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/m68k/include/asm/motorola_pgalloc.h |   54 ++++++-------------------------
 arch/m68k/include/asm/motorola_pgtable.h |    8 ++++
 arch/m68k/include/asm/page.h             |    2 -
 3 files changed, 19 insertions(+), 45 deletions(-)

--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -10,60 +10,28 @@ extern int free_pointer_table(pmd_t *);
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (pte) {
-		__flush_page_to_ram(pte);
-		flush_tlb_kernel_page(pte);
-		nocache_page(pte);
-	}
-
-	return pte;
+	return (pte_t *)get_pointer_table();
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	cache_page(pte);
-	free_page((unsigned long) pte);
+	free_pointer_table((void *)pte);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	struct page *page;
-	pte_t *pte;
+	return (pte_t *)get_pointer_table();
+}
 
-	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
-	if(!page)
-		return NULL;
-	if (!pgtable_pte_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-
-	pte = kmap(page);
-	__flush_page_to_ram(pte);
-	flush_tlb_kernel_page(pte);
-	nocache_page(pte);
-	kunmap(page);
-	return page;
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t page)
-{
-	pgtable_pte_page_dtor(page);
-	cache_page(kmap(page));
-	kunmap(page);
-	__free_page(page);
+static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
+{
+	free_pointer_table((void *)pgtable);
 }
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
-	pgtable_pte_page_dtor(page);
-	cache_page(kmap(page));
-	kunmap(page);
-	__free_page(page);
+	free_pointer_table((void *)pgtable);
 }
 
 
@@ -102,9 +70,9 @@ static inline void pmd_populate_kernel(s
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
 {
-	pmd_set(pmd, page_address(page));
+	pmd_set(pmd, page);
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -133,7 +133,13 @@ static inline void pud_set(pud_t *pudp,
 #define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
 #define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
 #define pmd_clear(pmdp)		({ pmd_val(*pmdp) = 0; })
-#define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))
+
+/*
+ * m68k does not have huge pages (020/030 actually could), but generic code
 + * expects pmd_page() to exist, only to then DCE it all. Provide a dummy to
+ * make the compiler happy.
+ */
+#define pmd_page(pmd)		NULL
 
 
 #define pud_none(pud)		(!pud_val(pud))
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -30,7 +30,7 @@ typedef struct { unsigned long pmd; } pm
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
-typedef struct page *pgtable_t;
+typedef pte_t *pgtable_t;
 
 #define pte_val(x)	((x).pte)
 #define pgd_val(x)	((x).pgd)



^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH 4/5] m68k,mm: Extend table allocator for multiple sizes
  2020-01-29 10:39 [PATCH 0/5] Rewrite Motorola MMU page-table layout Peter Zijlstra
                   ` (2 preceding siblings ...)
  2020-01-29 10:39 ` [PATCH 3/5] m68k,mm: Use table allocator for pgtables Peter Zijlstra
@ 2020-01-29 10:39 ` Peter Zijlstra
  2020-01-29 12:17   ` Will Deacon
  2020-01-29 10:39 ` [PATCH 5/5] m68k,mm: Fully initialize the page-table allocator Peter Zijlstra
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 10:39 UTC (permalink / raw)
  To: Geert Uytterhoeven; +Cc: linux-m68k, linux-kernel, Will Deacon, Peter Zijlstra

In addition to the PGD/PMD table size (128*4) add a PTE table size
(64*4) to the table allocator. This completely removes the pte-table
overhead compared to the old code, even for dense tables.

Notes:

 - the allocator gained __flush_page_to_ram(), since the old
   page-based allocator had that.

 - the allocator gained a list_empty() check to deal with there not
   being any pages at all.

 - the free mask is extended to cover more than the 8 bits required
   for the (512 byte) PGD/PMD tables.

 - NR_PAGETABLE accounting is restored.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/m68k/include/asm/motorola_pgalloc.h |   24 +++++-----
 arch/m68k/mm/init.c                      |    6 +-
 arch/m68k/mm/memory.c                    |   70 ++++++++++++++++++++-----------
 3 files changed, 61 insertions(+), 39 deletions(-)

--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -5,61 +5,61 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-extern pmd_t *get_pointer_table(void);
-extern int free_pointer_table(pmd_t *);
+extern void *get_pointer_table(int type);
+extern int free_pointer_table(void *table, int type);
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	return (pte_t *)get_pointer_table();
+	return get_pointer_table(1);
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_pointer_table((void *)pte);
+	free_pointer_table(pte, 1);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	return (pte_t *)get_pointer_table();
+	return get_pointer_table(1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
-	free_pointer_table((void *)pgtable);
+	free_pointer_table(pgtable, 1);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
-	free_pointer_table((void *)pgtable);
+	free_pointer_table(pgtable, 1);
 }
 
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	return get_pointer_table();
+	return get_pointer_table(0);
 }
 
 static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	return free_pointer_table(pmd);
+	return free_pointer_table(pmd, 0);
 }
 
 static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				 unsigned long address)
 {
-	return free_pointer_table(pmd);
+	return free_pointer_table(pmd, 0);
 }
 
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	pmd_free(mm, (pmd_t *)pgd);
+	free_pointer_table(pgd, 0);
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return (pgd_t *)get_pointer_table();
+	return get_pointer_table(0);
 }
 
 
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -41,7 +41,7 @@ void *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
 #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
-extern void init_pointer_table(unsigned long ptable);
+extern void init_pointer_table(unsigned long ptable, int type);
 #endif
 
 #ifdef CONFIG_MMU
@@ -127,12 +127,12 @@ static inline void init_pointer_tables(v
 	int i;
 
 	/* insert pointer tables allocated so far into the tablelist */
-	init_pointer_table((unsigned long)kernel_pg_dir);
+	init_pointer_table((unsigned long)kernel_pg_dir, 0);
 	for (i = 0; i < PTRS_PER_PGD; i++) {
 		pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
 
 		if (pud_present(*pud))
-			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
+			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]), 0);
 	}
 #endif
 }
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -27,24 +27,34 @@
    arch/sparc/mm/srmmu.c ... */
 
 typedef struct list_head ptable_desc;
-static LIST_HEAD(ptable_list);
+
+static struct list_head ptable_list[2] = {
+	LIST_HEAD_INIT(ptable_list[0]),
+	LIST_HEAD_INIT(ptable_list[1]),
+};
 
 #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
+#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
+
+static const int ptable_shift[2] = {
+	7+2, /* PGD, PMD */
+	6+2, /* PTE */
+};
 
-#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
+#define ptable_size(type) (1U << ptable_shift[type])
+#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
 
-void __init init_pointer_table(unsigned long ptable)
+void __init init_pointer_table(unsigned long ptable, int type)
 {
 	ptable_desc *dp;
 	unsigned long page = ptable & PAGE_MASK;
-	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
 
 	dp = PD_PTABLE(page);
 	if (!(PD_MARKBITS(dp) & mask)) {
-		PD_MARKBITS(dp) = 0xff;
-		list_add(dp, &ptable_list);
+		PD_MARKBITS(dp) = ptable_mask(type);
+		list_add(dp, &ptable_list[type]);
 	}
 
 	PD_MARKBITS(dp) &= ~mask;
@@ -57,12 +67,10 @@ void __init init_pointer_table(unsigned
 	return;
 }
 
-pmd_t *get_pointer_table (void)
+void *get_pointer_table (int type)
 {
-	ptable_desc *dp = ptable_list.next;
-	unsigned char mask = PD_MARKBITS (dp);
-	unsigned char tmp;
-	unsigned int off;
+	ptable_desc *dp = ptable_list[type].next;
+	unsigned int mask, tmp, off;
 
 	/*
 	 * For a pointer table for a user process address space, a
@@ -70,38 +78,50 @@ pmd_t *get_pointer_table (void)
 	 * page can hold 8 pointer tables.  The page is remapped in
 	 * virtual address space to be noncacheable.
 	 */
-	if (mask == 0) {
+	if (list_empty(&ptable_list[type]) || PD_MARKBITS(dp) == 0) {
 		void *page;
 		ptable_desc *new;
 
 		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
 			return NULL;
 
+		if (type) {
+			/*
+			 * m68k doesn't have SPLIT_PTE_PTLOCKS for not having
+			 * SMP.
+			 */
+			pgtable_pte_page_ctor(virt_to_page(page));
+		}
+
+		__flush_page_to_ram(page);
 		flush_tlb_kernel_page(page);
 		nocache_page(page);
 
 		new = PD_PTABLE(page);
-		PD_MARKBITS(new) = 0xfe;
+		PD_MARKBITS(new) = ptable_mask(type) - 1;
 		list_add_tail(new, dp);
 
 		return (pmd_t *)page;
 	}
 
-	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
+	mask = PD_MARKBITS(dp);
+	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
 		;
-	PD_MARKBITS(dp) = mask & ~tmp;
-	if (!PD_MARKBITS(dp)) {
+	mask &= ~tmp;
+	PD_MARKBITS(dp) = mask;
+	if (!mask) {
 		/* move to end of list */
-		list_move_tail(dp, &ptable_list);
+		list_move_tail(dp, &ptable_list[type]);
 	}
-	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
+
+	return page_address(PD_PAGE(dp)) + off;
 }
 
-int free_pointer_table (pmd_t *ptable)
+int free_pointer_table (void *ptable, int type)
 {
 	ptable_desc *dp;
 	unsigned long page = (unsigned long)ptable & PAGE_MASK;
-	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
+	unsigned int mask = 1U << (((unsigned long)ptable - page)/ptable_size(type));
 
 	dp = PD_PTABLE(page);
 	if (PD_MARKBITS (dp) & mask)
@@ -109,18 +129,20 @@ int free_pointer_table (pmd_t *ptable)
 
 	PD_MARKBITS (dp) |= mask;
 
-	if (PD_MARKBITS(dp) == 0xff) {
+	if (PD_MARKBITS(dp) == ptable_mask(type)) {
 		/* all tables in page are free, free page */
 		list_del(dp);
 		cache_page((void *)page);
+		if (type)
+			pgtable_pte_page_dtor(virt_to_page(page));
 		free_page (page);
 		return 1;
-	} else if (ptable_list.next != dp) {
+	} else if (ptable_list[type].next != dp) {
 		/*
 		 * move this descriptor to the front of the list, since
 		 * it has one or more free tables.
 		 */
-		list_move(dp, &ptable_list);
+		list_move(dp, &ptable_list[type]);
 	}
 	return 0;
 }



^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH 5/5] m68k,mm: Fully initialize the page-table allocator
  2020-01-29 10:39 [PATCH 0/5] Rewrite Motorola MMU page-table layout Peter Zijlstra
                   ` (3 preceding siblings ...)
  2020-01-29 10:39 ` [PATCH 4/5] m68k,mm: Extend table allocator for multiple sizes Peter Zijlstra
@ 2020-01-29 10:39 ` Peter Zijlstra
  2020-01-29 10:49 ` [PATCH 0/5] Rewrite Motorola MMU page-table layout John Paul Adrian Glaubitz
  2020-01-31  6:31 ` Greg Ungerer
  6 siblings, 0 replies; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 10:39 UTC (permalink / raw)
  To: Geert Uytterhoeven; +Cc: linux-m68k, linux-kernel, Will Deacon, Peter Zijlstra

Also iterate the PMD tables to populate the PTE table allocator. This
also fully replaces the previous zero_pgtable hack.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/m68k/mm/init.c |   23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -124,15 +124,30 @@ void free_initmem(void)
 static inline void init_pointer_tables(void)
 {
 #if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
-	int i;
+	int i, j;
 
 	/* insert pointer tables allocated so far into the tablelist */
 	init_pointer_table((unsigned long)kernel_pg_dir, 0);
 	for (i = 0; i < PTRS_PER_PGD; i++) {
-		pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
+		pud_t *pud = (pud_t *)&kernel_pg_dir[i];
+		pmd_t *pmd_dir;
 
-		if (pud_present(*pud))
-			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]), 0);
+		if (!pud_present(*pud))
+			continue;
+
+		pmd_dir = (pmd_t *)pgd_page_vaddr(kernel_pg_dir[i]);
+		init_pointer_table((unsigned long)pmd_dir, 0);
+
+		for (j = 0; j < PTRS_PER_PMD; j++) {
+			pmd_t *pmd = &pmd_dir[j];
+			pte_t *pte_dir;
+
+			if (!pmd_present(*pmd))
+				continue;
+
+			pte_dir = (pte_t *)__pmd_page(*pmd);
+			init_pointer_table((unsigned long)pte_dir, 1);
+		}
 	}
 #endif
 }



^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-29 10:39 [PATCH 0/5] Rewrite Motorola MMU page-table layout Peter Zijlstra
                   ` (4 preceding siblings ...)
  2020-01-29 10:39 ` [PATCH 5/5] m68k,mm: Fully initialize the page-table allocator Peter Zijlstra
@ 2020-01-29 10:49 ` John Paul Adrian Glaubitz
  2020-01-29 11:54   ` Peter Zijlstra
  2020-01-31  6:31 ` Greg Ungerer
  6 siblings, 1 reply; 28+ messages in thread
From: John Paul Adrian Glaubitz @ 2020-01-29 10:49 UTC (permalink / raw)
  To: Peter Zijlstra, Geert Uytterhoeven
  Cc: linux-m68k, linux-kernel, Will Deacon, Michael Schmitz

Hi Peter!

On 1/29/20 11:39 AM, Peter Zijlstra wrote:
> It would be very good if someone can either test or tell us what emulator to
> use for 020/030.

If possible, please also test on qemu-system, see [1] for a how-to.

I can test the patches on a real machine, Michael Schmitz could maybe
include them in one of his next test builds which we are running on one
of the Amigas I have.

Adrian

> [1] https://wiki.debian.org/M68k/QemuSystemM68k

-- 
 .''`.  John Paul Adrian Glaubitz
: :' :  Debian Developer - glaubitz@debian.org
`. `'   Freie Universitaet Berlin - glaubitz@physik.fu-berlin.de
  `-    GPG: 62FF 8A75 84E0 2956 9546  0006 7426 3B37 F5B5 F913

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-29 10:49 ` [PATCH 0/5] Rewrite Motorola MMU page-table layout John Paul Adrian Glaubitz
@ 2020-01-29 11:54   ` Peter Zijlstra
  2020-01-29 12:05     ` John Paul Adrian Glaubitz
  2020-01-29 18:52     ` Michael Schmitz
  0 siblings, 2 replies; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 11:54 UTC (permalink / raw)
  To: John Paul Adrian Glaubitz
  Cc: Geert Uytterhoeven, linux-m68k, linux-kernel, Will Deacon,
	Michael Schmitz

On Wed, Jan 29, 2020 at 11:49:13AM +0100, John Paul Adrian Glaubitz wrote:

> > [1] https://wiki.debian.org/M68k/QemuSystemM68k

Now, if only debian would actually ship that :/

AFAICT that emulates a q800 which is another 68040 and should thus not
differ from ARAnyM.

I'm fairly confident in the 040 bits, it's the 020/030 things that need
coverage.

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-29 11:54   ` Peter Zijlstra
@ 2020-01-29 12:05     ` John Paul Adrian Glaubitz
  2020-01-29 12:30       ` Peter Zijlstra
  2020-01-29 18:52     ` Michael Schmitz
  1 sibling, 1 reply; 28+ messages in thread
From: John Paul Adrian Glaubitz @ 2020-01-29 12:05 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: Geert Uytterhoeven, linux-m68k, linux-kernel, Will Deacon,
	Michael Schmitz

On 1/29/20 12:54 PM, Peter Zijlstra wrote:
> On Wed, Jan 29, 2020 at 11:49:13AM +0100, John Paul Adrian Glaubitz wrote:
> 
>>> [1] https://wiki.debian.org/M68k/QemuSystemM68k
> 
> Now, if only debian would actually ship that :/

Debian should receive the QEMU version that supports full m68k emulation
soonish.

> AFAICT that emulates a q800 which is another 68040 and should thus not
> differ from ARAnyM.

Right. You could switch to a different CPU emulation though, Laurent
Vivier should be able to say more on that.

> I'm fairly confident in the 040 bits, it's the 020/030 things that need
> coverage.

I'm currently setting up an Amiga 500 with an ACA-1233n/40 accelerator
which has an 68030 CPU clocked at 40 MHz and 128 MB RAM which will be
used for developing a driver for a new network card card for the Amiga
500 called X-Surf 500.

I can definitely test the patches on that setup, but I certainly won't
have the time to set everything up until after FOSDEM.

Adrian

-- 
 .''`.  John Paul Adrian Glaubitz
: :' :  Debian Developer - glaubitz@debian.org
`. `'   Freie Universitaet Berlin - glaubitz@physik.fu-berlin.de
  `-    GPG: 62FF 8A75 84E0 2956 9546  0006 7426 3B37 F5B5 F913

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 3/5] m68k,mm: Use table allocator for pgtables
  2020-01-29 10:39 ` [PATCH 3/5] m68k,mm: Use table allocator for pgtables Peter Zijlstra
@ 2020-01-29 12:11   ` Will Deacon
  2020-01-29 12:24     ` Peter Zijlstra
  0 siblings, 1 reply; 28+ messages in thread
From: Will Deacon @ 2020-01-29 12:11 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Geert Uytterhoeven, linux-m68k, linux-kernel

On Wed, Jan 29, 2020 at 11:39:44AM +0100, Peter Zijlstra wrote:
> With the new page-table layout, using full (4k) pages for (256 byte)
> pte-tables is immensely wasteful. Move the pte-tables over to the
> same allocator already used for the (512 byte) higher level tables
> (pgd/pmd).
> 
> This reduces the pte-table waste from 15x to 2x.
> 
> Due to no longer being bound to 16 consecutive tables, this might
> actually already be more efficient than the old code for sparse
> tables.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  arch/m68k/include/asm/motorola_pgalloc.h |   54 ++++++-------------------------
>  arch/m68k/include/asm/motorola_pgtable.h |    8 ++++
>  arch/m68k/include/asm/page.h             |    2 -
>  3 files changed, 19 insertions(+), 45 deletions(-)
> 
> --- a/arch/m68k/include/asm/motorola_pgalloc.h
> +++ b/arch/m68k/include/asm/motorola_pgalloc.h
> @@ -10,60 +10,28 @@ extern int free_pointer_table(pmd_t *);
>  
>  static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
>  {
> -	pte_t *pte;
> -
> -	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
> -	if (pte) {
> -		__flush_page_to_ram(pte);
> -		flush_tlb_kernel_page(pte);
> -		nocache_page(pte);
> -	}
> -
> -	return pte;
> +	return (pte_t *)get_pointer_table();

Weirdly, get_pointer_table() seems to elide the __flush_page_to_ram()
call, so you're missing that for ptes with this change. I think it's
probably needed for the higher levels too (and kernel_page_table()
does it for example) so I'd be inclined to add it unconditionally
rather than predicate it on the allocation type introduced by your later
patch.

> --- a/arch/m68k/include/asm/page.h
> +++ b/arch/m68k/include/asm/page.h
> @@ -30,7 +30,7 @@ typedef struct { unsigned long pmd; } pm
>  typedef struct { unsigned long pte; } pte_t;
>  typedef struct { unsigned long pgd; } pgd_t;
>  typedef struct { unsigned long pgprot; } pgprot_t;
> -typedef struct page *pgtable_t;
> +typedef pte_t *pgtable_t;

Urgh, this is a big (cross-arch) mess that we should fix later.

Will

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 4/5] m68k,mm: Extend table allocator for multiple sizes
  2020-01-29 10:39 ` [PATCH 4/5] m68k,mm: Extend table allocator for multiple sizes Peter Zijlstra
@ 2020-01-29 12:17   ` Will Deacon
  2020-01-29 12:43     ` Peter Zijlstra
  0 siblings, 1 reply; 28+ messages in thread
From: Will Deacon @ 2020-01-29 12:17 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Geert Uytterhoeven, linux-m68k, linux-kernel

On Wed, Jan 29, 2020 at 11:39:45AM +0100, Peter Zijlstra wrote:
> In addition to the PGD/PMD table size (128*4) add a PTE table size
> (64*4) to the table allocator. This completely removes the pte-table
> overhead compared to the old code, even for dense tables.
> 
> Notes:
> 
>  - the allocator gained __flush_page_to_ram(), since the old
>    page-based allocator had that.
> 
>  - the allocator gained a list_empty() check to deal with there not
>    being any pages at all.
> 
>  - the free mask is extended to cover more than the 8 bits required
>    for the (512 byte) PGD/PMD tables.
> 
>  - NR_PAGETABLE accounting is restored.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  arch/m68k/include/asm/motorola_pgalloc.h |   24 +++++-----
>  arch/m68k/mm/init.c                      |    6 +-
>  arch/m68k/mm/memory.c                    |   70 ++++++++++++++++++++-----------
>  3 files changed, 61 insertions(+), 39 deletions(-)
> 
> --- a/arch/m68k/include/asm/motorola_pgalloc.h
> +++ b/arch/m68k/include/asm/motorola_pgalloc.h
> @@ -5,61 +5,61 @@
>  #include <asm/tlb.h>
>  #include <asm/tlbflush.h>
>  
> -extern pmd_t *get_pointer_table(void);
> -extern int free_pointer_table(pmd_t *);
> +extern void *get_pointer_table(int type);

Could be prettier/obfuscated with an enum type?

> --- a/arch/m68k/mm/memory.c
> +++ b/arch/m68k/mm/memory.c
> @@ -27,24 +27,34 @@
>     arch/sparc/mm/srmmu.c ... */
>  
>  typedef struct list_head ptable_desc;
> -static LIST_HEAD(ptable_list);
> +
> +static struct list_head ptable_list[2] = {
> +	LIST_HEAD_INIT(ptable_list[0]),
> +	LIST_HEAD_INIT(ptable_list[1]),
> +};
>  
>  #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
>  #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
> -#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
> +#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
> +
> +static const int ptable_shift[2] = {
> +	7+2, /* PGD, PMD */
> +	6+2, /* PTE */
> +};
>  
> -#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
> +#define ptable_size(type) (1U << ptable_shift[type])
> +#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
>  
> -void __init init_pointer_table(unsigned long ptable)
> +void __init init_pointer_table(unsigned long ptable, int type)
>  {
>  	ptable_desc *dp;
>  	unsigned long page = ptable & PAGE_MASK;
> -	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
> +	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
>  
>  	dp = PD_PTABLE(page);
>  	if (!(PD_MARKBITS(dp) & mask)) {
> -		PD_MARKBITS(dp) = 0xff;
> -		list_add(dp, &ptable_list);
> +		PD_MARKBITS(dp) = ptable_mask(type);
> +		list_add(dp, &ptable_list[type]);
>  	}
>  
>  	PD_MARKBITS(dp) &= ~mask;
> @@ -57,12 +67,10 @@ void __init init_pointer_table(unsigned
>  	return;
>  }
>  
> -pmd_t *get_pointer_table (void)
> +void *get_pointer_table (int type)
>  {
> -	ptable_desc *dp = ptable_list.next;
> -	unsigned char mask = PD_MARKBITS (dp);
> -	unsigned char tmp;
> -	unsigned int off;
> +	ptable_desc *dp = ptable_list[type].next;
> +	unsigned int mask, tmp, off;

nit, but if you do:

	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);

then you can leave the existing mask logic as-is.

Will

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 3/5] m68k,mm: Use table allocator for pgtables
  2020-01-29 12:11   ` Will Deacon
@ 2020-01-29 12:24     ` Peter Zijlstra
  0 siblings, 0 replies; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 12:24 UTC (permalink / raw)
  To: Will Deacon; +Cc: Geert Uytterhoeven, linux-m68k, linux-kernel

On Wed, Jan 29, 2020 at 12:11:50PM +0000, Will Deacon wrote:
> On Wed, Jan 29, 2020 at 11:39:44AM +0100, Peter Zijlstra wrote:

> > --- a/arch/m68k/include/asm/motorola_pgalloc.h
> > +++ b/arch/m68k/include/asm/motorola_pgalloc.h
> > @@ -10,60 +10,28 @@ extern int free_pointer_table(pmd_t *);
> >  
> >  static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
> >  {
> > -	pte_t *pte;
> > -
> > -	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
> > -	if (pte) {
> > -		__flush_page_to_ram(pte);
> > -		flush_tlb_kernel_page(pte);
> > -		nocache_page(pte);
> > -	}
> > -
> > -	return pte;
> > +	return (pte_t *)get_pointer_table();
> 
> Weirdly, get_pointer_table() seems to elide the __flush_page_to_ram()
> call, so you're missing that for ptes with this change. I think it's
> probably needed for the higher levels too (and kernel_page_table()
> does it for example) so I'd be inclined to add it unconditionally
> rather than predicate it on the allocation type introduced by your later
> patch.

The next patch adds that unconditionally to the table allocator. The
only thing conditional on the type is the PG_PageTable and NR_PAGETABLES
accounting crud.

> > --- a/arch/m68k/include/asm/page.h
> > +++ b/arch/m68k/include/asm/page.h
> > @@ -30,7 +30,7 @@ typedef struct { unsigned long pmd; } pm
> >  typedef struct { unsigned long pte; } pte_t;
> >  typedef struct { unsigned long pgd; } pgd_t;
> >  typedef struct { unsigned long pgprot; } pgprot_t;
> > -typedef struct page *pgtable_t;
> > +typedef pte_t *pgtable_t;
> 
> Urgh, this is a big (cross-arch) mess that we should fix later.

Yes, I ran into this when I did those MMU-gather fixes as well. For this
patch I cribbed what s390 already does.


^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-29 12:05     ` John Paul Adrian Glaubitz
@ 2020-01-29 12:30       ` Peter Zijlstra
  0 siblings, 0 replies; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 12:30 UTC (permalink / raw)
  To: John Paul Adrian Glaubitz
  Cc: Geert Uytterhoeven, linux-m68k, linux-kernel, Will Deacon,
	Michael Schmitz

On Wed, Jan 29, 2020 at 01:05:10PM +0100, John Paul Adrian Glaubitz wrote:
> On 1/29/20 12:54 PM, Peter Zijlstra wrote:
> > On Wed, Jan 29, 2020 at 11:49:13AM +0100, John Paul Adrian Glaubitz wrote:
> > 
> >>> [1] https://wiki.debian.org/M68k/QemuSystemM68k
> > 
> > Now, if only debian would actually ship that :/
> 
> Debian should receive the QEMU version that supports full m68k emulation
> soonish.

Excellent!

> > AFAICT that emulates a q800 which is another 68040 and should thus not
> > differ from ARAnyM.
> 
> Right. You could switch to a different CPU emulation though, Laurent
> Vivier should be able to say more on that.

The link you provided only mentioned that Q-88 thing, let me go rummage
through the actual qemu-patch to see if it supports more.

> > I'm fairly confident in the 040 bits, it's the 020/030 things that need
> > coverage.
> 
> I'm currently setting up an Amiga 500 with an ACA-1233n/40 accelerator
> which has an 68030 CPU clocked at 40 MHz and 128 MB RAM which will be
used for developing a driver for a new network card for the Amiga
> 500 called X-Surf 500.

I remember playing 'Another World' on the Amiga-500, I'm thinking this
accelerator is a wee bit overkill for that though ;-)

> I can definitely test the patches on that setup, but I certainly won't
> have the time to set everything up until after FOSDEM.

That would be great, thanks!

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 4/5] m68k,mm: Extend table allocator for multiple sizes
  2020-01-29 12:17   ` Will Deacon
@ 2020-01-29 12:43     ` Peter Zijlstra
  2020-01-29 13:15       ` Will Deacon
  0 siblings, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 12:43 UTC (permalink / raw)
  To: Will Deacon; +Cc: Geert Uytterhoeven, linux-m68k, linux-kernel

On Wed, Jan 29, 2020 at 12:17:53PM +0000, Will Deacon wrote:
> On Wed, Jan 29, 2020 at 11:39:45AM +0100, Peter Zijlstra wrote:

> > +extern void *get_pointer_table(int type);
> 
> Could be prettier/obfuscated with an enum type?

Definitely, but then we get to bike-shed on names :-)

enum m68k_table_type {
	TABLE_BIG = 0,
	TABLE_SMALL,
};

Is not exactly _that_ much better, and while TABLE_PTE works,
TABLE_PGD_PMD is a bit crap.

> > --- a/arch/m68k/mm/memory.c
> > +++ b/arch/m68k/mm/memory.c

> > -pmd_t *get_pointer_table (void)
> > +void *get_pointer_table (int type)
> >  {
> > -	ptable_desc *dp = ptable_list.next;
> > -	unsigned char mask = PD_MARKBITS (dp);
> > -	unsigned char tmp;
> > -	unsigned int off;
> > +	ptable_desc *dp = ptable_list[type].next;
> > +	unsigned int mask, tmp, off;
> 
> nit, but if you do:
> 
> 	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
> 
> then you can leave the existing mask logic as-is.

Indeed!

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 4/5] m68k,mm: Extend table allocator for multiple sizes
  2020-01-29 12:43     ` Peter Zijlstra
@ 2020-01-29 13:15       ` Will Deacon
  0 siblings, 0 replies; 28+ messages in thread
From: Will Deacon @ 2020-01-29 13:15 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Geert Uytterhoeven, linux-m68k, linux-kernel

On Wed, Jan 29, 2020 at 01:43:52PM +0100, Peter Zijlstra wrote:
> On Wed, Jan 29, 2020 at 12:17:53PM +0000, Will Deacon wrote:
> > On Wed, Jan 29, 2020 at 11:39:45AM +0100, Peter Zijlstra wrote:
> 
> > > +extern void *get_pointer_table(int type);
> > 
> > Could be prettier/obfuscated with an enum type?
> 
> Definitely, but then we get to bike-shed on names :-)

At least we don't need an emulator for *that*!

> enum m68k_table_type {
> 	TABLE_BIG = 0,
> 	TABLE_SMALL,
> };
> 
> Is not exactly _that_ much better, and while TABLE_PTE works,
> TABLE_PGD_PMD is a bit crap.

Some alternatives:

TABLE_PXD / TABLE_PTE
TABLE_BRANCH / TABLE_LEAF
TABLE_DIR / TABLE_PTE
TABLE_TI_AB / TABLE_TI_C

Will

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-29 11:54   ` Peter Zijlstra
  2020-01-29 12:05     ` John Paul Adrian Glaubitz
@ 2020-01-29 18:52     ` Michael Schmitz
  2020-01-29 19:31       ` Peter Zijlstra
  1 sibling, 1 reply; 28+ messages in thread
From: Michael Schmitz @ 2020-01-29 18:52 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: John Paul Adrian Glaubitz, Geert Uytterhoeven, Linux/m68k,
	Linux Kernel Development, Will Deacon

Peter,

On Thu, Jan 30, 2020 at 12:54 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Wed, Jan 29, 2020 at 11:49:13AM +0100, John Paul Adrian Glaubitz wrote:
>
> > > [1] https://wiki.debian.org/M68k/QemuSystemM68k
>
> Now, if only debian would actually ship that :/
>
> AFAICT that emulates a q800 which is another 68040 and should thus not
> differ from ARAnyM.
>
> I'm fairly confident in the 040 bits, it's the 020/030 things that need
> coverage.

I'll take a look - unless this eats up way more kernel memory for page
tables, it should still boot on my Falcon.

Cheers,

  Michael

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-29 18:52     ` Michael Schmitz
@ 2020-01-29 19:31       ` Peter Zijlstra
  2020-01-30  7:31         ` Michael Schmitz
  0 siblings, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-29 19:31 UTC (permalink / raw)
  To: Michael Schmitz
  Cc: John Paul Adrian Glaubitz, Geert Uytterhoeven, Linux/m68k,
	Linux Kernel Development, Will Deacon

On Thu, Jan 30, 2020 at 07:52:11AM +1300, Michael Schmitz wrote:
> Peter,
> 
> On Thu, Jan 30, 2020 at 12:54 AM Peter Zijlstra <peterz@infradead.org> wrote:
> >
> > On Wed, Jan 29, 2020 at 11:49:13AM +0100, John Paul Adrian Glaubitz wrote:
> >
> > > > [1] https://wiki.debian.org/M68k/QemuSystemM68k
> >
> > Now, if only debian would actually ship that :/
> >
> > AFAICT that emulates a q800 which is another 68040 and should thus not
> > differ from ARAnyM.
> >
> > I'm fairly confident in the 040 bits, it's the 020/030 things that need
> > coverage.
> 
> I'll take a look - unless this eats up way more kernel memory for page
> tables, it should still boot on my Falcon.

It should actually be better in most cases I think, since we no longer
require all 16 pte-tables to map consecutive (virtual) memory.

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-29 19:31       ` Peter Zijlstra
@ 2020-01-30  7:31         ` Michael Schmitz
  2020-01-30  8:16           ` Peter Zijlstra
  0 siblings, 1 reply; 28+ messages in thread
From: Michael Schmitz @ 2020-01-30  7:31 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: John Paul Adrian Glaubitz, Geert Uytterhoeven, Linux/m68k,
	Linux Kernel Development, Will Deacon

Peter,

Am 30.01.2020 um 08:31 schrieb Peter Zijlstra:
> On Thu, Jan 30, 2020 at 07:52:11AM +1300, Michael Schmitz wrote:
>> Peter,
>>
>> On Thu, Jan 30, 2020 at 12:54 AM Peter Zijlstra <peterz@infradead.org> wrote:
>>>
>>> On Wed, Jan 29, 2020 at 11:49:13AM +0100, John Paul Adrian Glaubitz wrote:
>>>
>>>>> [1] https://wiki.debian.org/M68k/QemuSystemM68k
>>>
>>> Now, if only debian would actually ship that :/
>>>
>>> AFAICT that emulates a q800 which is another 68040 and should thus not
>>> differ from ARAnyM.
>>>
>>> I'm fairly confident in the 040 bits, it's the 020/030 things that need
>>> coverage.
>>
>> I'll take a look - unless this eats up way more kernel memory for page
>> tables, it should still boot on my Falcon.
>
> It should actually be better in most cases I think, since we no longer
> require all 16 pte-tables to map consecutive (virtual) memory.

Not much difference:

              total       used       free     shared    buffers     cached
Mem:         10712      10120        592          0       1860       2276
-/+ buffers/cache:       5984       4728
Swap:      2097144       1552    2095592


vs. vanilla 5.5rc5:
              total       used       free     shared    buffers     cached
Mem:         10716      10104        612          0       1588       2544
-/+ buffers/cache:       5972       4744
Swap:      2097144       1296    2095848

By sheer coincidence, the boot with your patch series happened to run a 
full filesystem check on the root filesystem, so I'd say it got a good 
workout re: paging and swapping (even though it's just a paltry 4 GB).

Haven't tried any VM stress testing yet (not sure what to do for that; 
it's been years since I tried that sort of stuff).

Cheers,

	Michael



^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-30  7:31         ` Michael Schmitz
@ 2020-01-30  8:16           ` Peter Zijlstra
  2020-01-30 19:12             ` Michael Schmitz
  0 siblings, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-30  8:16 UTC (permalink / raw)
  To: Michael Schmitz
  Cc: John Paul Adrian Glaubitz, Geert Uytterhoeven, Linux/m68k,
	Linux Kernel Development, Will Deacon


Hi Michael,

On Thu, Jan 30, 2020 at 08:31:13PM +1300, Michael Schmitz wrote:

> Not much difference:
> 
>              total       used       free     shared    buffers     cached
> Mem:         10712      10120        592          0       1860       2276
> -/+ buffers/cache:       5984       4728
> Swap:      2097144       1552    2095592
> 
> 
> vs. vanilla 5.5rc5:
>              total       used       free     shared    buffers     cached
> Mem:         10716      10104        612          0       1588       2544
> -/+ buffers/cache:       5972       4744
> Swap:      2097144       1296    2095848
> 
> By sheer coincidence, the boot with your patch series happened to run a full
> filesystem check on the root filesystem, so I'd say it got a good workout
> re: paging and swapping (even though it's just a paltry 4 GB).

Sweet!, can I translate this into a Tested-by: from you?

> Haven't tried any VM stress testing yet (not sure what to do for that; it's
> been years since I tried that sort of stuff).

I think, this not being SMP, doing what you just did tickled just about
everything there is.

There is one more potential issue with MMU-gather / TLB invalidate on
m68k (and a whole bunch of other archs) and I have patches for that
(although I now need to redo the m68k one).

Meanwhile the build robot gifted me with a build issue, and Will had
some nitpicks, so I'll go respin and repost these patches.

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-30  8:16           ` Peter Zijlstra
@ 2020-01-30 19:12             ` Michael Schmitz
  0 siblings, 0 replies; 28+ messages in thread
From: Michael Schmitz @ 2020-01-30 19:12 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: John Paul Adrian Glaubitz, Geert Uytterhoeven, Linux/m68k,
	Linux Kernel Development, Will Deacon

Peter,

On 30/01/20 9:16 PM, Peter Zijlstra wrote:
> Hi Michael,
>
> On Thu, Jan 30, 2020 at 08:31:13PM +1300, Michael Schmitz wrote:
>
>> Not much difference:
>>
>>               total       used       free     shared    buffers     cached
>> Mem:         10712      10120        592          0       1860       2276
>> -/+ buffers/cache:       5984       4728
>> Swap:      2097144       1552    2095592
>>
>>
>> vs. vanilla 5.5rc5:
>>               total       used       free     shared    buffers     cached
>> Mem:         10716      10104        612          0       1588       2544
>> -/+ buffers/cache:       5972       4744
>> Swap:      2097144       1296    2095848
>>
>> By sheer coincidence, the boot with your patch series happened to run a full
>> filesystem check on the root filesystem, so I'd say it got a good workout
>> re: paging and swapping (even though it's just a paltry 4 GB).
> Sweet!, can I translate this into a Tested-by: from you?

If the test coverage is sufficient, you may certainly do that.

Cheers,

     Michael

>
>> Haven't tried any VM stress testing yet (not sure what to do for that; it's
>> been years since I tried that sort of stuff).
> I think, this not being SMP, doing what you just did tickled just about
> everything there is.
>
> There is one more potential issue with MMU-gather / TLB invalidate on
> m68k (and a whole bunch of other archs) and I have patches for that
> (although I now need to redo the m68k one.
>
> Meanwhile the build robot gifted me with a build issue, and Will had
> some nitpicks, so I'll go respin and repost these patches.

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-29 10:39 [PATCH 0/5] Rewrite Motorola MMU page-table layout Peter Zijlstra
                   ` (5 preceding siblings ...)
  2020-01-29 10:49 ` [PATCH 0/5] Rewrite Motorola MMU page-table layout John Paul Adrian Glaubitz
@ 2020-01-31  6:31 ` Greg Ungerer
  2020-01-31  9:38   ` Will Deacon
  6 siblings, 1 reply; 28+ messages in thread
From: Greg Ungerer @ 2020-01-31  6:31 UTC (permalink / raw)
  To: Peter Zijlstra, Geert Uytterhoeven; +Cc: linux-m68k, linux-kernel, Will Deacon

Hi Peter,

On 29/1/20 8:39 pm, Peter Zijlstra wrote:
> Hi!
> 
> In order to facilitate Will's READ_ONCE() patches:
> 
>    https://lkml.kernel.org/r/20200123153341.19947-1-will@kernel.org
> 
> we need to fix m68k/motorola to not have a giant pmd_t. These patches do so and
> are tested using ARAnyM/68040.
> 
> It would be very good if someone can either test or tell us what emulator to
> use for 020/030.

This series breaks compilation for the ColdFire (with MMU) variant of
the m68k family:

   CC      arch/m68k/kernel/sys_m68k.o
In file included from ./arch/m68k/include/asm/pgalloc.h:12,
                  from ./include/asm-generic/tlb.h:16,
                  from ./arch/m68k/include/asm/tlb.h:5,
                  from arch/m68k/kernel/sys_m68k.c:35:
./arch/m68k/include/asm/mcf_pgalloc.h: In function ‘__pte_free_tlb’:
./arch/m68k/include/asm/mcf_pgalloc.h:41:24: error: passing argument 1 of ‘pgtable_pte_page_dtor’ from incompatible pointer type [-Werror=incompatible-pointer-types]
   pgtable_pte_page_dtor(page);
                         ^~~~
In file included from arch/m68k/kernel/sys_m68k.c:13:
./include/linux/mm.h:1949:55: note: expected ‘struct page *’ but argument is of type ‘pgtable_t’ {aka ‘struct <anonymous> *’}
  static inline void pgtable_pte_page_dtor(struct page *page)
                                           ~~~~~~~~~~~~~^~~~
In file included from ./include/linux/mm.h:10,
                  from arch/m68k/kernel/sys_m68k.c:13:
./include/linux/gfp.h:577:40: error: passing argument 1 of ‘__free_pages’ from incompatible pointer type [-Werror=incompatible-pointer-types]
  #define __free_page(page) __free_pages((page), 0)
                                         ^~~~~~
./arch/m68k/include/asm/mcf_pgalloc.h:42:2: note: in expansion of macro ‘__free_page’
   __free_page(page);
   ^~~~~~~~~~~
./include/linux/gfp.h:566:39: note: expected ‘struct page *’ but argument is of type ‘pgtable_t’ {aka ‘struct <anonymous> *’}
  extern void __free_pages(struct page *page, unsigned int order);
                           ~~~~~~~~~~~~~^~~~
cc1: some warnings being treated as errors
scripts/Makefile.build:265: recipe for target 'arch/m68k/kernel/sys_m68k.o' failed


Easy to reproduce. Build for the m5475evb_defconfig.

Regards
Greg


^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-31  6:31 ` Greg Ungerer
@ 2020-01-31  9:38   ` Will Deacon
  2020-01-31 10:22     ` Peter Zijlstra
  2020-01-31 13:04     ` Greg Ungerer
  0 siblings, 2 replies; 28+ messages in thread
From: Will Deacon @ 2020-01-31  9:38 UTC (permalink / raw)
  To: Greg Ungerer; +Cc: Peter Zijlstra, Geert Uytterhoeven, linux-m68k, linux-kernel

Hi Greg,

On Fri, Jan 31, 2020 at 04:31:48PM +1000, Greg Ungerer wrote:
> On 29/1/20 8:39 pm, Peter Zijlstra wrote:
> > In order to facilitate Will's READ_ONCE() patches:
> > 
> >    https://lkml.kernel.org/r/20200123153341.19947-1-will@kernel.org
> > 
> > we need to fix m68k/motorola to not have a giant pmd_t. These patches do so and
> > are tested using ARAnyM/68040.
> > 
> > It would be very good if someone can either test or tell us what emulator to
> > use for 020/030.
> 
> This series breaks compilation for the ColdFire (with MMU) variant of
> the m68k family:

[...]

> Easy to reproduce. Build for the m5475evb_defconfig.

I've hacked up a fix below, but I don't know how to test whether it actually
works (it does fix the build). However, I also notice that building for
m5475evb_defconfig with vanilla v5.5 triggers this scary looking warning
due to a mismatch between the pgd size and the (8k!) page size:


  | In function 'pgd_alloc.isra.111',
  |     inlined from 'mm_alloc_pgd' at kernel/fork.c:634:12,
  |     inlined from 'mm_init.isra.112' at kernel/fork.c:1043:6:
  | ./arch/m68k/include/asm/string.h:72:25: warning: '__builtin_memcpy' forming offset [4097, 8192] is out of the bounds [0, 4096] of object 'kernel_pg_dir' with type 'pgd_t[1024]' {aka 'struct <anonymous>[1024]'} [-Warray-bounds]
  |  #define memcpy(d, s, n) __builtin_memcpy(d, s, n)
  |                          ^~~~~~~~~~~~~~~~~~~~~~~~~
  | ./arch/m68k/include/asm/mcf_pgalloc.h:93:2: note: in expansion of macro 'memcpy'
  |   memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
  |   ^~~~~~


I think the correct fix is to add this:


diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 82ec54c2eaa4..c335e6a381a1 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -90,7 +90,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
 	if (!new_pgd)
 		return NULL;
-	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+	memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
 	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
 	return new_pgd;
 }


but maybe it should be done as a separate patch give that it's not caused
by the rework we've been doing.

Will

--->8

diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 82ec54c2eaa4..955d54a6e973 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -28,21 +28,22 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 	return (pmd_t *) pgd;
 }
 
-#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
-	(unsigned long)(page_address(page)))
+#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
 
-#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+#define pmd_populate_kernel pmd_populate
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
+	struct page *page = virt_to_page(pgtable);
+
 	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
 	struct page *page = alloc_pages(GFP_DMA, 0);
 	pte_t *pte;
@@ -54,20 +55,19 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
 		return NULL;
 	}
 
-	pte = kmap(page);
-	if (pte) {
-		clear_page(pte);
-		__flush_page_to_ram(pte);
-		flush_tlb_kernel_page(pte);
-		nocache_page(pte);
-	}
-	kunmap(page);
+	pte = page_address(page);
+	clear_page(pte);
+	__flush_page_to_ram(pte);
+	flush_tlb_kernel_page(pte);
+	nocache_page(pte);
 
-	return page;
+	return pte;
 }
 
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
+	struct page *page = virt_to_page(pgtable);
+
 	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }

^ permalink raw reply related	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-31  9:38   ` Will Deacon
@ 2020-01-31 10:22     ` Peter Zijlstra
  2020-01-31 11:14       ` Peter Zijlstra
  2020-01-31 13:04     ` Greg Ungerer
  1 sibling, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-31 10:22 UTC (permalink / raw)
  To: Will Deacon; +Cc: Greg Ungerer, Geert Uytterhoeven, linux-m68k, linux-kernel

On Fri, Jan 31, 2020 at 09:38:13AM +0000, Will Deacon wrote:

> > This series breaks compilation for the ColdFire (with MMU) variant of
> > the m68k family:

That's much like the same issue the build robots reported to me for sun3,
which I fixed by frobbing pgtable_t. That said, this is probably a more
consistent change.

One note below:


> -static inline struct page *pte_alloc_one(struct mm_struct *mm)
> +static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
>  {
>  	struct page *page = alloc_pages(GFP_DMA, 0);
>  	pte_t *pte;
> @@ -54,20 +55,19 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
>  		return NULL;
>  	}
>  
> -	pte = kmap(page);
> -	if (pte) {
> -		clear_page(pte);
> -		__flush_page_to_ram(pte);
> -		flush_tlb_kernel_page(pte);
> -		nocache_page(pte);
> -	}
> -	kunmap(page);
> +	pte = page_address(page);
> +	clear_page(pte);
> +	__flush_page_to_ram(pte);
> +	flush_tlb_kernel_page(pte);
> +	nocache_page(pte);

See how it does the nocache dance ^

>  
> -	return page;
> +	return pte;
>  }
>  
> -static inline void pte_free(struct mm_struct *mm, struct page *page)
> +static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
>  {
> +	struct page *page = virt_to_page(pgtable);
> +

but never sets it cached again!

>  	pgtable_pte_page_dtor(page);
>  	__free_page(page);
>  }

Also, alloc_one_kernel() suspiciously doesn't do the nocache
thing.

So either, alloc_one() shouldn't either, or it's all buggered.

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-31 10:22     ` Peter Zijlstra
@ 2020-01-31 11:14       ` Peter Zijlstra
  2020-01-31 11:18         ` Will Deacon
  0 siblings, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-31 11:14 UTC (permalink / raw)
  To: Will Deacon; +Cc: Greg Ungerer, Geert Uytterhoeven, linux-m68k, linux-kernel

On Fri, Jan 31, 2020 at 11:22:39AM +0100, Peter Zijlstra wrote:
> On Fri, Jan 31, 2020 at 09:38:13AM +0000, Will Deacon wrote:
> 
> > > This series breaks compilation for the ColdFire (with MMU) variant of
> > > the m68k family:
> 
> That's like the same I had reported by the build robots for sun3, which
> I fixed by frobbing pgtable_t. That said, this is probably a more
> consistent change.
> 
> One note below:
> 
> 
> > -static inline struct page *pte_alloc_one(struct mm_struct *mm)
> > +static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
> >  {
> >  	struct page *page = alloc_pages(GFP_DMA, 0);
> >  	pte_t *pte;
> > @@ -54,20 +55,19 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
> >  		return NULL;
> >  	}
> >  
> > -	pte = kmap(page);
> > -	if (pte) {
> > -		clear_page(pte);
> > -		__flush_page_to_ram(pte);
> > -		flush_tlb_kernel_page(pte);
> > -		nocache_page(pte);
> > -	}
> > -	kunmap(page);
> > +	pte = page_address(page);
> > +	clear_page(pte);
> > +	__flush_page_to_ram(pte);
> > +	flush_tlb_kernel_page(pte);
> > +	nocache_page(pte);
> 
> See how it does the nocache dance ^

> So either, alloc_one() shouldn't either, or it's all buggered.

Damn, we weren't going to touch coldfire! :-))

So now I found the coldfire docs, and it looks like this thing is a
software tlb-miss arch, so there is no reason what so ever for this to
be nocache. I'll 'fix' that.

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-31 11:14       ` Peter Zijlstra
@ 2020-01-31 11:18         ` Will Deacon
  2020-01-31 11:31           ` Peter Zijlstra
  0 siblings, 1 reply; 28+ messages in thread
From: Will Deacon @ 2020-01-31 11:18 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Greg Ungerer, Geert Uytterhoeven, linux-m68k, linux-kernel

On Fri, Jan 31, 2020 at 12:14:59PM +0100, Peter Zijlstra wrote:
> On Fri, Jan 31, 2020 at 11:22:39AM +0100, Peter Zijlstra wrote:
> > On Fri, Jan 31, 2020 at 09:38:13AM +0000, Will Deacon wrote:
> > 
> > > > This series breaks compilation for the ColdFire (with MMU) variant of
> > > > the m68k family:
> > 
> > That's like the same I had reported by the build robots for sun3, which
> > I fixed by frobbing pgtable_t. That said, this is probably a more
> > consistent change.
> > 
> > One note below:
> > 
> > 
> > > -static inline struct page *pte_alloc_one(struct mm_struct *mm)
> > > +static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
> > >  {
> > >  	struct page *page = alloc_pages(GFP_DMA, 0);
> > >  	pte_t *pte;
> > > @@ -54,20 +55,19 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
> > >  		return NULL;
> > >  	}
> > >  
> > > -	pte = kmap(page);
> > > -	if (pte) {
> > > -		clear_page(pte);
> > > -		__flush_page_to_ram(pte);
> > > -		flush_tlb_kernel_page(pte);
> > > -		nocache_page(pte);
> > > -	}
> > > -	kunmap(page);
> > > +	pte = page_address(page);
> > > +	clear_page(pte);
> > > +	__flush_page_to_ram(pte);
> > > +	flush_tlb_kernel_page(pte);
> > > +	nocache_page(pte);
> > 
> > See how it does the nocache dance ^
> 
> > So either, alloc_one() shouldn't either, or it's all buggered.
> 
> Damn, we weren't going to touch coldfire! :-))
> 
> So now I found the coldfire docs, and it looks like this thing is a
> software tlb-miss arch, so there is no reason what so ever for this to
> be nocache. I'll 'fix' that.

Does that mean we can drop the GFP_DMA too? If so, this all ends up
looking very similar to the sun3 code wrt alloc/free and they could
probably use the same implementation (since the generic code doesn't
like our pgtable_t definition).

Will

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-31 11:18         ` Will Deacon
@ 2020-01-31 11:31           ` Peter Zijlstra
  2020-01-31 11:43             ` Will Deacon
  0 siblings, 1 reply; 28+ messages in thread
From: Peter Zijlstra @ 2020-01-31 11:31 UTC (permalink / raw)
  To: Will Deacon; +Cc: Greg Ungerer, Geert Uytterhoeven, linux-m68k, linux-kernel

On Fri, Jan 31, 2020 at 11:18:24AM +0000, Will Deacon wrote:
> > > > +static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
> > > >  {
> > > >  	struct page *page = alloc_pages(GFP_DMA, 0);
> > > >  	pte_t *pte;

> Does that mean we can drop the GFP_DMA too? If so, this all ends up
> looking very similar to the sun3 code wrt alloc/free and they could
> probably use the same implementation (since the generic code doesn't
> like out pgtable_t definition).

Many software TLB archs have limits on what memory the TLB miss handler
itself can access (chicken-egg issues), it might be this is where the
GFP_DMA comes from.

I can't quickly find this in the CFV4e docs, but I'm not really reading
it carefully either.

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-31 11:31           ` Peter Zijlstra
@ 2020-01-31 11:43             ` Will Deacon
  0 siblings, 0 replies; 28+ messages in thread
From: Will Deacon @ 2020-01-31 11:43 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Greg Ungerer, Geert Uytterhoeven, linux-m68k, linux-kernel

On Fri, Jan 31, 2020 at 12:31:39PM +0100, Peter Zijlstra wrote:
> On Fri, Jan 31, 2020 at 11:18:24AM +0000, Will Deacon wrote:
> > > > > +static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
> > > > >  {
> > > > >  	struct page *page = alloc_pages(GFP_DMA, 0);
> > > > >  	pte_t *pte;
> 
> > Does that mean we can drop the GFP_DMA too? If so, this all ends up
> > looking very similar to the sun3 code wrt alloc/free and they could
> > probably use the same implementation (since the generic code doesn't
> > like out pgtable_t definition).
> 
> Many software TLB archs have limits on what memory the TLB miss handler
> itself can access (chicken-egg issues), it might be this is where the
> GFP_DMA comes from.

Fair enough, that sounds plausible.

> I can't quickly find this in the CFV4e docs, but I'm not really reading
> it carefully either.

I can't find any code under arch/m68k/ which suggests it, but for now
I guess we should stick with the old pgtable_t definition for sun3 with
a comment (and keep the GFP_DMA in for coldfire).

Will

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 0/5] Rewrite Motorola MMU page-table layout
  2020-01-31  9:38   ` Will Deacon
  2020-01-31 10:22     ` Peter Zijlstra
@ 2020-01-31 13:04     ` Greg Ungerer
  1 sibling, 0 replies; 28+ messages in thread
From: Greg Ungerer @ 2020-01-31 13:04 UTC (permalink / raw)
  To: Will Deacon; +Cc: Peter Zijlstra, Geert Uytterhoeven, linux-m68k, linux-kernel

Hi Will,

On 31/1/20 7:38 pm, Will Deacon wrote:
> On Fri, Jan 31, 2020 at 04:31:48PM +1000, Greg Ungerer wrote:
>> On 29/1/20 8:39 pm, Peter Zijlstra wrote:
>>> In order to faciliate Will's READ_ONCE() patches:
>>>
>>>     https://lkml.kernel.org/r/20200123153341.19947-1-will@kernel.org
>>>
>>> we need to fix m68k/motorola to not have a giant pmd_t. These patches do so and
>>> are tested using ARAnyM/68040.
>>>
>>> It would be very good if someone can either test or tell us what emulator to
>>> use for 020/030.
>>
>> This series breaks compilation for the ColdFire (with MMU) variant of
>> the m68k family:
> 
> [...]
> 
>> Easy to reproduce. Build for the m5475evb_defconfig.
> 
> I've hacked up a fix below, but I don't know how to test whether it actually
> works (it does fix the build).

Yep, I can confirm that too.
There are no emulators for the MMU-based ColdFires (qemu only supports
a non-MMU variant).

I can test on real hardware - but not until Monday when I am back
in my lab. I'll report back then.


> However, I also notice that building for
> m5475evb_defconfig with vanilla v5.5 triggers this scary looking warning
> due to a mismatch between the pgd size and the (8k!) page size:
> 
> 
>    | In function 'pgd_alloc.isra.111',
>    |     inlined from 'mm_alloc_pgd' at kernel/fork.c:634:12,
>    |     inlined from 'mm_init.isra.112' at kernel/fork.c:1043:6:
>    | ./arch/m68k/include/asm/string.h:72:25: warning: '__builtin_memcpy' forming offset [4097, 8192] is out of the bounds [0, 4096] of object 'kernel_pg_dir' with type 'pgd_t[1024]' {aka 'struct <anonymous>[1024]'} [-Warray-bounds]
>    |  #define memcpy(d, s, n) __builtin_memcpy(d, s, n)
>    |                          ^~~~~~~~~~~~~~~~~~~~~~~~~
>    | ./arch/m68k/include/asm/mcf_pgalloc.h:93:2: note: in expansion of macro 'memcpy'
>    |   memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
>    |   ^~~~~~
> 
> 
> I think the correct fix is to add this:
> 
> 
> diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
> index 82ec54c2eaa4..c335e6a381a1 100644
> --- a/arch/m68k/include/asm/mcf_pgalloc.h
> +++ b/arch/m68k/include/asm/mcf_pgalloc.h
> @@ -90,7 +90,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
>   	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
>   	if (!new_pgd)
>   		return NULL;
> -	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
> +	memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
>   	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
>   	return new_pgd;
>   }
> 
> 
> but maybe it should be done as a separate patch give that it's not caused
> by the rework we've been doing.

Indeed I hadn't noticed that before. But good idea, a separate patch
would make sense.

Regards
Greg


> Will
> 
> --->8
> 
> diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
> index 82ec54c2eaa4..955d54a6e973 100644
> --- a/arch/m68k/include/asm/mcf_pgalloc.h
> +++ b/arch/m68k/include/asm/mcf_pgalloc.h
> @@ -28,21 +28,22 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
>   	return (pmd_t *) pgd;
>   }
>   
> -#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
> -	(unsigned long)(page_address(page)))
> +#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
>   
> -#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
> +#define pmd_populate_kernel pmd_populate
>   
> -#define pmd_pgtable(pmd) pmd_page(pmd)
> +#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
>   
> -static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
> +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
>   				  unsigned long address)
>   {
> +	struct page *page = virt_to_page(pgtable);
> +
>   	pgtable_pte_page_dtor(page);
>   	__free_page(page);
>   }
>   
> -static inline struct page *pte_alloc_one(struct mm_struct *mm)
> +static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
>   {
>   	struct page *page = alloc_pages(GFP_DMA, 0);
>   	pte_t *pte;
> @@ -54,20 +55,19 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
>   		return NULL;
>   	}
>   
> -	pte = kmap(page);
> -	if (pte) {
> -		clear_page(pte);
> -		__flush_page_to_ram(pte);
> -		flush_tlb_kernel_page(pte);
> -		nocache_page(pte);
> -	}
> -	kunmap(page);
> +	pte = page_address(page);
> +	clear_page(pte);
> +	__flush_page_to_ram(pte);
> +	flush_tlb_kernel_page(pte);
> +	nocache_page(pte);
>   
> -	return page;
> +	return pte;
>   }
>   
> -static inline void pte_free(struct mm_struct *mm, struct page *page)
> +static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
>   {
> +	struct page *page = virt_to_page(pgtable);
> +
>   	pgtable_pte_page_dtor(page);
>   	__free_page(page);
>   }
> 

^ permalink raw reply	[flat|nested] 28+ messages in thread

end of thread, other threads:[~2020-01-31 13:14 UTC | newest]

Thread overview: 28+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-29 10:39 [PATCH 0/5] Rewrite Motorola MMU page-table layout Peter Zijlstra
2020-01-29 10:39 ` [PATCH 1/5] m68k,mm: Restructure motorola mmu " Peter Zijlstra
2020-01-29 10:39 ` [PATCH 2/5] m68k,mm: Improve kernel_page_table() Peter Zijlstra
2020-01-29 10:39 ` [PATCH 3/5] m68k,mm: Use table allocator for pgtables Peter Zijlstra
2020-01-29 12:11   ` Will Deacon
2020-01-29 12:24     ` Peter Zijlstra
2020-01-29 10:39 ` [PATCH 4/5] m68k,mm: Extend table allocator for multiple sizes Peter Zijlstra
2020-01-29 12:17   ` Will Deacon
2020-01-29 12:43     ` Peter Zijlstra
2020-01-29 13:15       ` Will Deacon
2020-01-29 10:39 ` [PATCH 5/5] m68k,mm: Fully initialize the page-table allocator Peter Zijlstra
2020-01-29 10:49 ` [PATCH 0/5] Rewrite Motorola MMU page-table layout John Paul Adrian Glaubitz
2020-01-29 11:54   ` Peter Zijlstra
2020-01-29 12:05     ` John Paul Adrian Glaubitz
2020-01-29 12:30       ` Peter Zijlstra
2020-01-29 18:52     ` Michael Schmitz
2020-01-29 19:31       ` Peter Zijlstra
2020-01-30  7:31         ` Michael Schmitz
2020-01-30  8:16           ` Peter Zijlstra
2020-01-30 19:12             ` Michael Schmitz
2020-01-31  6:31 ` Greg Ungerer
2020-01-31  9:38   ` Will Deacon
2020-01-31 10:22     ` Peter Zijlstra
2020-01-31 11:14       ` Peter Zijlstra
2020-01-31 11:18         ` Will Deacon
2020-01-31 11:31           ` Peter Zijlstra
2020-01-31 11:43             ` Will Deacon
2020-01-31 13:04     ` Greg Ungerer

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).