* [PATCHv4 0/3] ARCH_SUPPORTS_DEBUG_PAGEALLOC for arm64
From: Laura Abbott @ 2016-02-06  0:24 UTC
  To: Catalin Marinas, Will Deacon, Mark Rutland, Ard Biesheuvel
  Cc: Laura Abbott, linux-arm-kernel, linux-kernel

Hi,

This is hopefully the last update to add proper ARCH_SUPPORTS_DEBUG_PAGEALLOC
support for arm64.

Changes since v3:
- More acks
- Pulled out block check into a separate function
- Style fixups
- Comment tweaking

Laura Abbott (3):
  arm64: Drop alloc function from create_mapping
  arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
  arm64: ptdump: Indicate whether memory should be faulting

 arch/arm64/Kconfig       |  3 +++
 arch/arm64/mm/dump.c     |  5 +++++
 arch/arm64/mm/mmu.c      | 48 +++++++++++++++++++++++++++++++++++++-----------
 arch/arm64/mm/pageattr.c | 46 ++++++++++++++++++++++++++++++++++++----------
 4 files changed, 81 insertions(+), 21 deletions(-)

-- 
2.5.0


* [PATCHv4 1/3] arm64: Drop alloc function from create_mapping
From: Laura Abbott @ 2016-02-06  0:24 UTC
  To: Catalin Marinas, Will Deacon, Mark Rutland, Ard Biesheuvel
  Cc: Laura Abbott, linux-arm-kernel, linux-kernel



create_mapping is only used by fixmap_remap_fdt. All the create_mapping
calls need to happen on existing translation table pages without
additional allocations. Rather than have an alloc function that should
never be called, just set it to NULL and catch any attempt to use it.
Also rename the function to create_mapping_noalloc to better capture
what is going on.

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@fedoraproject.org>
---
 arch/arm64/mm/mmu.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7711554..ef0d66c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -116,7 +116,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	pte_t *pte;
 
 	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-		phys_addr_t pte_phys = pgtable_alloc();
+		phys_addr_t pte_phys;
+		BUG_ON(!pgtable_alloc);
+		pte_phys = pgtable_alloc();
 		pte = pte_set_fixmap(pte_phys);
 		if (pmd_sect(*pmd))
 			split_pmd(pmd, pte);
@@ -158,7 +160,9 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	 * Check for initial section mappings in the pgd/pud and remove them.
 	 */
 	if (pud_none(*pud) || pud_sect(*pud)) {
-		phys_addr_t pmd_phys = pgtable_alloc();
+		phys_addr_t pmd_phys;
+		BUG_ON(!pgtable_alloc);
+		pmd_phys = pgtable_alloc();
 		pmd = pmd_set_fixmap(pmd_phys);
 		if (pud_sect(*pud)) {
 			/*
@@ -223,7 +227,9 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		phys_addr_t pud_phys = pgtable_alloc();
+		phys_addr_t pud_phys;
+		BUG_ON(!pgtable_alloc);
+		pud_phys = pgtable_alloc();
 		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
 	}
 	BUG_ON(pgd_bad(*pgd));
@@ -312,7 +318,12 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+/*
+ * This function can only be used to modify existing table entries,
+ * without allocating new levels of table. Note that this permits the
+ * creation of new section or page entries.
+ */
+static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
@@ -321,7 +332,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-			     early_pgtable_alloc);
+			     NULL);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
@@ -680,7 +691,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 	/*
 	 * Make sure that the FDT region can be mapped without the need to
 	 * allocate additional translation table pages, so that it is safe
-	 * to call create_mapping() this early.
+	 * to call create_mapping_noalloc() this early.
 	 *
 	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
 	 * be in the same PMD as the rest of the fixmap.
@@ -696,8 +707,8 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 	dt_virt = (void *)dt_virt_base + offset;
 
 	/* map the first chunk so we can read the size from the header */
-	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-		       SWAPPER_BLOCK_SIZE, prot);
+	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
 
 	if (fdt_check_header(dt_virt) != 0)
 		return NULL;
@@ -707,7 +718,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 		return NULL;
 
 	if (offset + size > SWAPPER_BLOCK_SIZE)
-		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
 			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
 
 	memblock_reserve(dt_phys, size);
-- 
2.5.0


* [PATCHv4 2/3] arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
From: Laura Abbott @ 2016-02-06  0:24 UTC
  To: Catalin Marinas, Will Deacon, Mark Rutland, Ard Biesheuvel
  Cc: Laura Abbott, linux-arm-kernel, linux-kernel



ARCH_SUPPORTS_DEBUG_PAGEALLOC provides a hook to map and unmap
pages for debugging purposes. This requires memory be mapped
with PAGE_SIZE mappings since breaking down larger mappings
at runtime will lead to TLB conflicts. Check if debug_pagealloc
is enabled at runtime and, if so, map everything with PAGE_SIZE
pages. Implement the functions to actually map/unmap the
pages at runtime.
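
[ Illustrative context, not part of this patch: roughly how the core
  kernel is expected to exercise the __kernel_map_pages() hook added
  below. This sketch follows the kernel_map_pages() wrapper in
  include/linux/mm.h of this era; the exact call sites in
  mm/page_alloc.c may differ between kernel versions. ]

	/* include/linux/mm.h (sketch) */
	static inline void kernel_map_pages(struct page *page, int numpages,
					    int enable)
	{
		if (!debug_pagealloc_enabled())
			return;

		__kernel_map_pages(page, numpages, enable);
	}

	/* mm/page_alloc.c (sketch): unmap on free, map again on allocation */
	kernel_map_pages(page, 1 << order, 0);	/* when freeing pages */
	kernel_map_pages(page, 1 << order, 1);	/* when handing pages out */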

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@fedoraproject.org>
---
 arch/arm64/Kconfig       |  3 +++
 arch/arm64/mm/mmu.c      | 19 +++++++++++++++++--
 arch/arm64/mm/pageattr.c | 46 ++++++++++++++++++++++++++++++++++++----------
 3 files changed, 56 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8cc6228..0f33218 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -537,6 +537,9 @@ config HOTPLUG_CPU
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	def_bool y
+
 config ARCH_HAS_HOLES_MEMORYMODEL
 	def_bool y if SPARSEMEM
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ef0d66c..29dcc83 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -149,6 +149,19 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
 	} while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
+bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+
+	/*
+	 * If debug_page_alloc is enabled we must map the linear map
+	 * using pages. However, other mappings created by
+	 * create_mapping_noalloc must use sections in some cases. Allow
+	 * sections to be used in those cases, where no pgtable_alloc
+	 * function is provided.
+	 */
+	return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+
 static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 				  phys_addr_t phys, pgprot_t prot,
 				  phys_addr_t (*pgtable_alloc)(void))
@@ -181,7 +194,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	do {
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+		      block_mappings_allowed(pgtable_alloc)) {
 			pmd_t old_pmd =*pmd;
 			set_pmd(pmd, __pmd(phys |
 					   pgprot_val(mk_sect_prot(prot))));
@@ -241,7 +255,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (use_1G_block(addr, next, phys)) {
+		if (use_1G_block(addr, next, phys) &&
+		    block_mappings_allowed(pgtable_alloc)) {
 			pud_t old_pud = *pud;
 			set_pud(pud, __pud(phys |
 					   pgprot_val(mk_sect_prot(prot))));
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 1360a02..7ba87c4 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -37,14 +37,31 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
 	return 0;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+				pgprot_t set_mask, pgprot_t clear_mask)
+{
+	struct page_change_data data;
+	int ret;
+
+	data.set_mask = set_mask;
+	data.clear_mask = clear_mask;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+					&data);
+
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
 				pgprot_t set_mask, pgprot_t clear_mask)
 {
 	unsigned long start = addr;
 	unsigned long size = PAGE_SIZE*numpages;
 	unsigned long end = start + size;
-	int ret;
-	struct page_change_data data;
 	struct vm_struct *area;
 
 	if (!PAGE_ALIGNED(addr)) {
@@ -72,14 +89,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 	    !(area->flags & VM_ALLOC))
 		return -EINVAL;
 
-	data.set_mask = set_mask;
-	data.clear_mask = clear_mask;
-
-	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-					&data);
-
-	flush_tlb_kernel_range(start, end);
-	return ret;
+	return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -111,3 +121,19 @@ int set_memory_x(unsigned long addr, int numpages)
 					__pgprot(PTE_PXN));
 }
 EXPORT_SYMBOL_GPL(set_memory_x);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long addr = (unsigned long) page_address(page);
+
+	if (enable)
+		__change_memory_common(addr, PAGE_SIZE * numpages,
+					__pgprot(PTE_VALID),
+					__pgprot(0));
+	else
+		__change_memory_common(addr, PAGE_SIZE * numpages,
+					__pgprot(0),
+					__pgprot(PTE_VALID));
+}
+#endif
-- 
2.5.0


* [PATCHv4 3/3] arm64: ptdump: Indicate whether memory should be faulting
From: Laura Abbott @ 2016-02-06  0:24 UTC
  To: Catalin Marinas, Will Deacon, Mark Rutland, Ard Biesheuvel
  Cc: Laura Abbott, linux-arm-kernel, linux-kernel



With CONFIG_DEBUG_PAGEALLOC, pages do not have the valid bit
set when they are free in the buddy allocator. Add an indication to
the page table dumping code that the valid bit is not set,
'F' for fault, to make this easier to understand.

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@fedoraproject.org>
---
 arch/arm64/mm/dump.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 5a22a11..f381ac9 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -90,6 +90,11 @@ struct prot_bits {
 
 static const struct prot_bits pte_bits[] = {
 	{
+		.mask	= PTE_VALID,
+		.val	= PTE_VALID,
+		.set	= " ",
+		.clear	= "F",
+	}, {
 		.mask	= PTE_USER,
 		.val	= PTE_USER,
 		.set	= "USR",
-- 
2.5.0


* Re: [PATCHv4 0/3] ARCH_SUPPORTS_DEBUG_PAGEALLOC for arm64
From: Catalin Marinas @ 2016-02-09 12:19 UTC
  To: Laura Abbott
  Cc: Will Deacon, Mark Rutland, Ard Biesheuvel, linux-kernel,
	linux-arm-kernel

On Fri, Feb 05, 2016 at 04:24:45PM -0800, Laura Abbott wrote:
> Laura Abbott (3):
>   arm64: Drop alloc function from create_mapping
>   arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
>   arm64: ptdump: Indicate whether memory should be faulting

Patches queued for 4.6. Thanks.

-- 
Catalin


* Re: [PATCHv4 2/3] arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
From: Catalin Marinas @ 2016-02-10 10:38 UTC
  To: Laura Abbott
  Cc: Will Deacon, Mark Rutland, Ard Biesheuvel, linux-kernel,
	linux-arm-kernel

On Fri, Feb 05, 2016 at 04:24:47PM -0800, Laura Abbott wrote:
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -149,6 +149,19 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
>  	} while (pmd++, i++, i < PTRS_PER_PMD);
>  }
>  
> +bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
> +{
> +
> +	/*
> +	 * If debug_page_alloc is enabled we must map the linear map
> +	 * using pages. However, other mappings created by
> +	 * create_mapping_noalloc must use sections in some cases. Allow
> +	 * sections to be used in those cases, where no pgtable_alloc
> +	 * function is provided.
> +	 */
> +	return !pgtable_alloc || !debug_pagealloc_enabled();
> +}

This breaks the build when CONFIG_DEBUG_PAGEALLOC is not enabled
(defconfig) since debug_pagealloc_enabled() is not defined. A fix went
in next/master as commit 0987684b855c
("mm-slab-clean-up-debug_pagealloc-processing-code-fix"). I need to
track it down and merge it via the arm64 tree, otherwise I'll add some
#ifdefs in this function.

BTW, shouldn't the block_mappings_allowed() function be static?
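
[ For illustration only, one way a static version with a local #ifdef
  fallback could look, assuming the generic debug_pagealloc_enabled()
  stub is not yet available; this is a sketch, not necessarily the fix
  that was applied: ]

	static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
	{
	#ifndef CONFIG_DEBUG_PAGEALLOC
		/* No debug_pagealloc: block mappings are always fine. */
		return true;
	#else
		/*
		 * With debug_pagealloc the linear map must use pages, but
		 * mappings created without a pgtable_alloc function (the
		 * noalloc path) may still use sections.
		 */
		return !pgtable_alloc || !debug_pagealloc_enabled();
	#endif
	}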

-- 
Catalin


* Re: [PATCHv4 2/3] arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
From: Ard Biesheuvel @ 2016-02-10 10:40 UTC
  To: Catalin Marinas
  Cc: Laura Abbott, Will Deacon, Mark Rutland, linux-kernel, linux-arm-kernel

On 10 February 2016 at 11:38, Catalin Marinas <catalin.marinas@arm.com> wrote:
> On Fri, Feb 05, 2016 at 04:24:47PM -0800, Laura Abbott wrote:
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -149,6 +149,19 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
>>       } while (pmd++, i++, i < PTRS_PER_PMD);
>>  }
>>
>> +bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
>> +{
>> +
>> +     /*
>> +      * If debug_page_alloc is enabled we must map the linear map
>> +      * using pages. However, other mappings created by
>> +      * create_mapping_noalloc must use sections in some cases. Allow
>> +      * sections to be used in those cases, where no pgtable_alloc
>> +      * function is provided.
>> +      */
>> +     return !pgtable_alloc || !debug_pagealloc_enabled();
>> +}
>
> This breaks the build when CONFIG_DEBUG_PAGEALLOC is not enabled
> (defconfig) since debug_pagealloc_enabled() is not defined. A fix went
> in next/master as commit 0987684b855c
> ("mm-slab-clean-up-debug_pagealloc-processing-code-fix"). I need to
> track it down and merge it via the arm64 tree, otherwise I'll add some
> #ifdefs in this function.
>
> BTW, shouldn't the block_mappings_allowed() function be static?
>

Yes, it should.


* Re: [PATCHv4 2/3] arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
From: Mark Rutland @ 2016-02-10 10:54 UTC
  To: Catalin Marinas
  Cc: Laura Abbott, Will Deacon, Ard Biesheuvel, linux-kernel,
	linux-arm-kernel

On Wed, Feb 10, 2016 at 10:38:14AM +0000, Catalin Marinas wrote:
> On Fri, Feb 05, 2016 at 04:24:47PM -0800, Laura Abbott wrote:
> > --- a/arch/arm64/mm/mmu.c
> > +++ b/arch/arm64/mm/mmu.c
> > @@ -149,6 +149,19 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
> >  	} while (pmd++, i++, i < PTRS_PER_PMD);
> >  }
> >  
> > +bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
> > +{
> > +
> > +	/*
> > +	 * If debug_page_alloc is enabled we must map the linear map
> > +	 * using pages. However, other mappings created by
> > +	 * create_mapping_noalloc must use sections in some cases. Allow
> > +	 * sections to be used in those cases, where no pgtable_alloc
> > +	 * function is provided.
> > +	 */
> > +	return !pgtable_alloc || !debug_pagealloc_enabled();
> > +}
> 
> This breaks the build when CONFIG_DEBUG_PAGEALLOC is not enabled
> (defconfig) since debug_pagealloc_enabled() is not defined. A fix went
> in next/master as commit 0987684b855c
> ("mm-slab-clean-up-debug_pagealloc-processing-code-fix"). I need to
> track it down and merge it via the arm64 tree, otherwise I'll add some
> #ifdefs in this function.

In the cover letter for v2 [1], it was mentioned that we needed a patch [2]
"mm: provide debug_pagealloc_enabled() without CONFIG_DEBUG_PAGEALLOC".
I assumed that was already queued via the mm tree.
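
[ For reference, the patch in [2] roughly amounts to a stub in
  include/linux/mm.h along these lines (sketch only, see the link for
  the actual change): ]

	#ifdef CONFIG_DEBUG_PAGEALLOC
	extern bool _debug_pagealloc_enabled;

	static inline bool debug_pagealloc_enabled(void)
	{
		return _debug_pagealloc_enabled;
	}
	#else
	static inline bool debug_pagealloc_enabled(void)
	{
		return false;
	}
	#endif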

> BTW, shouldn't the block_mappings_allowed() function be static?

That would make sense to me, yes.

Mark.

[1] http://lists.infradead.org/pipermail/linux-arm-kernel/2016-January/403439.html  
[2] http://article.gmane.org/gmane.linux.kernel.mm/1452


* Re: [PATCHv4 2/3] arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
From: Catalin Marinas @ 2016-02-10 11:02 UTC
  To: Laura Abbott
  Cc: Mark Rutland, Will Deacon, linux-kernel, linux-arm-kernel,
	Ard Biesheuvel

On Wed, Feb 10, 2016 at 10:38:14AM +0000, Catalin Marinas wrote:
> On Fri, Feb 05, 2016 at 04:24:47PM -0800, Laura Abbott wrote:
> > --- a/arch/arm64/mm/mmu.c
> > +++ b/arch/arm64/mm/mmu.c
> > @@ -149,6 +149,19 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
> >  	} while (pmd++, i++, i < PTRS_PER_PMD);
> >  }
> >  
> > +bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
> > +{
> > +
> > +	/*
> > +	 * If debug_page_alloc is enabled we must map the linear map
> > +	 * using pages. However, other mappings created by
> > +	 * create_mapping_noalloc must use sections in some cases. Allow
> > +	 * sections to be used in those cases, where no pgtable_alloc
> > +	 * function is provided.
> > +	 */
> > +	return !pgtable_alloc || !debug_pagealloc_enabled();
> > +}
> 
> This breaks the build when CONFIG_DEBUG_PAGEALLOC is not enabled
> (defconfig) since debug_pagealloc_enabled() is not defined. A fix went
> in next/master as commit 0987684b855c
> ("mm-slab-clean-up-debug_pagealloc-processing-code-fix"). I need to
> track it down and merge it via the arm64 tree, otherwise I'll add some
> #ifdefs in this function.

It seems that there are other patches in the mm tree requiring this
change. I'll fix it locally (#ifdefs) and revert the change after -rc1.

-- 
Catalin

