From: Pratyush Anand <panand@redhat.com>
To: AKASHI Takahiro <takahiro.akashi@linaro.org>,
	catalin.marinas@arm.com, will.deacon@arm.com
Cc: mark.rutland@arm.com, geoff@infradead.org,
	kexec@lists.infradead.org, james.morse@arm.com,
	bauerman@linux.vnet.ibm.com, dyoung@redhat.com,
	linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH v30 04/11] arm64: mm: allow for unmapping memory region from kernel mapping
Date: Tue, 24 Jan 2017 17:02:20 +0530	[thread overview]
Message-ID: <d83e9567-8d4b-a714-071c-bc134c1f681f@redhat.com> (raw)
In-Reply-To: <20170124085004.3892-3-takahiro.akashi@linaro.org>



On Tuesday 24 January 2017 02:19 PM, AKASHI Takahiro wrote:
> The current implementation of create_mapping_late() only allows
> modifying permission attributes (read-only or read-write) of the
> existing kernel mapping.
>
> In this patch, the PAGE_KERNEL_INVALID protection attribute is
> introduced. We will now be able to invalidate (or unmap) part of the
> existing kernel mapping by passing PAGE_KERNEL_INVALID to
> create_mapping_late().
>
> This feature will be used in a succeeding kdump patch to protect the
> memory reserved for the crash dump kernel once it has been loaded.
>
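Just to make the intended use concrete, here is a rough sketch of how a
later kdump patch could drive this. The helper name
arch_kexec_protect_crashkres() and the use of crashk_res are my
assumptions for illustration, not something taken from this patch:

	/*
	 * Illustrative sketch only: drop the crash kernel region from the
	 * kernel (linear) mapping once the dump kernel has been loaded.
	 */
	void arch_kexec_protect_crashkres(void)
	{
		create_mapping_late(crashk_res.start,
				    __phys_to_virt(crashk_res.start),
				    resource_size(&crashk_res),
				    PAGE_KERNEL_INVALID);
		flush_tlb_all();
	}
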
> Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
> ---
>  arch/arm64/include/asm/mmu.h           |  2 ++
>  arch/arm64/include/asm/pgtable-hwdef.h |  2 ++
>  arch/arm64/include/asm/pgtable-prot.h  |  1 +
>  arch/arm64/include/asm/pgtable.h       |  4 ++++
>  arch/arm64/mm/mmu.c                    | 29 ++++++++++++++++++++---------
>  5 files changed, 29 insertions(+), 9 deletions(-)
>
> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
> index 47619411f0ff..a6c1367527bc 100644
> --- a/arch/arm64/include/asm/mmu.h
> +++ b/arch/arm64/include/asm/mmu.h
> @@ -36,6 +36,8 @@ extern void init_mem_pgprot(void);
>  extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>  			       unsigned long virt, phys_addr_t size,
>  			       pgprot_t prot, bool page_mappings_only);
> +extern void create_mapping_late(phys_addr_t phys, unsigned long virt,
> +				phys_addr_t size, pgprot_t prot);
>  extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
>
>  #endif
> diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
> index eb0c2bd90de9..e66efec31ca9 100644
> --- a/arch/arm64/include/asm/pgtable-hwdef.h
> +++ b/arch/arm64/include/asm/pgtable-hwdef.h
> @@ -119,6 +119,7 @@
>  #define PUD_TABLE_BIT		(_AT(pgdval_t, 1) << 1)
>  #define PUD_TYPE_MASK		(_AT(pgdval_t, 3) << 0)
>  #define PUD_TYPE_SECT		(_AT(pgdval_t, 1) << 0)
> +#define PUD_VALID		PUD_TYPE_SECT
>
>  /*
>   * Level 2 descriptor (PMD).
> @@ -128,6 +129,7 @@
>  #define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
>  #define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
>  #define PMD_TABLE_BIT		(_AT(pmdval_t, 1) << 1)
> +#define PMD_VALID		PMD_TYPE_SECT
>
>  /*
>   * Section
> diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
> index 2142c7726e76..945d84cd5df7 100644
> --- a/arch/arm64/include/asm/pgtable-prot.h
> +++ b/arch/arm64/include/asm/pgtable-prot.h
> @@ -54,6 +54,7 @@
>  #define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
>  #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
>  #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
> +#define PAGE_KERNEL_INVALID	__pgprot(0)
>
>  #define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
>  #define PAGE_HYP_EXEC		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index ffbb9a520563..1904a7c07018 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -364,6 +364,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>
>  #define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))
>
> +#define pmd_valid(pmd)		(!!(pmd_val(pmd) & PMD_VALID))
> +
>  #define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
>  				 PMD_TYPE_TABLE)
>  #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
> @@ -428,6 +430,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
>
>  #define pud_none(pud)		(!pud_val(pud))
>  #define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
> +#define pud_valid(pud)		(!!(pud_val(pud) & PUD_VALID))

This will break compilation for CONFIG_PGTABLE_LEVELS <= 2.
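
A rough, untested sketch of one possible fix (my suggestion only, and it
assumes the surrounding pud accessors are only compiled for
CONFIG_PGTABLE_LEVELS > 2): keep pud_valid() under the same guard and give
folded configurations a trivial fallback, e.g.

	#if CONFIG_PGTABLE_LEVELS > 2
	#define pud_valid(pud)		(!!(pud_val(pud) & PUD_VALID))
	#else
	/*
	 * No real pud level exists here; treating the entry as always
	 * valid keeps the BUG_ON() checks in mmu.c at their old
	 * behaviour.
	 */
	#define pud_valid(pud)		(1)
	#endif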

>  #define pud_present(pud)	(pud_val(pud))
>
>  static inline void set_pud(pud_t *pudp, pud_t pud)
> @@ -481,6 +484,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
>
>  #define pgd_none(pgd)		(!pgd_val(pgd))
>  #define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
> +#define pgd_valid(pgd)		(!!(pgd_val(pgd) & 1))

pgd_valid() has not been used anywhere.

>  #define pgd_present(pgd)	(pgd_val(pgd))
>
>  static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 17243e43184e..9c7adcce8e4e 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -133,7 +133,8 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
>  		 * Set the contiguous bit for the subsequent group of PTEs if
>  		 * its size and alignment are appropriate.
>  		 */
> -		if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
> +		if ((pgprot_val(prot) & PTE_VALID) &&
> +		    (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0)) {
>  			if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
>  				__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
>  			else
> @@ -147,7 +148,8 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
>  		 * After the PTE entry has been populated once, we
>  		 * only allow updates to the permission attributes.
>  		 */
> -		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
> +		BUG_ON(pte_valid(old_pte) && pte_valid(*pte) &&
> +		       !pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
>
>  	} while (pte++, addr += PAGE_SIZE, addr != end);
>
> @@ -190,7 +192,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
>  			 * Set the contiguous bit for the subsequent group of
>  			 * PMDs if its size and alignment are appropriate.
>  			 */
> -			if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
> +			if ((pgprot_val(prot) | PMD_VALID) &&
> +			    ((addr | phys) & ~CONT_PMD_MASK) == 0) {
>  				if (end - addr >= CONT_PMD_SIZE)
>  					__prot = __pgprot(pgprot_val(prot) |
>  							  PTE_CONT);
> @@ -203,7 +206,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
>  			 * After the PMD entry has been populated once, we
>  			 * only allow updates to the permission attributes.
>  			 */
> -			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
> +			BUG_ON(pmd_valid(old_pmd) && pmd_valid(*pmd) &&
> +			       !pgattr_change_is_safe(pmd_val(old_pmd),
>  						      pmd_val(*pmd)));
>  		} else {
>  			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
> @@ -263,7 +267,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
>  			 * After the PUD entry has been populated once, we
>  			 * only allow updates to the permission attributes.
>  			 */
> -			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
> +			BUG_ON(pud_valid(old_pud) && pud_valid(*pud) &&
> +			       !pgattr_change_is_safe(pud_val(old_pud),
>  						      pud_val(*pud)));
>  		} else {
>  			alloc_init_pmd(pud, addr, next, phys, prot,
> @@ -344,8 +349,8 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>  			     pgd_pgtable_alloc, page_mappings_only);
>  }
>
> -static void create_mapping_late(phys_addr_t phys, unsigned long virt,
> -				  phys_addr_t size, pgprot_t prot)
> +void create_mapping_late(phys_addr_t phys, unsigned long virt,
> +			 phys_addr_t size, pgprot_t prot)
>  {
>  	if (virt < VMALLOC_START) {
>  		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
> @@ -791,14 +796,20 @@ int __init arch_ioremap_pmd_supported(void)
>  int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
>  {
>  	BUG_ON(phys & ~PUD_MASK);
> -	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
> +	set_pud(pud, __pud(phys |
> +			   ((pgprot_val(prot) & PUD_VALID) ?
> +					PUD_TYPE_SECT : 0) |
> +			   pgprot_val(mk_sect_prot(prot))));
>  	return 1;
>  }
>
>  int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
>  {
>  	BUG_ON(phys & ~PMD_MASK);
> -	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
> +	set_pmd(pmd, __pmd(phys |
> +			   ((pgprot_val(prot) & PMD_VALID) ?
> +					PMD_TYPE_SECT : 0) |
> +			   pgprot_val(mk_sect_prot(prot))));
>  	return 1;
>  }
>
>


~Pratyush

Thread overview: 54+ messages
2017-01-24  8:46 [PATCH v30 00/11] arm64: add kdump support AKASHI Takahiro
2017-01-24  8:49 ` [PATCH v30 01/11] memblock: add memblock_cap_memory_range() AKASHI Takahiro
2017-01-24  8:49 ` [PATCH v30 02/11] arm64: limit memory regions based on DT property, usable-memory-range AKASHI Takahiro
2017-01-24  8:49 ` [PATCH v30 03/11] arm64: kdump: reserve memory for crash dump kernel AKASHI Takahiro
2017-01-24  8:49 ` [PATCH v30 04/11] arm64: mm: allow for unmapping memory region from kernel mapping AKASHI Takahiro
2017-01-24 11:32   ` Pratyush Anand [this message]
2017-01-25  6:37     ` AKASHI Takahiro
2017-01-25 15:49   ` James Morse
2017-01-26  8:08     ` AKASHI Takahiro
2017-01-24  8:49 ` [PATCH v30 05/11] arm64: kdump: protect crash dump kernel memory AKASHI Takahiro
2017-01-25 17:37   ` James Morse
2017-01-26 11:28     ` AKASHI Takahiro
2017-01-27 11:19       ` James Morse
2017-01-27 17:15         ` AKASHI Takahiro
2017-01-27 18:56           ` Mark Rutland
2017-01-30  8:42             ` AKASHI Takahiro
2017-01-30  8:27           ` AKASHI Takahiro
2017-01-27 13:59   ` James Morse
2017-01-27 15:42     ` AKASHI Takahiro
2017-01-27 19:41       ` Mark Rutland
2017-01-24  8:50 ` [PATCH v30 06/11] arm64: kdump: implement machine_crash_shutdown() AKASHI Takahiro
2017-01-24  8:50 ` [PATCH v30 07/11] arm64: kdump: add VMCOREINFO's for user-space tools AKASHI Takahiro
2017-01-24  8:50 ` [PATCH v30 08/11] arm64: kdump: provide /proc/vmcore file AKASHI Takahiro
2017-01-24  8:50 ` [PATCH v30 09/11] arm64: kdump: enable kdump in defconfig AKASHI Takahiro
2017-01-24  8:50 ` [PATCH v30 10/11] Documentation: kdump: describe arm64 port AKASHI Takahiro
     [not found] ` <20170124084638.3770-1-takahiro.akashi-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>
2017-01-24  8:53   ` [PATCH v30 11/11] Documentation: dt: chosen properties for arm64 kdump AKASHI Takahiro
