From: Michal Hocko <mhocko@kernel.org>
To: Logan Gunthorpe <logang@deltatee.com>
Cc: x86@kernel.org, linux-ia64@vger.kernel.org,
	linux-sh@vger.kernel.org, Peter Zijlstra <peterz@infradead.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	platform-driver-x86@vger.kernel.org, linux-mm@kvack.org,
	"H. Peter Anvin" <hpa@zytor.com>, Will Deacon <will@kernel.org>,
	Christoph Hellwig <hch@lst.de>,
	linux-s390@vger.kernel.org, David Hildenbrand <david@redhat.com>,
	Ingo Molnar <mingo@redhat.com>,
	Dan Williams <dan.j.williams@intel.com>,
	Borislav Petkov <bp@alien8.de>, Andy Lutomirski <luto@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	linux-arm-kernel@lists.infradead.org,
	Eric Badger <ebadger@gigaio.com>,
	linux-kernel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	linuxppc-dev@lists.ozlabs.org
Subject: Re: [PATCH v3 3/7] x86/mm: Thread pgprot_t through init_memory_mapping()
Date: Tue, 3 Mar 2020 10:52:10 +0100	[thread overview]
Message-ID: <20200303095210.GG4380@dhcp22.suse.cz> (raw)
In-Reply-To: <20200221182503.28317-4-logang@deltatee.com>

On Fri 21-02-20 11:24:59, Logan Gunthorpe wrote:
> In preparation to support a pgprot_t argument for arch_add_memory(),
> thread a pgprot_t argument through init_memory_mapping().
> 
> The prototype of init_memory_mapping() also needs to move, since its
> original location came before the definition of pgprot_t.
> 
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Cc: x86@kernel.org
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: Andy Lutomirski <luto@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>

Acked-by: Michal Hocko <mhocko@suse.com>
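
For readers following the series: existing callers keep today's
behaviour by passing PAGE_KERNEL explicitly, while a caller that wants
a different caching mode can hand in any pgprot_t. A minimal sketch of
a hypothetical caller (names assumed, not part of this patch):

	/* e.g. map a hotplugged range write-combined instead of WB */
	init_memory_mapping(start, start + size,
			    pgprot_writecombine(PAGE_KERNEL));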

> ---
>  arch/x86/include/asm/page_types.h |  3 ---
>  arch/x86/include/asm/pgtable.h    |  3 +++
>  arch/x86/kernel/amd_gart_64.c     |  3 ++-
>  arch/x86/mm/init.c                |  9 +++++----
>  arch/x86/mm/init_32.c             |  3 ++-
>  arch/x86/mm/init_64.c             | 32 +++++++++++++++++--------------
>  arch/x86/mm/mm_internal.h         |  3 ++-
>  arch/x86/platform/uv/bios_uv.c    |  3 ++-
>  8 files changed, 34 insertions(+), 25 deletions(-)
> 
> diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
> index c85e15010f48..bf7aa2e290ef 100644
> --- a/arch/x86/include/asm/page_types.h
> +++ b/arch/x86/include/asm/page_types.h
> @@ -73,9 +73,6 @@ static inline phys_addr_t get_max_mapped(void)
>  
>  bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
>  
> -extern unsigned long init_memory_mapping(unsigned long start,
> -					 unsigned long end);
> -
>  extern void initmem_init(void);
>  
>  #endif	/* !__ASSEMBLY__ */
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index 7e118660bbd9..48d6a5960f28 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -1046,6 +1046,9 @@ static inline void __meminit init_trampoline_default(void)
>  
>  void __init poking_init(void);
>  
> +unsigned long init_memory_mapping(unsigned long start,
> +				  unsigned long end, pgprot_t prot);
> +
>  # ifdef CONFIG_RANDOMIZE_MEMORY
>  void __meminit init_trampoline(void);
>  # else
> diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
> index 4e5f50236048..16133819415c 100644
> --- a/arch/x86/kernel/amd_gart_64.c
> +++ b/arch/x86/kernel/amd_gart_64.c
> @@ -744,7 +744,8 @@ int __init gart_iommu_init(void)
>  
>  	start_pfn = PFN_DOWN(aper_base);
>  	if (!pfn_range_is_mapped(start_pfn, end_pfn))
> -		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
> +		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
> +				    PAGE_KERNEL);
>  
>  	pr_info("PCI-DMA: using GART IOMMU.\n");
>  	iommu_size = check_iommu_size(info.aper_base, aper_size);
> diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
> index e7bb483557c9..1bba16c5742b 100644
> --- a/arch/x86/mm/init.c
> +++ b/arch/x86/mm/init.c
> @@ -467,7 +467,7 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
>   * the physical memory. To access them they are temporarily mapped.
>   */
>  unsigned long __ref init_memory_mapping(unsigned long start,
> -					       unsigned long end)
> +					unsigned long end, pgprot_t prot)
>  {
>  	struct map_range mr[NR_RANGE_MR];
>  	unsigned long ret = 0;
> @@ -481,7 +481,8 @@ unsigned long __ref init_memory_mapping(unsigned long start,
>  
>  	for (i = 0; i < nr_range; i++)
>  		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
> -						   mr[i].page_size_mask);
> +						   mr[i].page_size_mask,
> +						   prot);
>  
>  	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
>  
> @@ -521,7 +522,7 @@ static unsigned long __init init_range_memory_mapping(
>  		 */
>  		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
>  				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
> -		init_memory_mapping(start, end);
> +		init_memory_mapping(start, end, PAGE_KERNEL);
>  		mapped_ram_size += end - start;
>  		can_use_brk_pgt = true;
>  	}
> @@ -661,7 +662,7 @@ void __init init_mem_mapping(void)
>  #endif
>  
>  	/* the ISA range is always mapped regardless of memory holes */
> -	init_memory_mapping(0, ISA_END_ADDRESS);
> +	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);
>  
>  	/* Init the trampoline, possibly with KASLR memory offset */
>  	init_trampoline();
> diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
> index 3ec3dac7c268..e25a4218e6ff 100644
> --- a/arch/x86/mm/init_32.c
> +++ b/arch/x86/mm/init_32.c
> @@ -253,7 +253,8 @@ static inline int is_kernel_text(unsigned long addr)
>  unsigned long __init
>  kernel_physical_mapping_init(unsigned long start,
>  			     unsigned long end,
> -			     unsigned long page_size_mask)
> +			     unsigned long page_size_mask,
> +			     pgprot_t prot)
>  {
>  	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
>  	unsigned long last_map_addr = end;
> diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
> index 87977a8bfbbf..9e7692080dda 100644
> --- a/arch/x86/mm/init_64.c
> +++ b/arch/x86/mm/init_64.c
> @@ -585,7 +585,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
>   */
>  static unsigned long __meminit
>  phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
> -	      unsigned long page_size_mask, bool init)
> +	      unsigned long page_size_mask, pgprot_t _prot, bool init)
>  {
>  	unsigned long pages = 0, paddr_next;
>  	unsigned long paddr_last = paddr_end;
> @@ -595,7 +595,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
>  	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
>  		pud_t *pud;
>  		pmd_t *pmd;
> -		pgprot_t prot = PAGE_KERNEL;
> +		pgprot_t prot = _prot;
>  
>  		vaddr = (unsigned long)__va(paddr);
>  		pud = pud_page + pud_index(vaddr);
> @@ -644,9 +644,12 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
>  		if (page_size_mask & (1<<PG_LEVEL_1G)) {
>  			pages++;
>  			spin_lock(&init_mm.page_table_lock);
> +
> +			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
> +
>  			set_pte_init((pte_t *)pud,
>  				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
> -					     PAGE_KERNEL_LARGE),
> +					     prot),
>  				     init);
>  			spin_unlock(&init_mm.page_table_lock);
>  			paddr_last = paddr_next;
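
A note on the 1G-page path above: the caller's prot is OR'ed into the
large-page bits rather than replacing them, so with the default
argument the result still equals PAGE_KERNEL_LARGE, roughly (sketch):

	/* prot == PAGE_KERNEL keeps the pre-existing bits */
	pgprot_t large = __pgprot(pgprot_val(PAGE_KERNEL) |
				  __PAGE_KERNEL_LARGE);

i.e. behaviour is unchanged for existing callers.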
> @@ -669,7 +672,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
>  
>  static unsigned long __meminit
>  phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
> -	      unsigned long page_size_mask, bool init)
> +	      unsigned long page_size_mask, pgprot_t prot, bool init)
>  {
>  	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
>  
> @@ -679,7 +682,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
>  
>  	if (!pgtable_l5_enabled())
>  		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
> -				     page_size_mask, init);
> +				     page_size_mask, prot, init);
>  
>  	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
>  		p4d_t *p4d = p4d_page + p4d_index(vaddr);
> @@ -702,13 +705,13 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
>  		if (!p4d_none(*p4d)) {
>  			pud = pud_offset(p4d, 0);
>  			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
> -					page_size_mask, init);
> +					page_size_mask, prot, init);
>  			continue;
>  		}
>  
>  		pud = alloc_low_page();
>  		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
> -					   page_size_mask, init);
> +					   page_size_mask, prot, init);
>  
>  		spin_lock(&init_mm.page_table_lock);
>  		p4d_populate_init(&init_mm, p4d, pud, init);
> @@ -722,7 +725,7 @@ static unsigned long __meminit
>  __kernel_physical_mapping_init(unsigned long paddr_start,
>  			       unsigned long paddr_end,
>  			       unsigned long page_size_mask,
> -			       bool init)
> +			       pgprot_t prot, bool init)
>  {
>  	bool pgd_changed = false;
>  	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
> @@ -743,13 +746,13 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
>  			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
>  						   __pa(vaddr_end),
>  						   page_size_mask,
> -						   init);
> +						   prot, init);
>  			continue;
>  		}
>  
>  		p4d = alloc_low_page();
>  		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
> -					   page_size_mask, init);
> +					   page_size_mask, prot, init);
>  
>  		spin_lock(&init_mm.page_table_lock);
>  		if (pgtable_l5_enabled())
> @@ -778,10 +781,10 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
>  unsigned long __meminit
>  kernel_physical_mapping_init(unsigned long paddr_start,
>  			     unsigned long paddr_end,
> -			     unsigned long page_size_mask)
> +			     unsigned long page_size_mask, pgprot_t prot)
>  {
>  	return __kernel_physical_mapping_init(paddr_start, paddr_end,
> -					      page_size_mask, true);
> +					      page_size_mask, prot, true);
>  }
>  
>  /*
> @@ -796,7 +799,8 @@ kernel_physical_mapping_change(unsigned long paddr_start,
>  			       unsigned long page_size_mask)
>  {
>  	return __kernel_physical_mapping_init(paddr_start, paddr_end,
> -					      page_size_mask, false);
> +					      page_size_mask, PAGE_KERNEL,
> +					      false);
>  }
>  
>  #ifndef CONFIG_NUMA
> @@ -864,7 +868,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
>  	unsigned long start_pfn = start >> PAGE_SHIFT;
>  	unsigned long nr_pages = size >> PAGE_SHIFT;
>  
> -	init_memory_mapping(start, start + size);
> +	init_memory_mapping(start, start + size, PAGE_KERNEL);
>  
>  	return add_pages(nid, start_pfn, nr_pages, params);
>  }
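
And arch_add_memory() above is presumably the call site that the later
mhp_params patch in this series (6/7) switches from the hardcoded
PAGE_KERNEL to a caller-chosen protection, i.e. something like this
sketch of where the series is headed (not part of this patch):

	init_memory_mapping(start, start + size, params->pgprot);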
> diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
> index eeae142062ed..3f37b5c80bb3 100644
> --- a/arch/x86/mm/mm_internal.h
> +++ b/arch/x86/mm/mm_internal.h
> @@ -12,7 +12,8 @@ void early_ioremap_page_table_range_init(void);
>  
>  unsigned long kernel_physical_mapping_init(unsigned long start,
>  					     unsigned long end,
> -					     unsigned long page_size_mask);
> +					     unsigned long page_size_mask,
> +					     pgprot_t prot);
>  unsigned long kernel_physical_mapping_change(unsigned long start,
>  					     unsigned long end,
>  					     unsigned long page_size_mask);
> diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
> index 607f58147311..c60255da5a6c 100644
> --- a/arch/x86/platform/uv/bios_uv.c
> +++ b/arch/x86/platform/uv/bios_uv.c
> @@ -352,7 +352,8 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
>  	if (type == EFI_MEMORY_MAPPED_IO)
>  		return ioremap(phys_addr, size);
>  
> -	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
> +	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
> +					   PAGE_KERNEL);
>  	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
>  		unsigned long top = last_map_pfn << PAGE_SHIFT;
>  		efi_ioremap(top, size - (top - phys_addr), type, attribute);
> -- 
> 2.20.1
> 

-- 
Michal Hocko
SUSE Labs

Thread overview: 30+ messages
2020-02-21 18:24 [PATCH v3 0/7] Allow setting caching mode in arch_add_memory() for P2PDMA Logan Gunthorpe
2020-02-21 18:24 ` [PATCH v3 1/7] mm/memory_hotplug: Drop the flags field from struct mhp_restrictions Logan Gunthorpe
2020-02-28 21:31   ` Dan Williams
2020-03-03  9:50   ` Michal Hocko
2020-02-21 18:24 ` [PATCH v3 2/7] mm/memory_hotplug: Rename mhp_restrictions to mhp_params Logan Gunthorpe
2020-02-24  9:11   ` David Hildenbrand
2020-02-29 20:44   ` Dan Williams
2020-03-03  9:50   ` Michal Hocko
2020-02-21 18:24 ` [PATCH v3 3/7] x86/mm: Thread pgprot_t through init_memory_mapping() Logan Gunthorpe
2020-02-29 22:37   ` Dan Williams
2020-03-03  9:52   ` Michal Hocko [this message]
2020-02-21 18:25 ` [PATCH v3 4/7] x86/mm: Introduce _set_memory_prot() Logan Gunthorpe
2020-02-29 22:33   ` Dan Williams
2020-03-02 18:46     ` Logan Gunthorpe
2020-02-21 18:25 ` [PATCH v3 5/7] powerpc/mm: Thread pgprot_t through create_section_mapping() Logan Gunthorpe
2020-02-21 18:25 ` [PATCH v3 6/7] mm/memory_hotplug: Add pgprot_t to mhp_params Logan Gunthorpe
2020-02-24  9:26   ` David Hildenbrand
2020-02-29 22:44   ` Dan Williams
2020-03-02 18:55     ` Logan Gunthorpe
2020-03-02 20:26       ` Dan Williams
2020-02-21 18:25 ` [PATCH v3 7/7] mm/memremap: Set caching mode for PCI P2PDMA memory to WC Logan Gunthorpe
2020-02-29 22:47   ` Dan Williams
2020-03-02 21:20     ` Logan Gunthorpe
2020-02-27 17:17 ` [PATCH v3 0/7] Allow setting caching mode in arch_add_memory() for P2PDMA Jason Gunthorpe
2020-02-27 17:21   ` Logan Gunthorpe
2020-02-27 17:43     ` Jason Gunthorpe
2020-02-27 17:54       ` Logan Gunthorpe
2020-02-27 17:55       ` Dan Williams
2020-02-27 18:03         ` Jason Gunthorpe
2020-02-27 18:08           ` Dan Williams
