From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, benh@kernel.crashing.org,
	bp@alien8.de, catalin.marinas@arm.com, dan.j.williams@intel.com,
	dave.hansen@linux.intel.com, david@redhat.com,
	ebadger@gigaio.com, hch@lst.de, hpa@zytor.com, jgg@ziepe.ca,
	linux-mm@kvack.org, logang@deltatee.com, luto@kernel.org,
	mhocko@suse.com, mingo@redhat.com, mm-commits@vger.kernel.org,
	mpe@ellerman.id.au, paulus@samba.org, peterz@infradead.org,
	tglx@linutronix.de, torvalds@linux-foundation.org,
	will@kernel.org
Subject: [patch 21/35] x86/mm: thread pgprot_t through init_memory_mapping()
Date: Fri, 10 Apr 2020 14:33:24 -0700
Message-ID: <20200410213324.Ml0cz5N3H%akpm@linux-foundation.org>
In-Reply-To: <20200410143047.bf34a933ce1affdc042c7c80@linux-foundation.org>

From: Logan Gunthorpe <logang@deltatee.com>
Subject: x86/mm: thread pgprot_t through init_memory_mapping()

In preparation for adding a pgprot_t argument to arch_add_memory().

The prototype of init_memory_mapping() also has to move, from page_types.h
to pgtable.h, because its original location comes before the definition of
pgprot_t.
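
The conversion is mechanical: every existing caller now passes PAGE_KERNEL,
so behaviour is unchanged. A minimal sketch of the calling convention
before and after, mirroring the hunks below:

	/* before: protections were implicitly PAGE_KERNEL */
	init_memory_mapping(start, start + size);

	/* after: the caller names the protections explicitly */
	init_memory_mapping(start, start + size, PAGE_KERNEL);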

Link: http://lkml.kernel.org/r/20200306170846.9333-4-logang@deltatee.com
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Eric Badger <ebadger@gigaio.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 arch/x86/include/asm/page_types.h |    3 --
 arch/x86/include/asm/pgtable.h    |    3 ++
 arch/x86/kernel/amd_gart_64.c     |    3 +-
 arch/x86/mm/init.c                |    9 ++++---
 arch/x86/mm/init_32.c             |    3 +-
 arch/x86/mm/init_64.c             |   32 +++++++++++++++-------------
 arch/x86/mm/mm_internal.h         |    3 +-
 arch/x86/platform/uv/bios_uv.c    |    3 +-
 8 files changed, 34 insertions(+), 25 deletions(-)

--- a/arch/x86/include/asm/page_types.h~x86-mm-thread-pgprot_t-through-init_memory_mapping
+++ a/arch/x86/include/asm/page_types.h
@@ -71,9 +71,6 @@ static inline phys_addr_t get_max_mapped
 
 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
 
-extern unsigned long init_memory_mapping(unsigned long start,
-					 unsigned long end);
-
 extern void initmem_init(void);
 
 #endif	/* !__ASSEMBLY__ */
--- a/arch/x86/include/asm/pgtable.h~x86-mm-thread-pgprot_t-through-init_memory_mapping
+++ a/arch/x86/include/asm/pgtable.h
@@ -1081,6 +1081,9 @@ static inline void __meminit init_trampo
 
 void __init poking_init(void);
 
+unsigned long init_memory_mapping(unsigned long start,
+				  unsigned long end, pgprot_t prot);
+
 # ifdef CONFIG_RANDOMIZE_MEMORY
 void __meminit init_trampoline(void);
 # else
--- a/arch/x86/kernel/amd_gart_64.c~x86-mm-thread-pgprot_t-through-init_memory_mapping
+++ a/arch/x86/kernel/amd_gart_64.c
@@ -744,7 +744,8 @@ int __init gart_iommu_init(void)
 
 	start_pfn = PFN_DOWN(aper_base);
 	if (!pfn_range_is_mapped(start_pfn, end_pfn))
-		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
+				    PAGE_KERNEL);
 
 	pr_info("PCI-DMA: using GART IOMMU.\n");
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
--- a/arch/x86/mm/init_32.c~x86-mm-thread-pgprot_t-through-init_memory_mapping
+++ a/arch/x86/mm/init_32.c
@@ -257,7 +257,8 @@ static inline int __is_kernel_text(unsig
 unsigned long __init
 kernel_physical_mapping_init(unsigned long start,
 			     unsigned long end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask,
+			     pgprot_t prot)
 {
 	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
 	unsigned long last_map_addr = end;
--- a/arch/x86/mm/init_64.c~x86-mm-thread-pgprot_t-through-init_memory_mapping
+++ a/arch/x86/mm/init_64.c
@@ -585,7 +585,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned
  */
 static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
-	      unsigned long page_size_mask, bool init)
+	      unsigned long page_size_mask, pgprot_t _prot, bool init)
 {
 	unsigned long pages = 0, paddr_next;
 	unsigned long paddr_last = paddr_end;
@@ -595,7 +595,7 @@ phys_pud_init(pud_t *pud_page, unsigned
 	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
 		pud_t *pud;
 		pmd_t *pmd;
-		pgprot_t prot = PAGE_KERNEL;
+		pgprot_t prot = _prot;
 
 		vaddr = (unsigned long)__va(paddr);
 		pud = pud_page + pud_index(vaddr);
@@ -644,9 +644,12 @@ phys_pud_init(pud_t *pud_page, unsigned
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
+
+			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
+
 			set_pte_init((pte_t *)pud,
 				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
-					     PAGE_KERNEL_LARGE),
+					     prot),
 				     init);
 			spin_unlock(&init_mm.page_table_lock);
 			paddr_last = paddr_next;
@@ -669,7 +672,7 @@ phys_pud_init(pud_t *pud_page, unsigned
 
 static unsigned long __meminit
 phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
-	      unsigned long page_size_mask, bool init)
+	      unsigned long page_size_mask, pgprot_t prot, bool init)
 {
 	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
 
@@ -679,7 +682,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned
 
 	if (!pgtable_l5_enabled())
 		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
-				     page_size_mask, init);
+				     page_size_mask, prot, init);
 
 	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 		p4d_t *p4d = p4d_page + p4d_index(vaddr);
@@ -702,13 +705,13 @@ phys_p4d_init(p4d_t *p4d_page, unsigned
 		if (!p4d_none(*p4d)) {
 			pud = pud_offset(p4d, 0);
 			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
-					page_size_mask, init);
+					page_size_mask, prot, init);
 			continue;
 		}
 
 		pud = alloc_low_page();
 		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
-					   page_size_mask, init);
+					   page_size_mask, prot, init);
 
 		spin_lock(&init_mm.page_table_lock);
 		p4d_populate_init(&init_mm, p4d, pud, init);
@@ -722,7 +725,7 @@ static unsigned long __meminit
 __kernel_physical_mapping_init(unsigned long paddr_start,
 			       unsigned long paddr_end,
 			       unsigned long page_size_mask,
-			       bool init)
+			       pgprot_t prot, bool init)
 {
 	bool pgd_changed = false;
 	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
@@ -743,13 +746,13 @@ __kernel_physical_mapping_init(unsigned
 			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
 						   __pa(vaddr_end),
 						   page_size_mask,
-						   init);
+						   prot, init);
 			continue;
 		}
 
 		p4d = alloc_low_page();
 		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
-					   page_size_mask, init);
+					   page_size_mask, prot, init);
 
 		spin_lock(&init_mm.page_table_lock);
 		if (pgtable_l5_enabled())
@@ -778,10 +781,10 @@ __kernel_physical_mapping_init(unsigned
 unsigned long __meminit
 kernel_physical_mapping_init(unsigned long paddr_start,
 			     unsigned long paddr_end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask, pgprot_t prot)
 {
 	return __kernel_physical_mapping_init(paddr_start, paddr_end,
-					      page_size_mask, true);
+					      page_size_mask, prot, true);
 }
 
 /*
@@ -796,7 +799,8 @@ kernel_physical_mapping_change(unsigned
 			       unsigned long page_size_mask)
 {
 	return __kernel_physical_mapping_init(paddr_start, paddr_end,
-					      page_size_mask, false);
+					      page_size_mask, PAGE_KERNEL,
+					      false);
 }
 
 #ifndef CONFIG_NUMA
@@ -863,7 +867,7 @@ int arch_add_memory(int nid, u64 start,
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	init_memory_mapping(start, start + size);
+	init_memory_mapping(start, start + size, PAGE_KERNEL);
 
 	return add_pages(nid, start_pfn, nr_pages, params);
 }
--- a/arch/x86/mm/init.c~x86-mm-thread-pgprot_t-through-init_memory_mapping
+++ a/arch/x86/mm/init.c
@@ -467,7 +467,7 @@ bool pfn_range_is_mapped(unsigned long s
  * the physical memory. To access them they are temporarily mapped.
  */
 unsigned long __ref init_memory_mapping(unsigned long start,
-					       unsigned long end)
+					unsigned long end, pgprot_t prot)
 {
 	struct map_range mr[NR_RANGE_MR];
 	unsigned long ret = 0;
@@ -481,7 +481,8 @@ unsigned long __ref init_memory_mapping(
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
-						   mr[i].page_size_mask);
+						   mr[i].page_size_mask,
+						   prot);
 
 	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
 
@@ -521,7 +522,7 @@ static unsigned long __init init_range_m
 		 */
 		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
 				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
-		init_memory_mapping(start, end);
+		init_memory_mapping(start, end, PAGE_KERNEL);
 		mapped_ram_size += end - start;
 		can_use_brk_pgt = true;
 	}
@@ -661,7 +662,7 @@ void __init init_mem_mapping(void)
 #endif
 
 	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
+	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);
 
 	/* Init the trampoline, possibly with KASLR memory offset */
 	init_trampoline();
--- a/arch/x86/mm/mm_internal.h~x86-mm-thread-pgprot_t-through-init_memory_mapping
+++ a/arch/x86/mm/mm_internal.h
@@ -12,7 +12,8 @@ void early_ioremap_page_table_range_init
 
 unsigned long kernel_physical_mapping_init(unsigned long start,
 					     unsigned long end,
-					     unsigned long page_size_mask);
+					     unsigned long page_size_mask,
+					     pgprot_t prot);
 unsigned long kernel_physical_mapping_change(unsigned long start,
 					     unsigned long end,
 					     unsigned long page_size_mask);
--- a/arch/x86/platform/uv/bios_uv.c~x86-mm-thread-pgprot_t-through-init_memory_mapping
+++ a/arch/x86/platform/uv/bios_uv.c
@@ -352,7 +352,8 @@ void __iomem *__init efi_ioremap(unsigne
 	if (type == EFI_MEMORY_MAPPED_IO)
 		return ioremap(phys_addr, size);
 
-	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
+					   PAGE_KERNEL);
 	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
 		unsigned long top = last_map_pfn << PAGE_SHIFT;
 		efi_ioremap(top, size - (top - phys_addr), type, attribute);
_
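
For context on where the new argument is headed: the changelog says this is
preparation for giving arch_add_memory() a pgprot_t, so a follow-up patch
can route caller-supplied protections through instead of the hardcoded
PAGE_KERNEL above. A hedged sketch, assuming a later patch in the series
adds a pgprot field to struct mhp_params (that field is an illustration
here, not something this patch introduces):

	/* sketch only: assumes mhp_params later grows a pgprot field */
	int arch_add_memory(int nid, u64 start, u64 size,
			    struct mhp_params *params)
	{
		unsigned long start_pfn = start >> PAGE_SHIFT;
		unsigned long nr_pages = size >> PAGE_SHIFT;

		/* thread the caller's protections through, not PAGE_KERNEL */
		init_memory_mapping(start, start + size, params->pgprot);

		return add_pages(nid, start_pfn, nr_pages, params);
	}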

