From: Christophe Leroy <christophe.leroy@c-s.fr>
To: Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Michael Ellerman <mpe@ellerman.id.au>,
	 npiggin@gmail.com
Cc: linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org
Subject: [PATCH v1 09/10] powerpc/mm: make __ioremap_caller() common to PPC32 and PPC64
Date: Tue, 13 Aug 2019 20:11:42 +0000 (UTC)
Message-ID: <6aab930b3791d722784bada7e975a8d9504e7753.1565726867.git.christophe.leroy@c-s.fr>
In-Reply-To: <6bc35eca507359075528bc0e55938bc1ce8ee485.1565726867.git.christophe.leroy@c-s.fr>

The PPC32 and PPC64 versions of __ioremap_caller() do the same thing.
Define a common one.

__ioremap() is not reused, because most of the checks it performs are
unnecessary when coming from __ioremap_caller().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
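A note on the arithmetic both originals share and the common version keeps:
the physical address is truncated to a page boundary, the size is rounded up
to whole pages, and the sub-page offset is re-added to the returned virtual
address. A minimal standalone model of this, with made-up values and 4K
pages assumed:

	#include <stdio.h>

	#define PAGE_SIZE	0x1000UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long addr = 0x10000123UL;	/* made-up address */
		unsigned long size = 0x10UL;		/* made-up length */
		unsigned long pa = addr & PAGE_MASK;	/* 0x10000000 */

		/* round the mapped span to whole pages: one page here */
		size = PAGE_ALIGN(addr + size) - pa;

		/* callers get va + this sub-page offset back */
		printf("pa=%#lx size=%#lx offset=%#lx\n",
		       pa, size, addr & ~PAGE_MASK);
		return 0;
	}

This prints pa=0x10000000 size=0x1000 offset=0x123.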
 arch/powerpc/mm/ioremap.c    | 99 ++++++++++++++++++++++++++++++++++++++++++++
 arch/powerpc/mm/pgtable_32.c | 75 ---------------------------------
 arch/powerpc/mm/pgtable_64.c | 61 ---------------------------
 3 files changed, 99 insertions(+), 136 deletions(-)
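
For context, a hypothetical consumer of the resulting common path; the MMIO
base, size and register offset below are made up for illustration. On
powerpc, both ioremap() and ioremap_prot() funnel into __ioremap_caller():

	void __iomem *regs;

	regs = ioremap(0xfe000000, 0x1000);	/* made-up MMIO base/size */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);		/* made-up register offset */

	iounmap(regs);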

diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 889ee656cf64..537c9148cea1 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -76,6 +76,105 @@ void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long f
 }
 EXPORT_SYMBOL(ioremap_prot);
 
+int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
+			 pgprot_t prot, int nid)
+{
+	unsigned long i;
+
+	for (i = 0; i < size; i += PAGE_SIZE) {
+		int err = map_kernel_page(ea + i, pa + i, prot);
+
+		if (err) {
+			if (slab_is_available())
+				unmap_kernel_range(ea, size);
+			else
+				WARN_ON_ONCE(1); /* Should clean up */
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
+			       pgprot_t prot, void *caller)
+{
+	phys_addr_t pa = addr & PAGE_MASK;
+	int ret;
+	unsigned long va;
+
+	size = PAGE_ALIGN(addr + size) - pa;
+
+#ifdef CONFIG_PPC64
+	/* We don't support the 4K PFN hack with ioremap */
+	if (pgprot_val(prot) & H_PAGE_4K_PFN)
+		return NULL;
+#else
+	/*
+	 * If the address lies within the first 16 MB, assume it's in ISA
+	 * memory space
+	 */
+	if (pa < SZ_16M)
+		pa += _ISA_MEM_BASE;
+
+#ifndef CONFIG_CRASH_DUMP
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using.
+	 * mem_init() sets high_memory so only do the check after that.
+	 */
+	if (slab_is_available() && pa <= virt_to_phys(high_memory - 1) &&
+	    page_is_ram(__phys_to_pfn(pa))) {
+		pr_err("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
+		       (unsigned long long)pa, __builtin_return_address(0));
+		return NULL;
+	}
+#endif
+#endif /* CONFIG_PPC64 */
+
+	if (size == 0 || pa == 0)
+		return NULL;
+
+	/*
+	 * Is it already mapped?  Perhaps overlapped by a previous
+	 * mapping.
+	 */
+	va = p_block_mapped(pa);
+	if (va)
+		return (void __iomem *)va + (addr & ~PAGE_MASK);
+
+	/*
+	 * Choose an address to map it to.
+	 * Once the vmalloc system is running, we use it.
+	 * Before that, we map using addresses going
+	 * down from ioremap_bot.  vmalloc will use
+	 * the addresses from IOREMAP_BASE through
+	 * ioremap_bot
+	 *
+	 */
+	if (slab_is_available()) {
+		struct vm_struct *area;
+
+		area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_BASE,
+					    ioremap_bot, caller);
+		if (area == NULL)
+			return NULL;
+
+		area->phys_addr = pa;
+		va = (unsigned long)area->addr;
+	} else {
+		ioremap_bot -= size;
+		va = ioremap_bot;
+	}
+	ret = ioremap_range(va, pa, size, prot, NUMA_NO_NODE);
+	if (!ret)
+		return (void __iomem *)va + (addr & ~PAGE_MASK);
+
+	if (!slab_is_available())
+		ioremap_bot += size;
+
+	return NULL;
+}
+
 /*
  * Unmap an IO region and remove it from vmalloc'd list.
  * Access to IO memory should be serialized by driver.
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 4597f45e4dc6..bacf3b85191c 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -35,81 +35,6 @@
 
 extern char etext[], _stext[], _sinittext[], _einittext[];
 
-void __iomem *
-__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
-{
-	unsigned long v, i;
-	phys_addr_t p;
-	int err;
-
-	/*
-	 * Choose an address to map it to.
-	 * Once the vmalloc system is running, we use it.
-	 * Before then, we use space going down from IOREMAP_TOP
-	 * (ioremap_bot records where we're up to).
-	 */
-	p = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - p;
-
-	/*
-	 * If the address lies within the first 16 MB, assume it's in ISA
-	 * memory space
-	 */
-	if (p < 16*1024*1024)
-		p += _ISA_MEM_BASE;
-
-#ifndef CONFIG_CRASH_DUMP
-	/*
-	 * Don't allow anybody to remap normal RAM that we're using.
-	 * mem_init() sets high_memory so only do the check after that.
-	 */
-	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
-	    page_is_ram(__phys_to_pfn(p))) {
-		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
-		       (unsigned long long)p, __builtin_return_address(0));
-		return NULL;
-	}
-#endif
-
-	if (size == 0)
-		return NULL;
-
-	/*
-	 * Is it already mapped?  Perhaps overlapped by a previous
-	 * mapping.
-	 */
-	v = p_block_mapped(p);
-	if (v)
-		goto out;
-
-	if (slab_is_available()) {
-		struct vm_struct *area;
-		area = get_vm_area_caller(size, VM_IOREMAP, caller);
-		if (area == 0)
-			return NULL;
-		area->phys_addr = p;
-		v = (unsigned long) area->addr;
-	} else {
-		v = (ioremap_bot -= size);
-	}
-
-	/*
-	 * Should check if it is a candidate for a BAT mapping
-	 */
-
-	err = 0;
-	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-		err = map_kernel_page(v + i, p + i, prot);
-	if (err) {
-		if (slab_is_available())
-			vunmap((void *)v);
-		return NULL;
-	}
-
-out:
-	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
-}
-
 static void __init *early_alloc_pgtable(unsigned long size)
 {
 	void *ptr = memblock_alloc(size, size);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 32220f7381d7..781263df9f5e 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -101,67 +101,6 @@ unsigned long __pte_frag_size_shift;
 EXPORT_SYMBOL(__pte_frag_size_shift);
 #endif
 
-int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
-{
-	unsigned long i;
-
-	for (i = 0; i < size; i += PAGE_SIZE) {
-		int err = map_kernel_page(ea + i, pa + i, prot);
-		if (err) {
-			if (slab_is_available())
-				unmap_kernel_range(ea, size);
-			else
-				WARN_ON_ONCE(1); /* Should clean up */
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
-				pgprot_t prot, void *caller)
-{
-	phys_addr_t paligned;
-	void __iomem *ret;
-
-	/*
-	 * Choose an address to map it to.
-	 * Once the vmalloc system is running, we use it.
-	 * Before that, we map using addresses going
-	 * down from ioremap_bot.  vmalloc will use
-	 * the addresses from IOREMAP_BASE through
-	 * ioremap_bot
-	 * 
-	 */
-	paligned = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - paligned;
-
-	if ((size == 0) || (paligned == 0))
-		return NULL;
-
-	if (slab_is_available()) {
-		struct vm_struct *area;
-
-		area = __get_vm_area_caller(size, VM_IOREMAP,
-					    IOREMAP_BASE, ioremap_bot,
-					    caller);
-		if (area == NULL)
-			return NULL;
-
-		area->phys_addr = paligned;
-		ret = __ioremap_at(paligned, area->addr, size, prot);
-	} else {
-		ret = __ioremap_at(paligned, (void *)ioremap_bot - size, size, prot);
-		if (ret)
-			ioremap_bot -= size;
-	}
-
-	if (ret)
-		ret += addr & ~PAGE_MASK;
-	return ret;
-}
-
 #ifndef __PAGETABLE_PUD_FOLDED
 /* 4 level page table */
 struct page *pgd_page(pgd_t pgd)
-- 
2.13.3
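
A note on the early-boot branch above: before slab is available, virtual
addresses are handed out by simply decrementing ioremap_bot, and only the
most recent allocation can be rolled back, which is what the error path
does. A simplified standalone model of that bump-down allocator, with a
made-up top address:

	/* simplified model, not kernel code */
	static unsigned long ioremap_bot = 0xf0000000UL;	/* made-up top */

	static unsigned long early_ioremap_alloc(unsigned long size)
	{
		ioremap_bot -= size;	/* grow downwards; no general free */
		return ioremap_bot;
	}

	static void early_ioremap_rollback(unsigned long size)
	{
		ioremap_bot += size;	/* undo the most recent allocation,
					   as done on mapping failure */
	}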



Thread overview: 24+ messages
2019-08-13 20:11 [PATCH v1 01/10] powerpc/mm: drop ppc_md.iounmap() Christophe Leroy
2019-08-13 20:11 ` [PATCH v1 02/10] powerpc/mm: rework io-workaround invocation Christophe Leroy
2019-08-14  5:38   ` Christoph Hellwig
2019-08-13 20:11 ` [PATCH v1 03/10] powerpc/mm: move common 32/64 bits ioremap functions into ioremap.c Christophe Leroy
2019-08-13 20:11 ` [PATCH v1 04/10] powerpc/mm: move ioremap_prot() " Christophe Leroy
2019-08-13 20:11 ` [PATCH v1 05/10] powerpc/mm: Do early ioremaps from top to bottom on PPC64 too Christophe Leroy
2019-08-14  5:55   ` Christoph Hellwig
2019-08-14  6:10     ` Christophe Leroy
2019-08-14  6:14       ` Christoph Hellwig
2019-08-19 13:42   ` Nicholas Piggin
2019-08-20  0:20     ` Michael Ellerman
2019-08-20  5:10       ` Christophe Leroy
2019-08-13 20:11 ` [PATCH v1 06/10] powerpc/mm: make ioremap_bot common to all Christophe Leroy
2019-08-13 20:11 ` [PATCH v1 07/10] powerpc/mm: move iounmap() into ioremap.c and drop __iounmap() Christophe Leroy
2019-08-19 12:55   ` Michael Ellerman
2019-08-13 20:11 ` [PATCH v1 08/10] powerpc/mm: move __ioremap_at() and __iounmap_at() into ioremap.c Christophe Leroy
2019-08-14  5:23   ` Christoph Hellwig
2019-08-20  0:18   ` Michael Ellerman
2019-08-13 20:11 ` Christophe Leroy [this message]
2019-08-13 20:11 ` [PATCH v1 10/10] powerpc/mm: refactor ioremap_range() and use ioremap_page_range() Christophe Leroy
2019-08-14  5:49   ` Christoph Hellwig
2019-08-14  6:23     ` Christophe Leroy
2019-08-14  6:30       ` Christoph Hellwig
2019-08-14  5:19 ` [PATCH v1 01/10] powerpc/mm: drop ppc_md.iounmap() Christoph Hellwig
