From: Andrew Jones <drjones@redhat.com>
To: kvm@vger.kernel.org
Cc: alexandru.elisei@arm.com, nikos.nikoleris@arm.com,
	andre.przywara@arm.com, eric.auger@redhat.com
Subject: [PATCH kvm-unit-tests v2 4/8] arm/arm64: mmu: Stop mapping an assumed IO region
Date: Tue, 20 Apr 2021 20:59:58 +0200
Message-ID: <20210420190002.383444-5-drjones@redhat.com>
In-Reply-To: <20210420190002.383444-1-drjones@redhat.com>

By providing a proper ioremap function, we can just rely on devices
calling it for each region they need (as they already do) instead of
mapping a big assumed I/O range. We don't require the MMU to be
enabled at the time of the ioremap call; when it isn't, we add the
mapping to the identity map anyway, so it takes effect once the MMU
is turned on. This allows us to call setup_vm after
io_init. Why don't we just call setup_vm before io_init, I hear you
ask? Well, that's because tests like sieve want to start with the MMU
off, later call setup_vm, and all the while have working I/O. Some
unit tests are just really demanding...
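
To illustrate the intended usage (a minimal sketch, not part of this
patch: the device name, base address, size, and register offset below
are all made up), a device init path can now simply map the MMIO
window it needs, whether or not the MMU is enabled yet:

	#include <asm/io.h>

	static void __iomem *uart_base;

	static void example_uart_init(void)
	{
		/*
		 * ioremap() installs the mapping in the active page
		 * table, or in the idmap when the MMU is still off,
		 * so this works before or after setup_vm().
		 */
		uart_base = ioremap(0x09000000, 0x1000);

		/* Illustrative register poke at a made-up offset. */
		writel(0x0, uart_base + 0x30);
	}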

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/arm/asm/io.h     |  6 ++++++
 lib/arm/asm/mmu.h    |  1 +
 lib/arm/asm/page.h   |  2 ++
 lib/arm/mmu.c        | 37 +++++++++++++++++++++++++++----------
 lib/arm64/asm/io.h   |  6 ++++++
 lib/arm64/asm/mmu.h  |  1 +
 lib/arm64/asm/page.h |  2 ++
 7 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/lib/arm/asm/io.h b/lib/arm/asm/io.h
index ba3b0b2412ad..e4caa6ff5d1e 100644
--- a/lib/arm/asm/io.h
+++ b/lib/arm/asm/io.h
@@ -77,6 +77,12 @@ static inline void __raw_writel(u32 val, volatile void __iomem *addr)
 		     : "r" (val));
 }
 
+#define ioremap ioremap
+static inline void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
+{
+	return __ioremap(phys_addr, size);
+}
+
 #define virt_to_phys virt_to_phys
 static inline phys_addr_t virt_to_phys(const volatile void *x)
 {
diff --git a/lib/arm/asm/mmu.h b/lib/arm/asm/mmu.h
index 122874b8aebe..d88a4f16df42 100644
--- a/lib/arm/asm/mmu.h
+++ b/lib/arm/asm/mmu.h
@@ -12,6 +12,7 @@
 #define PTE_SHARED		L_PTE_SHARED
 #define PTE_AF			PTE_EXT_AF
 #define PTE_WBWA		L_PTE_MT_WRITEALLOC
+#define PTE_UNCACHED		L_PTE_MT_UNCACHED
 
 /* See B3.18.7 TLB maintenance operations */
 
diff --git a/lib/arm/asm/page.h b/lib/arm/asm/page.h
index 1fb5cd26ac66..8eb4a883808e 100644
--- a/lib/arm/asm/page.h
+++ b/lib/arm/asm/page.h
@@ -47,5 +47,7 @@ typedef struct { pteval_t pgprot; } pgprot_t;
 extern phys_addr_t __virt_to_phys(unsigned long addr);
 extern unsigned long __phys_to_virt(phys_addr_t addr);
 
+extern void *__ioremap(phys_addr_t phys_addr, size_t size);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASMARM_PAGE_H_ */
diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
index 15eef007f256..ee0c79142ba1 100644
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -11,6 +11,7 @@
 #include <asm/mmu.h>
 #include <asm/setup.h>
 #include <asm/page.h>
+#include <asm/io.h>
 
 #include "alloc_page.h"
 #include "vmalloc.h"
@@ -157,9 +158,8 @@ void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
 void *setup_mmu(phys_addr_t phys_end)
 {
 	uintptr_t code_end = (uintptr_t)&etext;
-	struct mem_region *r;
 
-	/* 0G-1G = I/O, 1G-3G = identity, 3G-4G = vmalloc */
+	/* 3G-4G region is reserved for vmalloc, cap phys_end at 3G */
 	if (phys_end > (3ul << 30))
 		phys_end = 3ul << 30;
 
@@ -170,14 +170,8 @@ void *setup_mmu(phys_addr_t phys_end)
 			"Unsupported translation granule %ld\n", PAGE_SIZE);
 #endif
 
-	mmu_idmap = alloc_page();
-
-	for (r = mem_regions; r->end; ++r) {
-		if (!(r->flags & MR_F_IO))
-			continue;
-		mmu_set_range_sect(mmu_idmap, r->start, r->start, r->end,
-				   __pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
-	}
+	if (!mmu_idmap)
+		mmu_idmap = alloc_page();
 
 	/* armv8 requires code shared between EL1 and EL0 to be read-only */
 	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
@@ -192,6 +186,29 @@ void *setup_mmu(phys_addr_t phys_end)
 	return mmu_idmap;
 }
 
+void __iomem *__ioremap(phys_addr_t phys_addr, size_t size)
+{
+	phys_addr_t paddr_aligned = phys_addr & PAGE_MASK;
+	phys_addr_t paddr_end = PAGE_ALIGN(phys_addr + size);
+	pgprot_t prot = __pgprot(PTE_UNCACHED | PTE_USER);
+	pgd_t *pgtable;
+
+	assert(sizeof(long) == 8 || !(phys_addr >> 32));
+
+	if (mmu_enabled()) {
+		pgtable = current_thread_info()->pgtable;
+	} else {
+		if (!mmu_idmap)
+			mmu_idmap = alloc_page();
+		pgtable = mmu_idmap;
+	}
+
+	mmu_set_range_ptes(pgtable, paddr_aligned, paddr_aligned,
+			   paddr_end, prot);
+
+	return (void __iomem *)(unsigned long)phys_addr;
+}
+
 phys_addr_t __virt_to_phys(unsigned long addr)
 {
 	if (mmu_enabled()) {
diff --git a/lib/arm64/asm/io.h b/lib/arm64/asm/io.h
index e0a03b250d5b..be19f471c0fa 100644
--- a/lib/arm64/asm/io.h
+++ b/lib/arm64/asm/io.h
@@ -71,6 +71,12 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 	return val;
 }
 
+#define ioremap ioremap
+static inline void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
+{
+	return __ioremap(phys_addr, size);
+}
+
 #define virt_to_phys virt_to_phys
 static inline phys_addr_t virt_to_phys(const volatile void *x)
 {
diff --git a/lib/arm64/asm/mmu.h b/lib/arm64/asm/mmu.h
index 72d75eafc882..72371b2d9fe3 100644
--- a/lib/arm64/asm/mmu.h
+++ b/lib/arm64/asm/mmu.h
@@ -8,6 +8,7 @@
 #include <asm/barrier.h>
 
 #define PMD_SECT_UNCACHED	PMD_ATTRINDX(MT_DEVICE_nGnRE)
+#define PTE_UNCACHED		PTE_ATTRINDX(MT_DEVICE_nGnRE)
 #define PTE_WBWA		PTE_ATTRINDX(MT_NORMAL)
 
 static inline void flush_tlb_all(void)
diff --git a/lib/arm64/asm/page.h b/lib/arm64/asm/page.h
index ae4484b22114..d0fac6ea563d 100644
--- a/lib/arm64/asm/page.h
+++ b/lib/arm64/asm/page.h
@@ -72,5 +72,7 @@ typedef struct { pteval_t pgprot; } pgprot_t;
 extern phys_addr_t __virt_to_phys(unsigned long addr);
 extern unsigned long __phys_to_virt(phys_addr_t addr);
 
+extern void *__ioremap(phys_addr_t phys_addr, size_t size);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASMARM64_PAGE_H_ */
-- 
2.30.2

