From: Will Deacon <will@kernel.org>
To: kvmarm@lists.cs.columbia.edu
Cc: kernel-team@android.com, Marc Zyngier <maz@kernel.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	linux-arm-kernel@lists.infradead.org
Subject: [PATCH 02/20] KVM: arm64: Add support for creating kernel-agnostic stage-1 page tables
Date: Thu, 30 Jul 2020 16:33:48 +0100
Message-ID: <20200730153406.25136-3-will@kernel.org>
In-Reply-To: <20200730153406.25136-1-will@kernel.org>

The generic page-table walker is pretty useless as it stands, because it
doesn't understand enough to allocate anything. Teach it about stage-1
page-tables, and hook up an API for allocating these for the hypervisor
at EL2.
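
A minimal usage sketch of the new API (hyp_va_bits, hyp_va, size and
phys below are placeholder variables for illustration, not symbols
added by this patch):

	/* hyp_va_bits, hyp_va, size and phys are illustrative placeholders. */
	void *cookie = kvm_pgtable_hyp_alloc_cookie(hyp_va_bits);
	int err;

	if (!cookie)
		return -ENOMEM;

	/* Map [phys, phys + size) at hyp_va as normal, read/write memory. */
	err = kvm_pgtable_hyp_map(cookie, hyp_va, size, phys,
				  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
	if (err)
		kvm_pgtable_hyp_free_cookie(cookie);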

Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/kvm_pgtable.h |  33 ++++++
 arch/arm64/kvm/pgtable.c             | 145 +++++++++++++++++++++++++++
 2 files changed, 178 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index c1679c1a9a49..5be09ac3efa3 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -61,6 +61,39 @@ struct kvm_pgtable_walker {
 	const enum kvm_pgtable_walk_flags	flags;
 };
 
+/**
+ * kvm_pgtable_hyp_alloc_cookie() - Allocate a hypervisor stage-1 page-table.
+ * @va_bits:	Maximum virtual address bits.
+ *
+ * Return: An opaque cookie to manipulate the page-table, or NULL on failure.
+ */
+void *kvm_pgtable_hyp_alloc_cookie(u32 va_bits);
+
+/**
+ * kvm_pgtable_hyp_free_cookie() - Free an unused hypervisor stage-1 page-table.
+ * @cookie:	Opaque cookie allocated by kvm_pgtable_hyp_alloc_cookie().
+ *
+ * The page-table is assumed to be unreachable by any hardware walkers prior
+ * to freeing and therefore no TLB invalidation is performed.
+ */
+void kvm_pgtable_hyp_free_cookie(void *cookie);
+
+/**
+ * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
+ * @cookie:	Opaque cookie allocated by kvm_pgtable_hyp_alloc_cookie().
+ * @addr:	Virtual address at which to place the mapping.
+ * @size:	Size of the mapping in bytes.
+ * @phys:	Physical address of the memory to map.
+ * @prot:	Permissions and attributes for the mapping.
+ *
+ * If device attributes are not explicitly requested in @prot, then the
+ * mapping will be normal, cacheable.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kvm_pgtable_hyp_map(void *cookie, u64 addr, u64 size, u64 phys,
+			enum kvm_pgtable_prot prot);
+
 /**
  * kvm_pgtable_get_pgd_phys() - Get physical pgd pointer for a page-table.
  * @cookie:	Opaque cookie allocated by kvm_pgtable_*_alloc_cookie().
diff --git a/arch/arm64/kvm/pgtable.c b/arch/arm64/kvm/pgtable.c
index 298620cbd67a..b148c76f8b79 100644
--- a/arch/arm64/kvm/pgtable.c
+++ b/arch/arm64/kvm/pgtable.c
@@ -24,8 +24,18 @@
 
 #define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)
 
+#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	3
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	1
+#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
+#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
+#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)
+
 #define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 51)
 
+#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)
+
 struct kvm_pgtable {
 	struct kvm_s2_mmu		*mmu;
 
@@ -293,6 +303,141 @@ int kvm_pgtable_walk(void *cookie, u64 addr, u64 size,
 	return _kvm_pgtable_walk(&walk_data);
 }
 
+struct hyp_map_data {
+	u64		phys;
+	kvm_pte_t	attr;
+};
+
+static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot,
+				 struct hyp_map_data *data)
+{
+	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
+	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
+	kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
+	u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
+	u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
+					       KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;
+
+	if (!(prot & KVM_PGTABLE_PROT_R))
+		return -EINVAL;
+
+	if (prot & KVM_PGTABLE_PROT_X) {
+		if (prot & KVM_PGTABLE_PROT_W)
+			return -EINVAL;
+
+		if (device)
+			return -EINVAL;
+	} else {
+		attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
+	}
+
+	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
+	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
+	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
+	data->attr = attr;
+	return 0;
+}
+
+static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
+				    kvm_pte_t *ptep, struct hyp_map_data *data)
+{
+	u64 granule = kvm_granule_size(level), phys = data->phys;
+
+	if (!kvm_block_mapping_supported(addr, end, phys, level))
+		return false;
+
+	WARN_ON(!kvm_set_valid_leaf_pte(ptep, phys, data->attr, level));
+	data->phys += granule;
+	return true;
+}
+
+static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+			  enum kvm_pgtable_walk_flags flag, void * const arg)
+{
+	kvm_pte_t *childp;
+
+	if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
+		return 0;
+
+	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
+		return -EINVAL;
+
+	childp = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL);
+	if (!childp) {
+		kvm_err("Failed to allocate page-table page\n");
+		return -ENOMEM;
+	}
+
+	kvm_set_table_pte(ptep, childp);
+	return 0;
+}
+
+int kvm_pgtable_hyp_map(void *cookie, u64 addr, u64 size, u64 phys,
+			enum kvm_pgtable_prot prot)
+{
+	int ret;
+	struct hyp_map_data map_data = {
+		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),
+	};
+	struct kvm_pgtable_walker walker = {
+		.cb	= hyp_map_walker,
+		.flags	= KVM_PGTABLE_WALK_LEAF,
+		.arg	= &map_data,
+	};
+
+	ret = hyp_map_set_prot_attr(prot, &map_data);
+	if (ret)
+		return ret;
+
+	ret = kvm_pgtable_walk(cookie, addr, size, &walker);
+	dsb(ishst);
+	isb();
+	return ret;
+}
+
+void *kvm_pgtable_hyp_alloc_cookie(u32 va_bits)
+{
+	struct kvm_pgtable *pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
+
+	if (!pgt)
+		return NULL;
+
+	pgt->ia_bits = va_bits;
+	pgt->start_level = kvm_start_level(va_bits);
+
+	pgt->pgd = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL);
+	if (!pgt->pgd) {
+		kfree(pgt);
+		pgt = NULL;
+	}
+
+	return pgt;
+}
+
+static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+			   enum kvm_pgtable_walk_flags flag, void * const arg)
+{
+	free_page((unsigned long)kvm_pte_follow(*ptep));
+	return 0;
+}
+
+void kvm_pgtable_hyp_free_cookie(void *cookie)
+{
+	struct kvm_pgtable *pgt = cookie;
+	struct kvm_pgtable_walker walker = {
+		.cb	= hyp_free_walker,
+		.flags	= KVM_PGTABLE_WALK_TABLE_POST,
+	};
+
+	if (kvm_pgtable_walk(cookie, 0, BIT(pgt->ia_bits), &walker))
+		kvm_err("Failed to free page-table pages\n");
+
+	/* The pgd is a single page from get_zeroed_page(). */
+	free_page((unsigned long)pgt->pgd);
+	pgt->pgd = NULL;
+	kfree(pgt);
+}
+
 u64 kvm_pgtable_get_pgd_phys(void *cookie)
 {
 	struct kvm_pgtable *pgt = cookie;
-- 
2.28.0.rc0.142.g3c755180ce-goog

