From: Will Deacon <will@kernel.org>
To: kvmarm@lists.cs.columbia.edu
Cc: kernel-team@android.com, Marc Zyngier <maz@kernel.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	linux-arm-kernel@lists.infradead.org
Subject: [PATCH 04/20] KVM: arm64: Add support for creating kernel-agnostic stage-2 page tables
Date: Thu, 30 Jul 2020 16:33:50 +0100
Message-ID: <20200730153406.25136-5-will@kernel.org>
In-Reply-To: <20200730153406.25136-1-will@kernel.org>

Introduce alloc() and free() functions to the generic page-table code
for guest stage-2 page-tables and plumb these into the existing KVM
page-table allocator. Subsequent patches will convert other operations
within the KVM allocator over to the generic code.
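
For reference, a minimal usage sketch of the new interface (the
example_*() helpers are illustrative only; the real callers are
kvm_init_stage2_mmu() and kvm_free_stage2_pgd(), converted in the
diff below):

	static int example_stage2_init(struct kvm *kvm, struct kvm_s2_mmu *mmu)
	{
		/* Allocate the stage-2 table and record its physical PGD. */
		void *cookie = kvm_pgtable_stage2_alloc_cookie(kvm);

		if (!cookie)
			return -ENOMEM;

		mmu->pgt_cookie = cookie;
		mmu->pgd_phys = kvm_pgtable_get_pgd_phys(cookie);
		return 0;
	}

	static void example_stage2_teardown(struct kvm_s2_mmu *mmu)
	{
		/* Caller guarantees no hardware walker can reach the table. */
		kvm_pgtable_stage2_free_cookie(mmu->pgt_cookie);
		mmu->pgt_cookie = NULL;
	}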

Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h    |  1 +
 arch/arm64/include/asm/kvm_pgtable.h | 17 ++++++++
 arch/arm64/kvm/mmu.c                 | 42 ++++++++------------
 arch/arm64/kvm/pgtable.c             | 58 ++++++++++++++++++++++++++++
 4 files changed, 93 insertions(+), 25 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e1a32c0707bb..072ab1fac98a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -81,6 +81,7 @@ struct kvm_s2_mmu {
 	 */
 	pgd_t		*pgd;
 	phys_addr_t	pgd_phys;
+	void		*pgt_cookie;
 
 	/* The last vcpu id that ran on each physical CPU */
 	int __percpu *last_vcpu_ran;
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index c607025e1a05..7b4df5d0d2b8 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -99,6 +99,23 @@ void kvm_pgtable_hyp_free_cookie(void *cookie);
 int kvm_pgtable_hyp_map(void *cookie, u64 addr, u64 size, u64 phys,
 			enum kvm_pgtable_prot prot);
 
+/**
+ * kvm_pgtable_stage2_alloc_cookie() - Allocate a guest stage-2 page-table.
+ * @kvm:	KVM structure representing the guest virtual machine.
+ *
+ * Return: An opaque cookie to manipulate the page-table, or NULL on failure.
+ */
+void *kvm_pgtable_stage2_alloc_cookie(struct kvm *kvm);
+
+/**
+ * kvm_pgtable_stage2_free_cookie() - Free an unused guest stage-2 page-table.
+ * @cookie:	Opaque cookie allocated by kvm_pgtable_stage2_alloc_cookie().
+ *
+ * The page-table is assumed to be unreachable by any hardware walkers prior
+ * to freeing and therefore no TLB invalidation is performed.
+ */
+void kvm_pgtable_stage2_free_cookie(void *cookie);
+
 /**
  * kvm_pgtable_get_pgd_phys() - Get physical pgd pointer for a page-table.
  * @cookie:	Opaque cookie allocated by kvm_pgtable_*_alloc_cookie().
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index d5cb88a3c1e5..ecca611f208b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -696,35 +696,27 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
  * @kvm:	The pointer to the KVM structure
  * @mmu:	The pointer to the s2 MMU structure
  *
- * Allocates only the stage-2 HW PGD level table(s) of size defined by
- * stage2_pgd_size(mmu->kvm).
- *
+ * Allocates only the stage-2 HW PGD level table(s).
  * Note we don't need locking here as this is only called when the VM is
  * created, which can only be done once.
  */
 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 {
-	phys_addr_t pgd_phys;
-	pgd_t *pgd;
 	int cpu;
+	void *cookie;
 
-	if (mmu->pgd != NULL) {
+	if (mmu->pgt_cookie != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
-	/* Allocate the HW PGD, making sure that each page gets its own refcount */
-	pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
-	if (!pgd)
+	cookie = kvm_pgtable_stage2_alloc_cookie(kvm);
+	if (!cookie)
 		return -ENOMEM;
 
-	pgd_phys = virt_to_phys(pgd);
-	if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
-		return -EINVAL;
-
 	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
 	if (!mmu->last_vcpu_ran) {
-		free_pages_exact(pgd, stage2_pgd_size(kvm));
+		kvm_pgtable_stage2_free_cookie(cookie);
 		return -ENOMEM;
 	}
 
@@ -732,8 +724,9 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
 
 	mmu->kvm = kvm;
-	mmu->pgd = pgd;
-	mmu->pgd_phys = pgd_phys;
+	mmu->pgt_cookie = cookie;
+	mmu->pgd_phys = kvm_pgtable_get_pgd_phys(cookie);
+	mmu->pgd = __va(mmu->pgd_phys);
 	mmu->vmid.vmid_gen = 0;
 
 	return 0;
@@ -809,21 +802,20 @@ void stage2_unmap_vm(struct kvm *kvm)
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 {
 	struct kvm *kvm = mmu->kvm;
-	void *pgd = NULL;
+	void *cookie = NULL;
 
 	spin_lock(&kvm->mmu_lock);
-	if (mmu->pgd) {
-		unmap_stage2_range(mmu, 0, kvm_phys_size(kvm));
-		pgd = READ_ONCE(mmu->pgd);
+	cookie = mmu->pgt_cookie;
+	if (cookie) {
 		mmu->pgd = NULL;
+		mmu->pgd_phys = 0;
+		mmu->pgt_cookie = NULL;
+		free_percpu(mmu->last_vcpu_ran);
 	}
 	spin_unlock(&kvm->mmu_lock);
 
-	/* Free the HW pgd, one page at a time */
-	if (pgd) {
-		free_pages_exact(pgd, stage2_pgd_size(kvm));
-		free_percpu(mmu->last_vcpu_ran);
-	}
+	if (cookie)
+		kvm_pgtable_stage2_free_cookie(cookie);
 }
 
 static p4d_t *stage2_get_p4d(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
diff --git a/arch/arm64/kvm/pgtable.c b/arch/arm64/kvm/pgtable.c
index b148c76f8b79..f05a45c02361 100644
--- a/arch/arm64/kvm/pgtable.c
+++ b/arch/arm64/kvm/pgtable.c
@@ -443,3 +443,61 @@ u64 kvm_pgtable_get_pgd_phys(void *cookie)
 	struct kvm_pgtable *pgt = cookie;
 	return __pa(pgt->pgd);
 }
+
+void *kvm_pgtable_stage2_alloc_cookie(struct kvm *kvm)
+{
+	size_t pgd_sz;
+	u64 vtcr = kvm->arch.vtcr;
+	struct kvm_pgtable *pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
+
+	if (!pgt)
+		return NULL;
+
+	pgt->ia_bits = VTCR_EL2_IPA(vtcr);
+	pgt->start_level = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
+	pgt->mmu = &kvm->arch.mmu;
+
+	pgd_sz = kvm_pgd_pages(pgt) * PAGE_SIZE;
+	pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL | __GFP_ZERO);
+	if (!pgt->pgd) {
+		kfree(pgt);
+		pgt = NULL;
+	}
+
+	return pgt;
+}
+
+static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+			      enum kvm_pgtable_walk_flags flag,
+			      void * const arg)
+{
+	kvm_pte_t pte = *ptep;
+
+	if (!kvm_pte_valid(pte))
+		return 0;
+
+	put_page(virt_to_page(ptep));
+
+	if (kvm_pte_table(pte, level))
+		free_page((unsigned long)kvm_pte_follow(pte));
+
+	return 0;
+}
+
+void kvm_pgtable_stage2_free_cookie(void *cookie)
+{
+	size_t pgd_sz;
+	struct kvm_pgtable *pgt = cookie;
+	struct kvm_pgtable_walker walker = {
+		.cb	= stage2_free_walker,
+		.flags	= KVM_PGTABLE_WALK_LEAF |
+			  KVM_PGTABLE_WALK_TABLE_POST,
+	};
+
+	if (kvm_pgtable_walk(cookie, 0, BIT(pgt->ia_bits), &walker))
+		kvm_err("Failed to free page-table pages\n");
+
+	pgd_sz = kvm_pgd_pages(pgt) * PAGE_SIZE;
+	free_pages_exact(pgt->pgd, pgd_sz);
+	kfree(pgt);
+}
-- 
2.28.0.rc0.142.g3c755180ce-goog
