From: Ben Gardon <bgardon@google.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Cannon Matthews <cannonmatthews@google.com>,
	Paolo Bonzini <pbonzini@redhat.com>, Peter Xu <peterx@redhat.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Peter Shier <pshier@google.com>,
	Peter Feiner <pfeiner@google.com>,
	Junaid Shahid <junaids@google.com>,
	Jim Mattson <jmattson@google.com>,
	Yulei Zhang <yulei.kernel@gmail.com>,
	Wanpeng Li <kernellwp@gmail.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Xiao Guangrong <xiaoguangrong.eric@gmail.com>,
	Ben Gardon <bgardon@google.com>
Subject: [PATCH 04/22] kvm: mmu: Allocate and free TDP MMU roots
Date: Fri, 25 Sep 2020 14:22:44 -0700
Message-ID: <20200925212302.3979661-5-bgardon@google.com>
In-Reply-To: <20200925212302.3979661-1-bgardon@google.com>

The TDP MMU must be able to allocate paging structure root pages and
track the usage of those pages. Implement a root page allocation and
tracking system that is similar to, but separate from, that of the x86
shadow paging implementation. When future patches change the
synchronization model to allow parallel page faults, these pages will
need to be handled differently from the shadow MMU's root pages.
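
As an illustration of the scheme (and not the kernel code itself), the
following minimal user-space sketch models the root list, the per-root
reference count, and the allocate-then-recheck pattern that resolves
races between vCPUs requesting the same root. All names here are
illustrative, and a pthread mutex stands in for kvm->mmu_lock:

#include <pthread.h>
#include <stdlib.h>

struct tdp_root {
	struct tdp_root *next;	/* stand-in for the kernel's list_head */
	unsigned int role;	/* stand-in for kvm_mmu_page_role */
	int root_count;		/* references held on this root */
};

static struct tdp_root *roots;
static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take a reference on a root already in the list; lock must be held. */
static void get_root(struct tdp_root *root)
{
	root->root_count++;
}

/*
 * Drop a reference; the last put unlinks and frees the root. (In the
 * kernel, the caller already holds mmu_lock; this model takes it here.)
 */
static void put_root(struct tdp_root *root)
{
	pthread_mutex_lock(&mmu_lock);
	if (--root->root_count == 0) {
		struct tdp_root **p = &roots;

		while (*p != root)
			p = &(*p)->next;
		*p = root->next;
		free(root);
	}
	pthread_mutex_unlock(&mmu_lock);
}

/*
 * Find a root matching @role, or insert a freshly allocated one.
 * Mirrors the shape of alloc_tdp_mmu_root(): allocate a candidate
 * outside the lock, then recheck under the lock for a root another
 * vCPU may have installed in the meantime, discarding the candidate
 * and taking a reference on the existing root on a collision.
 */
static struct tdp_root *get_or_alloc_root(unsigned int role)
{
	struct tdp_root *new_root, *root;

	new_root = calloc(1, sizeof(*new_root));
	if (!new_root)
		return NULL;
	new_root->role = role;
	new_root->root_count = 1;

	pthread_mutex_lock(&mmu_lock);
	for (root = roots; root; root = root->next) {
		if (root->role == role) {
			get_root(root);
			pthread_mutex_unlock(&mmu_lock);
			free(new_root);
			return root;
		}
	}
	new_root->next = roots;
	roots = new_root;
	pthread_mutex_unlock(&mmu_lock);
	return new_root;
}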

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This series introduced no new failures.

This series can be viewed in Gerrit at:
	https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/include/asm/kvm_host.h |   1 +
 arch/x86/kvm/mmu/mmu.c          |  27 +++---
 arch/x86/kvm/mmu/mmu_internal.h |   9 ++
 arch/x86/kvm/mmu/tdp_mmu.c      | 157 ++++++++++++++++++++++++++++++++
 arch/x86/kvm/mmu/tdp_mmu.h      |   5 +
 5 files changed, 188 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 35107819f48ae..9ce6b35ecb33a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -972,6 +972,7 @@ struct kvm_arch {
 	 * operations.
 	 */
 	bool tdp_mmu_enabled;
+	struct list_head tdp_mmu_roots;
 };
 
 struct kvm_vm_stat {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0cb0c26939dfc..0f871e36394da 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -170,11 +170,6 @@ module_param(dbg, bool, 0644);
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
 			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)
 
-#define ACC_EXEC_MASK    1
-#define ACC_WRITE_MASK   PT_WRITABLE_MASK
-#define ACC_USER_MASK    PT_USER_MASK
-#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
-
 /* The mask for the R/X bits in EPT PTEs */
 #define PT64_EPT_READABLE_MASK			0x1ull
 #define PT64_EPT_EXECUTABLE_MASK		0x4ull
@@ -232,7 +227,7 @@ struct kvm_shadow_walk_iterator {
 	     __shadow_walk_next(&(_walker), spte))
 
 static struct kmem_cache *pte_list_desc_cache;
-static struct kmem_cache *mmu_page_header_cache;
+struct kmem_cache *mmu_page_header_cache;
 static struct percpu_counter kvm_total_used_mmu_pages;
 
 static u64 __read_mostly shadow_nx_mask;
@@ -3597,10 +3592,14 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 	if (!VALID_PAGE(*root_hpa))
 		return;
 
-	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
-	--sp->root_count;
-	if (!sp->root_count && sp->role.invalid)
-		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+	if (is_tdp_mmu_root(kvm, *root_hpa)) {
+		kvm_tdp_mmu_put_root_hpa(kvm, *root_hpa);
+	} else {
+		sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
+		--sp->root_count;
+		if (!sp->root_count && sp->role.invalid)
+			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+	}
 
 	*root_hpa = INVALID_PAGE;
 }
@@ -3691,7 +3690,13 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	unsigned i;
 
 	if (shadow_root_level >= PT64_ROOT_4LEVEL) {
-		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
+		if (vcpu->kvm->arch.tdp_mmu_enabled) {
+			root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
+		} else {
+			root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level,
+					      true);
+		}
+
 		if (!VALID_PAGE(root))
 			return -ENOSPC;
 		vcpu->arch.mmu->root_hpa = root;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 65bb110847858..530b7d893c7b3 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -41,8 +41,12 @@ struct kvm_mmu_page {
 
 	/* Number of writes since the last time traversal visited this page.  */
 	atomic_t write_flooding_count;
+
+	bool tdp_mmu_page;
 };
 
+extern struct kmem_cache *mmu_page_header_cache;
+
 static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -69,6 +73,11 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
+#define ACC_EXEC_MASK    1
+#define ACC_WRITE_MASK   PT_WRITABLE_MASK
+#define ACC_USER_MASK    PT_USER_MASK
+#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+
 /* Functions for interpreting SPTEs */
 kvm_pfn_t spte_to_pfn(u64 pte);
 bool is_mmio_spte(u64 spte);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 8241e18c111e6..cdca829e42040 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1,5 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 
+#include "mmu.h"
+#include "mmu_internal.h"
 #include "tdp_mmu.h"
 
 static bool __read_mostly tdp_mmu_enabled = true;
@@ -25,10 +27,165 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 
 	/* This should not be changed for the lifetime of the VM. */
 	kvm->arch.tdp_mmu_enabled = true;
+
+	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
 }
 
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 {
 	if (!kvm->arch.tdp_mmu_enabled)
 		return;
+
+	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
+}
+
+#define for_each_tdp_mmu_root(_kvm, _root)			    \
+	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
+
+bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+{
+	struct kvm_mmu_page *root;
+
+	if (!kvm->arch.tdp_mmu_enabled)
+		return false;
+
+	root = to_shadow_page(hpa);
+
+	if (WARN_ON(!root))
+		return false;
+
+	return root->tdp_mmu_page;
+}
+
+static void free_tdp_mmu_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+	lockdep_assert_held(&kvm->mmu_lock);
+
+	WARN_ON(root->root_count);
+	WARN_ON(!root->tdp_mmu_page);
+
+	list_del(&root->link);
+
+	free_page((unsigned long)root->spt);
+	kmem_cache_free(mmu_page_header_cache, root);
+}
+
+static void put_tdp_mmu_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+	lockdep_assert_held(&kvm->mmu_lock);
+
+	root->root_count--;
+	if (!root->root_count)
+		free_tdp_mmu_root(kvm, root);
+}
+
+static void get_tdp_mmu_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+	lockdep_assert_held(&kvm->mmu_lock);
+	WARN_ON(!root->root_count);
+
+	root->root_count++;
+}
+
+void kvm_tdp_mmu_put_root_hpa(struct kvm *kvm, hpa_t root_hpa)
+{
+	struct kvm_mmu_page *root;
+
+	root = to_shadow_page(root_hpa);
+
+	if (WARN_ON(!root))
+		return;
+
+	put_tdp_mmu_root(kvm, root);
+}
+
+static struct kvm_mmu_page *find_tdp_mmu_root_with_role(
+		struct kvm *kvm, union kvm_mmu_page_role role)
+{
+	struct kvm_mmu_page *root;
+
+	lockdep_assert_held(&kvm->mmu_lock);
+	for_each_tdp_mmu_root(kvm, root) {
+		WARN_ON(!root->root_count);
+
+		if (root->role.word == role.word)
+			return root;
+	}
+
+	return NULL;
+}
+
+static struct kvm_mmu_page *alloc_tdp_mmu_root(struct kvm_vcpu *vcpu,
+					       union kvm_mmu_page_role role)
+{
+	struct kvm_mmu_page *new_root;
+	struct kvm_mmu_page *root;
+
+	new_root = kvm_mmu_memory_cache_alloc(
+			&vcpu->arch.mmu_page_header_cache);
+	new_root->spt = kvm_mmu_memory_cache_alloc(
+			&vcpu->arch.mmu_shadow_page_cache);
+	set_page_private(virt_to_page(new_root->spt), (unsigned long)new_root);
+
+	new_root->role.word = role.word;
+	new_root->root_count = 1;
+	new_root->gfn = 0;
+	new_root->tdp_mmu_page = true;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+
+	/* Check that no matching root exists before adding this one. */
+	root = find_tdp_mmu_root_with_role(vcpu->kvm, role);
+	if (root) {
+		get_tdp_mmu_root(vcpu->kvm, root);
+		spin_unlock(&vcpu->kvm->mmu_lock);
+		free_page((unsigned long)new_root->spt);
+		kmem_cache_free(mmu_page_header_cache, new_root);
+		return root;
+	}
+
+	list_add(&new_root->link, &vcpu->kvm->arch.tdp_mmu_roots);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+
+	return new_root;
+}
+
+static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *root;
+	union kvm_mmu_page_role role;
+
+	role = vcpu->arch.mmu->mmu_role.base;
+	role.level = vcpu->arch.mmu->shadow_root_level;
+	role.direct = true;
+	role.gpte_is_8_bytes = true;
+	role.access = ACC_ALL;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+
+	/* Search for an already allocated root with the same role. */
+	root = find_tdp_mmu_root_with_role(vcpu->kvm, role);
+	if (root) {
+		get_tdp_mmu_root(vcpu->kvm, root);
+		spin_unlock(&vcpu->kvm->mmu_lock);
+		return root;
+	}
+
+	spin_unlock(&vcpu->kvm->mmu_lock);
+
+	/* If there is no appropriate root, allocate one. */
+	root = alloc_tdp_mmu_root(vcpu, role);
+
+	return root;
+}
+
+hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *root;
+
+	root = get_tdp_mmu_vcpu_root(vcpu);
+	if (!root)
+		return INVALID_PAGE;
+
+	return __pa(root->spt);
 }
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index dd3764f5a9aa3..9274debffeaa1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -7,4 +7,9 @@
 
 void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+
+bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
+hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
+void kvm_tdp_mmu_put_root_hpa(struct kvm *kvm, hpa_t root_hpa);
+
 #endif /* __KVM_X86_MMU_TDP_MMU_H */
-- 
2.28.0.709.gb0816b6eb0-goog

