From: Ben Gardon <bgardon@google.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>, Peter Xu <peterx@redhat.com>,
Sean Christopherson <seanjc@google.com>,
Peter Shier <pshier@google.com>,
Yulei Zhang <yulei.kernel@gmail.com>,
Wanpeng Li <kernellwp@gmail.com>,
Xiao Guangrong <xiaoguangrong.eric@gmail.com>,
Kai Huang <kai.huang@intel.com>,
Keqian Zhu <zhukeqian1@huawei.com>,
Ben Gardon <bgardon@google.com>
Subject: [PATCH v3 8/8] KVM: x86/mmu: Lazily allocate memslot rmaps
Date: Thu, 6 May 2021 11:42:41 -0700 [thread overview]
Message-ID: <20210506184241.618958-9-bgardon@google.com> (raw)
In-Reply-To: <20210506184241.618958-1-bgardon@google.com>
If the TDP MMU is in use, wait to allocate the rmaps until the shadow
MMU is actually used, i.e. until a nested VM is launched. This saves
memory equal to 0.2% of guest memory in cases where the TDP MMU is used
and there are no nested guests.
Signed-off-by: Ben Gardon <bgardon@google.com>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/mmu/mmu.c | 14 ++++++++++---
arch/x86/kvm/mmu/tdp_mmu.c | 6 ++++--
arch/x86/kvm/mmu/tdp_mmu.h | 4 ++--
arch/x86/kvm/x86.c | 37 ++++++++++++++++++++++++++++++++-
5 files changed, 54 insertions(+), 8 deletions(-)
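
Note for reviewers (not for the commit message): below is a minimal,
self-contained userspace sketch of the allocation scheme this patch
implements. The names here (struct vm, alloc_rmaps_once, have_rmaps)
are invented for illustration; the real code uses kvm->slots_arch_lock,
alloc_memslot_rmap(), and kvm->arch.memslots_have_rmaps as in the hunks
that follow.

/*
 * Illustrative only: allocate per-slot rmap arrays the first time they
 * are needed, serialized by a mutex, and publish completion with a
 * release store so lock-free readers that observe the flag also observe
 * fully initialized arrays.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

#define NR_SLOTS 8

struct vm {
	pthread_mutex_t slots_lock;	/* plays the role of slots_arch_lock */
	_Atomic bool have_rmaps;	/* plays the role of memslots_have_rmaps */
	unsigned long *rmaps[NR_SLOTS];	/* one rmap array per slot */
	size_t npages[NR_SLOTS];
};

static bool vm_have_rmaps(struct vm *vm)
{
	/* counterpart of smp_load_acquire() in kvm_memslots_have_rmaps() */
	return atomic_load_explicit(&vm->have_rmaps, memory_order_acquire);
}

static int alloc_rmaps_once(struct vm *vm)
{
	int i;

	if (vm_have_rmaps(vm))
		return 0;

	pthread_mutex_lock(&vm->slots_lock);
	for (i = 0; i < NR_SLOTS; i++) {
		/*
		 * Already-populated slots are skipped, so a caller that
		 * lost the race for the lock does no duplicate work.
		 */
		if (vm->rmaps[i] || !vm->npages[i])
			continue;
		vm->rmaps[i] = calloc(vm->npages[i], sizeof(*vm->rmaps[i]));
		if (!vm->rmaps[i]) {
			pthread_mutex_unlock(&vm->slots_lock);
			return -1;
		}
	}
	/* counterpart of smp_store_release(): arrays visible before the flag */
	atomic_store_explicit(&vm->have_rmaps, true, memory_order_release);
	pthread_mutex_unlock(&vm->slots_lock);
	return 0;
}
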
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00065f9bbc5e..7b8e1532fb55 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1860,5 +1860,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
int kvm_cpu_dirty_log_size(void);
inline bool kvm_memslots_have_rmaps(struct kvm *kvm);
+int alloc_all_memslots_rmaps(struct kvm *kvm);
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 48067c572c02..e3a3b65829c5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3306,6 +3306,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
}
}
+ r = alloc_all_memslots_rmaps(vcpu->kvm);
+ if (r)
+ return r;
+
write_lock(&vcpu->kvm->mmu_lock);
r = make_mmu_pages_available(vcpu);
if (r < 0)
@@ -5494,9 +5498,13 @@ void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
- kvm_mmu_init_tdp_mmu(kvm);
-
- kvm->arch.memslots_have_rmaps = true;
+ if (!kvm_mmu_init_tdp_mmu(kvm))
+ /*
+ * No smp_load/store wrappers needed here as we are in
+ * VM init and there cannot be any memslots / other threads
+ * accessing this struct kvm yet.
+ */
+ kvm->arch.memslots_have_rmaps = true;
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 83cbdbe5de5a..5342aca2c8e0 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -14,10 +14,10 @@ static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
/* Initializes the TDP MMU for the VM, if enabled. */
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
- return;
+ return false;
/* This should not be changed for the lifetime of the VM. */
kvm->arch.tdp_mmu_enabled = true;
@@ -25,6 +25,8 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
+
+ return true;
}
static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 5fdf63090451..b046ab5137a1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -80,12 +80,12 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
#ifdef CONFIG_X86_64
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
-static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
+static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1098ab73a704..95e74fb9fc20 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10868,9 +10868,44 @@ static int alloc_memslot_rmap(struct kvm_memory_slot *slot,
return -ENOMEM;
}
+int alloc_all_memslots_rmaps(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
+ int r = 0;
+ int i;
+
+ if (kvm_memslots_have_rmaps(kvm))
+ return 0;
+
+ mutex_lock(&kvm->slots_arch_lock);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(slot, slots) {
+ r = alloc_memslot_rmap(slot, slot->npages);
+ if (r) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return r;
+ }
+ }
+ }
+
+ /*
+ * memslots_have_rmaps is set and read in different lock contexts,
+ * so protect it with smp_load/store.
+ */
+ smp_store_release(&kvm->arch.memslots_have_rmaps, true);
+ mutex_unlock(&kvm->slots_arch_lock);
+ return 0;
+}
+
bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
- return kvm->arch.memslots_have_rmaps;
+ /*
+ * memslots_have_rmaps is set and read in different lock contexts,
+ * so protect it with smp_load/store.
+ */
+ return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
}
static int kvm_alloc_memslot_metadata(struct kvm *kvm,
--
2.31.1.607.g51e8a6a459-goog
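
As an aside, and not part of the patch: the release/acquire pairing on
memslots_have_rmaps is the usual publish/consume pattern. A minimal,
compilable userspace illustration follows; the names are invented and
it builds with -pthread.

/*
 * Analogue (not kernel code) of the smp_store_release()/smp_load_acquire()
 * pairing: the writer publishes fully initialized data with a release
 * store, and a lock-free reader that observes the flag via an acquire
 * load is guaranteed to also observe the data the flag guards.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static int *rmap;		/* lazily allocated data */
static _Atomic int have_rmap;	/* publication flag */

static void *writer(void *arg)
{
	(void)arg;
	rmap = calloc(1, sizeof(*rmap));
	rmap[0] = 42;
	/* like smp_store_release(): init is ordered before the flag */
	atomic_store_explicit(&have_rmap, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* like smp_load_acquire(): if the flag is seen, so is rmap */
	while (!atomic_load_explicit(&have_rmap, memory_order_acquire))
		;
	printf("rmap[0] = %d\n", rmap[0]);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	pthread_create(&t[0], NULL, reader, NULL);
	pthread_create(&t[1], NULL, writer, NULL);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	return 0;
}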