From: Sean Christopherson <sean.j.christopherson@intel.com>
To: Ben Gardon <bgardon@google.com>
Cc: linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
Cannon Matthews <cannonmatthews@google.com>,
Paolo Bonzini <pbonzini@redhat.com>, Peter Xu <peterx@redhat.com>,
Peter Shier <pshier@google.com>,
Peter Feiner <pfeiner@google.com>,
Junaid Shahid <junaids@google.com>,
Jim Mattson <jmattson@google.com>,
Yulei Zhang <yulei.kernel@gmail.com>,
Wanpeng Li <kernellwp@gmail.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
Xiao Guangrong <xiaoguangrong.eric@gmail.com>
Subject: Re: [PATCH 04/22] kvm: mmu: Allocate and free TDP MMU roots
Date: Tue, 29 Sep 2020 23:06:11 -0700 [thread overview]
Message-ID: <20200930060610.GA29659@linux.intel.com> (raw)
In-Reply-To: <20200925212302.3979661-5-bgardon@google.com>
On Fri, Sep 25, 2020 at 02:22:44PM -0700, Ben Gardon wrote:
static u64 __read_mostly shadow_nx_mask;
> @@ -3597,10 +3592,14 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
> if (!VALID_PAGE(*root_hpa))
> return;
>
> - sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
> - --sp->root_count;
> - if (!sp->root_count && sp->role.invalid)
> - kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
> + if (is_tdp_mmu_root(kvm, *root_hpa)) {
> + kvm_tdp_mmu_put_root_hpa(kvm, *root_hpa);
> + } else {
> + sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
> + --sp->root_count;
> + if (!sp->root_count && sp->role.invalid)
> + kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
Hmm, I see that future patches use put_tdp_mmu_root()/get_tdp_mmu_root(),
but the code itself isn't specific to the TDP MMU. Even if this ends up
being the only non-TDP user of get/put, I think it'd be worth making them
common helpers, e.g.
sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
if (mmu_put_root(sp)) {
if (is_tdp_mmu(...))
kvm_tdp_mmu_free_root(kvm, sp);
else if (sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
}
> + }
>
> *root_hpa = INVALID_PAGE;
> }
> @@ -3691,7 +3690,13 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
> unsigned i;
>
> if (shadow_root_level >= PT64_ROOT_4LEVEL) {
> - root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
> + if (vcpu->kvm->arch.tdp_mmu_enabled) {
I believe this will break 32-bit NPT. Or at a minimum, look weird. It'd
be better to explicitly disable the TDP MMU on 32-bit KVM, then this becomes
if (vcpu->kvm->arch.tdp_mmu_enabled) {
} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
} else {
}
> + root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
> + } else {
> + root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level,
> + true);
> + }
May not matter in the end, but the braces aren't needed.
> +
> if (!VALID_PAGE(root))
> return -ENOSPC;
> vcpu->arch.mmu->root_hpa = root;
> diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
> index 65bb110847858..530b7d893c7b3 100644
> --- a/arch/x86/kvm/mmu/mmu_internal.h
> +++ b/arch/x86/kvm/mmu/mmu_internal.h
> @@ -41,8 +41,12 @@ struct kvm_mmu_page {
>
> /* Number of writes since the last time traversal visited this page. */
> atomic_t write_flooding_count;
> +
> + bool tdp_mmu_page;
> };
>
> +extern struct kmem_cache *mmu_page_header_cache;
> +
> static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
> {
> struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
> @@ -69,6 +73,11 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
> (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
> #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
>
> +#define ACC_EXEC_MASK 1
> +#define ACC_WRITE_MASK PT_WRITABLE_MASK
> +#define ACC_USER_MASK PT_USER_MASK
> +#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
> +
> /* Functions for interpreting SPTEs */
> kvm_pfn_t spte_to_pfn(u64 pte);
> bool is_mmio_spte(u64 spte);
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 8241e18c111e6..cdca829e42040 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -1,5 +1,7 @@
> /* SPDX-License-Identifier: GPL-2.0 */
>
> +#include "mmu.h"
> +#include "mmu_internal.h"
> #include "tdp_mmu.h"
>
> static bool __read_mostly tdp_mmu_enabled = true;
> @@ -25,10 +27,165 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
>
> /* This should not be changed for the lifetime of the VM. */
> kvm->arch.tdp_mmu_enabled = true;
> +
> + INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
> }
>
> void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
> {
> if (!kvm->arch.tdp_mmu_enabled)
> return;
> +
> + WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
> +}
> +
> +#define for_each_tdp_mmu_root(_kvm, _root) \
> + list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
> +
> +bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
> +{
> + struct kvm_mmu_page *root;
> +
> + if (!kvm->arch.tdp_mmu_enabled)
> + return false;
> +
> + root = to_shadow_page(hpa);
> +
> + if (WARN_ON(!root))
> + return false;
> +
> + return root->tdp_mmu_page;
Why all the extra checks?
> +}
> +
> +static void free_tdp_mmu_root(struct kvm *kvm, struct kvm_mmu_page *root)
> +{
> + lockdep_assert_held(&kvm->mmu_lock);
> +
> + WARN_ON(root->root_count);
> + WARN_ON(!root->tdp_mmu_page);
> +
> + list_del(&root->link);
> +
> + free_page((unsigned long)root->spt);
> + kmem_cache_free(mmu_page_header_cache, root);
> +}
> +
> +static void put_tdp_mmu_root(struct kvm *kvm, struct kvm_mmu_page *root)
> +{
> + lockdep_assert_held(&kvm->mmu_lock);
> +
> + root->root_count--;
> + if (!root->root_count)
> + free_tdp_mmu_root(kvm, root);
> +}
> +
> +static void get_tdp_mmu_root(struct kvm *kvm, struct kvm_mmu_page *root)
> +{
> + lockdep_assert_held(&kvm->mmu_lock);
> + WARN_ON(!root->root_count);
> +
> + root->root_count++;
> +}
> +
> +void kvm_tdp_mmu_put_root_hpa(struct kvm *kvm, hpa_t root_hpa)
> +{
> + struct kvm_mmu_page *root;
> +
> + root = to_shadow_page(root_hpa);
> +
> + if (WARN_ON(!root))
> + return;
> +
> + put_tdp_mmu_root(kvm, root);
> +}
> +
> +static struct kvm_mmu_page *find_tdp_mmu_root_with_role(
> + struct kvm *kvm, union kvm_mmu_page_role role)
> +{
> + struct kvm_mmu_page *root;
> +
> + lockdep_assert_held(&kvm->mmu_lock);
> + for_each_tdp_mmu_root(kvm, root) {
> + WARN_ON(!root->root_count);
> +
> + if (root->role.word == role.word)
> + return root;
> + }
> +
> + return NULL;
> +}
> +
> +static struct kvm_mmu_page *alloc_tdp_mmu_root(struct kvm_vcpu *vcpu,
> + union kvm_mmu_page_role role)
> +{
> + struct kvm_mmu_page *new_root;
> + struct kvm_mmu_page *root;
> +
> + new_root = kvm_mmu_memory_cache_alloc(
> + &vcpu->arch.mmu_page_header_cache);
> + new_root->spt = kvm_mmu_memory_cache_alloc(
> + &vcpu->arch.mmu_shadow_page_cache);
> + set_page_private(virt_to_page(new_root->spt), (unsigned long)new_root);
> +
> + new_root->role.word = role.word;
> + new_root->root_count = 1;
> + new_root->gfn = 0;
> + new_root->tdp_mmu_page = true;
> +
> + spin_lock(&vcpu->kvm->mmu_lock);
> +
> + /* Check that no matching root exists before adding this one. */
> + root = find_tdp_mmu_root_with_role(vcpu->kvm, role);
> + if (root) {
> + get_tdp_mmu_root(vcpu->kvm, root);
> + spin_unlock(&vcpu->kvm->mmu_lock);
Hrm, I'm not a big fan of dropping locks in the middle of functions, but the
alternatives aren't great. :-/ Best I can come up with is
if (root)
get_tdp_mmu_root()
else
list_add();
spin_unlock();
if (root) {
free_page()
kmem_cache_free()
} else {
root = new_root;
}
return root;
Not sure that's any better.
> + free_page((unsigned long)new_root->spt);
> + kmem_cache_free(mmu_page_header_cache, new_root);
> + return root;
> + }
> +
> + list_add(&new_root->link, &vcpu->kvm->arch.tdp_mmu_roots);
> + spin_unlock(&vcpu->kvm->mmu_lock);
> +
> + return new_root;
> +}
> +
> +static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
> +{
> + struct kvm_mmu_page *root;
> + union kvm_mmu_page_role role;
> +
> + role = vcpu->arch.mmu->mmu_role.base;
> + role.level = vcpu->arch.mmu->shadow_root_level;
> + role.direct = true;
> + role.gpte_is_8_bytes = true;
> + role.access = ACC_ALL;
> +
> + spin_lock(&vcpu->kvm->mmu_lock);
> +
> + /* Search for an already allocated root with the same role. */
> + root = find_tdp_mmu_root_with_role(vcpu->kvm, role);
> + if (root) {
> + get_tdp_mmu_root(vcpu->kvm, root);
> + spin_unlock(&vcpu->kvm->mmu_lock);
Rather than manually unlock and return, this can be
if (root)
get_tdp_mmu_root();
spin_unlock()
if (!root)
root = alloc_tdp_mmu_root();
return root;
You could also add a helper to do the "get" along with the "find". Not sure
if that's worth the code.
> + return root;
> + }
> +
> + spin_unlock(&vcpu->kvm->mmu_lock);
> +
> + /* If there is no appropriate root, allocate one. */
> + root = alloc_tdp_mmu_root(vcpu, role);
> +
> + return root;
> +}
> +
> +hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
> +{
> + struct kvm_mmu_page *root;
> +
> + root = get_tdp_mmu_vcpu_root(vcpu);
> + if (!root)
> + return INVALID_PAGE;
> +
> + return __pa(root->spt);
> }
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index dd3764f5a9aa3..9274debffeaa1 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -7,4 +7,9 @@
>
> void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
> void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
> +
> +bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
> +hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
> +void kvm_tdp_mmu_put_root_hpa(struct kvm *kvm, hpa_t root_hpa);
> +
> #endif /* __KVM_X86_MMU_TDP_MMU_H */
> --
> 2.28.0.709.gb0816b6eb0-goog
>
next prev parent reply other threads:[~2020-09-30 6:06 UTC|newest]
Thread overview: 105+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-09-25 21:22 [PATCH 00/22] Introduce the TDP MMU Ben Gardon
2020-09-25 21:22 ` [PATCH 01/22] kvm: mmu: Separate making SPTEs from set_spte Ben Gardon
2020-09-30 4:55 ` Sean Christopherson
2020-09-30 23:03 ` Ben Gardon
2020-09-25 21:22 ` [PATCH 02/22] kvm: mmu: Introduce tdp_iter Ben Gardon
2020-09-26 0:04 ` Paolo Bonzini
2020-09-30 5:06 ` Sean Christopherson
2020-09-26 0:54 ` Paolo Bonzini
2020-09-30 5:08 ` Sean Christopherson
2020-09-30 5:24 ` Sean Christopherson
2020-09-30 6:24 ` Paolo Bonzini
2020-09-30 23:20 ` Eric van Tassell
2020-09-30 23:34 ` Paolo Bonzini
2020-10-01 0:07 ` Sean Christopherson
2020-09-25 21:22 ` [PATCH 03/22] kvm: mmu: Init / Uninit the TDP MMU Ben Gardon
2020-09-26 0:06 ` Paolo Bonzini
2020-09-30 5:34 ` Sean Christopherson
2020-09-30 18:36 ` Ben Gardon
2020-09-30 16:57 ` Sean Christopherson
2020-09-30 17:39 ` Paolo Bonzini
2020-09-30 18:42 ` Ben Gardon
2020-09-25 21:22 ` [PATCH 04/22] kvm: mmu: Allocate and free TDP MMU roots Ben Gardon
2020-09-30 6:06 ` Sean Christopherson [this message]
2020-09-30 6:26 ` Paolo Bonzini
2020-09-30 15:38 ` Sean Christopherson
2020-10-12 22:59 ` Ben Gardon
2020-10-12 23:59 ` Sean Christopherson
2020-09-25 21:22 ` [PATCH 05/22] kvm: mmu: Add functions to handle changed TDP SPTEs Ben Gardon
2020-09-26 0:39 ` Paolo Bonzini
2020-09-28 17:23 ` Paolo Bonzini
2020-09-25 21:22 ` [PATCH 06/22] kvm: mmu: Make address space ID a property of memslots Ben Gardon
2020-09-30 6:10 ` Sean Christopherson
2020-09-30 23:11 ` Ben Gardon
2020-09-25 21:22 ` [PATCH 07/22] kvm: mmu: Support zapping SPTEs in the TDP MMU Ben Gardon
2020-09-26 0:14 ` Paolo Bonzini
2020-09-30 6:15 ` Sean Christopherson
2020-09-30 6:28 ` Paolo Bonzini
2020-09-25 21:22 ` [PATCH 08/22] kvm: mmu: Separate making non-leaf sptes from link_shadow_page Ben Gardon
2020-09-25 21:22 ` [PATCH 09/22] kvm: mmu: Remove disallowed_hugepage_adjust shadow_walk_iterator arg Ben Gardon
2020-09-30 16:19 ` Sean Christopherson
2020-09-25 21:22 ` [PATCH 10/22] kvm: mmu: Add TDP MMU PF handler Ben Gardon
2020-09-26 0:24 ` Paolo Bonzini
2020-09-30 16:37 ` Sean Christopherson
2020-09-30 16:55 ` Paolo Bonzini
2020-09-30 17:37 ` Paolo Bonzini
2020-10-06 22:35 ` Ben Gardon
2020-10-06 22:33 ` Ben Gardon
2020-10-07 20:55 ` Sean Christopherson
2020-09-25 21:22 ` [PATCH 11/22] kvm: mmu: Factor out allocating a new tdp_mmu_page Ben Gardon
2020-09-26 0:22 ` Paolo Bonzini
2020-09-30 18:53 ` Ben Gardon
2020-09-25 21:22 ` [PATCH 12/22] kvm: mmu: Allocate struct kvm_mmu_pages for all pages in TDP MMU Ben Gardon
2020-09-25 21:22 ` [PATCH 13/22] kvm: mmu: Support invalidate range MMU notifier for " Ben Gardon
2020-09-30 17:03 ` Sean Christopherson
2020-09-30 23:15 ` Ben Gardon
2020-09-30 23:24 ` Sean Christopherson
2020-09-30 23:27 ` Ben Gardon
2020-09-25 21:22 ` [PATCH 14/22] kvm: mmu: Add access tracking for tdp_mmu Ben Gardon
2020-09-26 0:32 ` Paolo Bonzini
2020-09-30 17:48 ` Sean Christopherson
2020-10-06 23:38 ` Ben Gardon
2020-09-25 21:22 ` [PATCH 15/22] kvm: mmu: Support changed pte notifier in tdp MMU Ben Gardon
2020-09-26 0:33 ` Paolo Bonzini
2020-09-28 15:11 ` Paolo Bonzini
2020-10-07 16:53 ` Ben Gardon
2020-10-07 17:18 ` Paolo Bonzini
2020-10-07 17:30 ` Ben Gardon
2020-10-07 17:54 ` Paolo Bonzini
2020-09-25 21:22 ` [PATCH 16/22] kvm: mmu: Add dirty logging handler for changed sptes Ben Gardon
2020-09-26 0:45 ` Paolo Bonzini
2020-09-25 21:22 ` [PATCH 17/22] kvm: mmu: Support dirty logging for the TDP MMU Ben Gardon
2020-09-26 1:04 ` Paolo Bonzini
2020-10-08 18:27 ` Ben Gardon
2020-09-29 15:07 ` Paolo Bonzini
2020-09-30 18:04 ` Sean Christopherson
2020-09-30 18:08 ` Paolo Bonzini
2020-09-25 21:22 ` [PATCH 18/22] kvm: mmu: Support disabling dirty logging for the tdp MMU Ben Gardon
2020-09-26 1:09 ` Paolo Bonzini
2020-10-07 16:30 ` Ben Gardon
2020-10-07 17:21 ` Paolo Bonzini
2020-10-07 17:28 ` Ben Gardon
2020-10-07 17:53 ` Paolo Bonzini
2020-09-25 21:22 ` [PATCH 19/22] kvm: mmu: Support write protection for nesting in " Ben Gardon
2020-09-30 18:06 ` Sean Christopherson
2020-09-25 21:23 ` [PATCH 20/22] kvm: mmu: NX largepage recovery for TDP MMU Ben Gardon
2020-09-26 1:14 ` Paolo Bonzini
2020-09-30 22:23 ` Ben Gardon
2020-09-29 18:24 ` Paolo Bonzini
2020-09-30 18:15 ` Sean Christopherson
2020-09-30 19:56 ` Paolo Bonzini
2020-09-30 22:33 ` Ben Gardon
2020-09-30 22:27 ` Ben Gardon
2020-09-25 21:23 ` [PATCH 21/22] kvm: mmu: Support MMIO in the " Ben Gardon
2020-09-30 18:19 ` Sean Christopherson
2020-09-25 21:23 ` [PATCH 22/22] kvm: mmu: Don't clear write flooding count for direct roots Ben Gardon
2020-09-26 1:25 ` Paolo Bonzini
2020-10-05 22:48 ` Ben Gardon
2020-10-05 23:44 ` Sean Christopherson
2020-10-06 16:19 ` Ben Gardon
2020-09-26 1:14 ` [PATCH 00/22] Introduce the TDP MMU Paolo Bonzini
2020-09-28 17:31 ` Paolo Bonzini
2020-09-29 17:40 ` Ben Gardon
2020-09-29 18:10 ` Paolo Bonzini
2020-09-30 6:19 ` Sean Christopherson
2020-09-30 6:30 ` Paolo Bonzini
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200930060610.GA29659@linux.intel.com \
--to=sean.j.christopherson@intel.com \
--cc=bgardon@google.com \
--cc=cannonmatthews@google.com \
--cc=jmattson@google.com \
--cc=junaids@google.com \
--cc=kernellwp@gmail.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=peterx@redhat.com \
--cc=pfeiner@google.com \
--cc=pshier@google.com \
--cc=vkuznets@redhat.com \
--cc=xiaoguangrong.eric@gmail.com \
--cc=yulei.kernel@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).