From: David Matlack <dmatlack@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: kvm@vger.kernel.org, Ben Gardon <bgardon@google.com>,
Joerg Roedel <joro@8bytes.org>, Jim Mattson <jmattson@google.com>,
Wanpeng Li <wanpengli@tencent.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
Sean Christopherson <seanjc@google.com>,
David Matlack <dmatlack@google.com>
Subject: [RFC PATCH 5/6] KVM: x86/mmu: Avoid memslot lookup in rmap_add
Date: Fri, 13 Aug 2021 20:35:03 +0000
Message-ID: <20210813203504.2742757-6-dmatlack@google.com>
In-Reply-To: <20210813203504.2742757-1-dmatlack@google.com>

Avoid the memslot lookup in rmap_add() by passing the memslot down from
the fault-handling code to mmu_set_spte() and then to rmap_add(). In the
shadow-paging prefetch path, open-code pte_prefetch_gfn_to_pfn() in
FNAME(prefetch_gpte) so that the memslot it looks up can be reused
rather than discarded, and remove the now-unused helper.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
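A minimal sketch of the shape this series builds toward (not part of
the patch: example_fault_path() is a hypothetical stand-in for
__direct_map(), the fault->slot field comes from patch 3 of this
series, and the shadow-page walk is elided):

static int example_fault_path(struct kvm_vcpu *vcpu,
			      struct kvm_page_fault *fault)
{
	u64 *sptep = NULL;	/* would come from the shadow-page walk */

	/* One memslot lookup, done where the fault is first handled. */
	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);

	/*
	 * The slot rides along in struct kvm_page_fault, so rmap_add()
	 * consumes it instead of calling kvm_vcpu_gfn_to_memslot() again.
	 */
	return mmu_set_spte(vcpu, fault->slot, sptep, ACC_ALL,
			    fault->write, fault->goal_level, fault->gfn,
			    fault->pfn, fault->prefault,
			    fault->map_writable);
}
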
arch/x86/kvm/mmu/mmu.c | 29 ++++++++---------------------
arch/x86/kvm/mmu/paging_tmpl.h | 12 +++++++++---
2 files changed, 17 insertions(+), 24 deletions(-)
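
The pte_prefetch_gfn_to_pfn() removal follows the same idea; a sketch
of the before/after in FNAME(prefetch_gpte), assuming the existing
signatures of gfn_to_memslot_dirty_bitmap() and
gfn_to_pfn_memslot_atomic():

	/* Before: the helper looked the slot up, then threw it away. */
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, no_dirty_log);

	/*
	 * After: open-coding the lookup keeps the slot in the caller,
	 * so it can be handed to mmu_set_spte() and on to rmap_add().
	 */
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
	if (!slot)
		return false;
	pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
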
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c148d481e9b5..41e2ef8ad09b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1630,16 +1630,15 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 #define RMAP_RECYCLE_THRESHOLD 1000
 
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static void rmap_add(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+		     u64 *spte, gfn_t gfn)
 {
-	struct kvm_memory_slot *slot;
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_head *rmap_head;
 	int rmap_count;
 
 	sp = sptep_to_sp(spte);
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
 
 	rmap_count = pte_list_add(vcpu, spte, rmap_head);
@@ -2679,9 +2678,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	return ret;
 }
 
-static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-			unsigned int pte_access, bool write_fault, int level,
-			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
+static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+			u64 *sptep, unsigned int pte_access, bool write_fault,
+			int level, gfn_t gfn, kvm_pfn_t pfn, bool speculative,
 			bool host_writable)
 {
 	int was_rmapped = 0;
@@ -2744,24 +2743,12 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 	if (!was_rmapped) {
 		kvm_update_page_stats(vcpu->kvm, level, 1);
-		rmap_add(vcpu, sptep, gfn);
+		rmap_add(vcpu, slot, sptep, gfn);
 	}
 
 	return ret;
 }
 
-static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
-					 bool no_dirty_log)
-{
-	struct kvm_memory_slot *slot;
-
-	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
-	if (!slot)
-		return KVM_PFN_ERR_FAULT;
-
-	return gfn_to_pfn_memslot_atomic(slot, gfn);
-}
-
 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp,
 				    u64 *start, u64 *end)
@@ -2782,7 +2769,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 		return -1;
 
 	for (i = 0; i < ret; i++, gfn++, start++) {
-		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
+		mmu_set_spte(vcpu, slot, start, access, false, sp->role.level, gfn,
 			     page_to_pfn(pages[i]), true, true);
 		put_page(pages[i]);
 	}
@@ -2979,7 +2966,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 			account_huge_nx_page(vcpu->kvm, sp);
 	}
 
-	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
+	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
 			   fault->write, fault->goal_level, base_gfn, fault->pfn,
 			   fault->prefault, fault->map_writable);
 	if (ret == RET_PF_SPURIOUS)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 50ade6450ace..653ca44afa58 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -561,6 +561,7 @@ static bool
 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
 {
+	struct kvm_memory_slot *slot;
 	unsigned pte_access;
 	gfn_t gfn;
 	kvm_pfn_t pfn;
@@ -573,8 +574,13 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	gfn = gpte_to_gfn(gpte);
 	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
 	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
-	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
+
+	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn,
 			no_dirty_log && (pte_access & ACC_WRITE_MASK));
+	if (!slot)
+		return false;
+
+	pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
 	if (is_error_pfn(pfn))
 		return false;
 
@@ -582,7 +588,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * we call mmu_set_spte() with host_writable = true because
 	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 	 */
-	mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
+	mmu_set_spte(vcpu, slot, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
 		     true, true);
 	kvm_release_pfn_clean(pfn);
@@ -749,7 +755,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		}
 	}
 
-	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, fault->write,
+	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access, fault->write,
 			   it.level, base_gfn, fault->pfn, fault->prefault,
 			   fault->map_writable);
 	if (ret == RET_PF_SPURIOUS)
--
2.33.0.rc1.237.g0d66db33f3-goog

Thread overview: 18+ messages
2021-08-13 20:34 [RFC PATCH 0/6] Pass memslot around during page fault handling David Matlack
2021-08-13 20:34 ` [RFC PATCH 1/6] KVM: x86/mmu: Rename try_async_pf to kvm_faultin_pfn in comment David Matlack
2021-08-13 20:35 ` [RFC PATCH 2/6] KVM: x86/mmu: Fold rmap_recycle into rmap_add David Matlack
2021-08-13 20:35 ` [RFC PATCH 3/6] KVM: x86/mmu: Pass the memslot around via struct kvm_page_fault David Matlack
2021-08-17 13:00 ` Paolo Bonzini
2021-08-17 16:13 ` David Matlack
2021-08-17 17:02 ` Paolo Bonzini
2021-08-19 16:37 ` Sean Christopherson
2021-08-20 22:54 ` David Matlack
2021-08-20 23:02 ` Sean Christopherson
2021-08-13 20:35 ` [RFC PATCH 4/6] KVM: x86/mmu: Avoid memslot lookup in page_fault_handle_page_track David Matlack
2021-08-13 20:35 ` David Matlack [this message]
2021-08-17 12:03 ` [RFC PATCH 5/6] KVM: x86/mmu: Avoid memslot lookup in rmap_add Paolo Bonzini
2021-08-19 16:15 ` David Matlack
2021-08-19 16:39 ` Sean Christopherson
2021-08-19 16:47 ` Paolo Bonzini
2021-08-13 20:35 ` [RFC PATCH 6/6] KVM: x86/mmu: Avoid memslot lookup in mmu_try_to_unsync_pages David Matlack
2021-08-17 11:12 ` [RFC PATCH 0/6] Pass memslot around during page fault handling Paolo Bonzini