From: David Matlack <dmatlack@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Marc Zyngier <maz@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Sean Christopherson <seanjc@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Peter Xu <peterx@redhat.com>, Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	Peter Feiner <pfeiner@google.com>,
	Andrew Jones <drjones@redhat.com>,
	maciej.szmigiero@oracle.com, kvm@vger.kernel.org,
	David Matlack <dmatlack@google.com>
Subject: [PATCH 12/23] KVM: x86/mmu: Decouple rmap_add() and link_shadow_page() from kvm_vcpu
Date: Thu,  3 Feb 2022 01:00:40 +0000
Message-ID: <20220203010051.2813563-13-dmatlack@google.com>
In-Reply-To: <20220203010051.2813563-1-dmatlack@google.com>

Allow adding new entries to the rmap and linking shadow pages without a
struct kvm_vcpu pointer by moving the implementations of rmap_add() and
link_shadow_page() into inner helpers, __rmap_add() and
__link_shadow_page(), that take the struct kvm_mmu_memory_cache to
allocate pte_list_desc structs from as an explicit parameter. The
existing functions become thin wrappers that pass in the vCPU's
mmu_pte_list_desc_cache.

No functional change intended.
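
As an illustration (a hypothetical sketch, not part of this patch): with
the inner helpers in place, a caller running outside of vCPU context,
e.g. the eager page splitting code later in this series, can top up its
own struct kvm_mmu_memory_cache and pass it in directly. The cache name
and the surrounding variables below are made up for the example:

	/* Hypothetical per-VM cache of pte_list_desc structs. */
	struct kvm_mmu_memory_cache *cache = &kvm->arch.split_cache;
	int r;

	/* Fill the cache without going through a vCPU. */
	r = kvm_mmu_topup_memory_cache(cache, min);
	if (r)
		return r;

	/* Add a new rmap entry using the caller-provided cache. */
	__rmap_add(kvm, cache, slot, spte, gfn);

	/* Link a shadow page to its parent SPTE, likewise vCPU-free. */
	__link_shadow_page(cache, sptep, sp);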

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 43 +++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index de7c47ee0def..c2f7f026d414 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -736,9 +736,9 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
-static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
+static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_mmu_memory_cache *cache)
 {
-	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
+	return kvm_mmu_memory_cache_alloc(cache);
 }
 
 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
@@ -885,7 +885,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 /*
  * Returns the number of pointers in the rmap chain, not counting the new one.
  */
-static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
+static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
 			struct kvm_rmap_head *rmap_head)
 {
 	struct pte_list_desc *desc;
@@ -896,7 +896,7 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 		rmap_head->val = (unsigned long)spte;
 	} else if (!(rmap_head->val & 1)) {
 		rmap_printk("%p %llx 1->many\n", spte, *spte);
-		desc = mmu_alloc_pte_list_desc(vcpu);
+		desc = mmu_alloc_pte_list_desc(cache);
 		desc->sptes[0] = (u64 *)rmap_head->val;
 		desc->sptes[1] = spte;
 		desc->spte_count = 2;
@@ -908,7 +908,7 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 		while (desc->spte_count == PTE_LIST_EXT) {
 			count += PTE_LIST_EXT;
 			if (!desc->more) {
-				desc->more = mmu_alloc_pte_list_desc(vcpu);
+				desc->more = mmu_alloc_pte_list_desc(cache);
 				desc = desc->more;
 				desc->spte_count = 0;
 				break;
@@ -1607,8 +1607,10 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 #define RMAP_RECYCLE_THRESHOLD 1000
 
-static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
-		     u64 *spte, gfn_t gfn)
+static void __rmap_add(struct kvm *kvm,
+		       struct kvm_mmu_memory_cache *cache,
+		       const struct kvm_memory_slot *slot,
+		       u64 *spte, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_head *rmap_head;
@@ -1617,15 +1619,21 @@ static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
 	sp = sptep_to_sp(spte);
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
-	rmap_count = pte_list_add(vcpu, spte, rmap_head);
+	rmap_count = pte_list_add(cache, spte, rmap_head);
 
 	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
-		kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
+		kvm_unmap_rmapp(kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
 		kvm_flush_remote_tlbs_with_address(
-				vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+				kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
 	}
 }
 
+static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
+		     u64 *spte, gfn_t gfn)
+{
+	__rmap_add(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, slot, spte, gfn);
+}
+
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	bool young = false;
@@ -1693,13 +1701,13 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
 }
 
-static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
+static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
 				    struct kvm_mmu_page *sp, u64 *parent_pte)
 {
 	if (!parent_pte)
 		return;
 
-	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
+	pte_list_add(cache, parent_pte, &sp->parent_ptes);
 }
 
 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
@@ -2297,8 +2305,8 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
 	__shadow_walk_next(iterator, *iterator->sptep);
 }
 
-static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
-			     struct kvm_mmu_page *sp)
+static void __link_shadow_page(struct kvm_mmu_memory_cache *cache, u64 *sptep,
+			       struct kvm_mmu_page *sp)
 {
 	u64 spte;
 
@@ -2308,12 +2316,17 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
 
 	mmu_spte_set(sptep, spte);
 
-	mmu_page_add_parent_pte(vcpu, sp, sptep);
+	mmu_page_add_parent_pte(cache, sp, sptep);
 
 	if (sp->unsync_children || sp->unsync)
 		mark_unsync(sptep);
 }
 
+static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, struct kvm_mmu_page *sp)
+{
+	__link_shadow_page(&vcpu->arch.mmu_pte_list_desc_cache, sptep, sp);
+}
+
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 				   unsigned direct_access)
 {
-- 
2.35.0.rc2.247.g8bbb082509-goog

