From: Ben Gardon <bgardon@google.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>, Peter Xu <peterx@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	David Matlack <dmatlack@google.com>,
	Vipin Sharma <vipinsh@google.com>,
	Nagareddy Reddy <nspreddy@google.com>,
	Ben Gardon <bgardon@google.com>
Subject: [RFC 05/14] KVM: x86/MMU: Move paging_tmpl.h includes to shadow_mmu.c
Date: Wed, 21 Dec 2022 22:24:09 +0000
Message-ID: <20221221222418.3307832-6-bgardon@google.com>
In-Reply-To: <20221221222418.3307832-1-bgardon@google.com>

Move the integration point for paging_tmpl.h to shadow_mmu.c, since
paging_tmpl.h is ostensibly part of the Shadow MMU. This requires making
some of the template-generated definitions non-static and exporting the
pre-processed function names through shadow_mmu.h, since they are needed
for the MMU context callbacks in mmu.c. This will facilitate cleanups in
subsequent commits: many of the functions currently exposed by
shadow_mmu.h are needed only by paging_tmpl.h, and those will no longer
need to be exported once the template is included from shadow_mmu.c.
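
For illustration, the pre-processed names come from the per-PTTYPE
FNAME() definitions in paging_tmpl.h, roughly like this (a simplified
sketch; the real header defines many more per-PTTYPE macros alongside
FNAME()):

	#if PTTYPE == 64
		#define FNAME(name) paging##64_##name
	#elif PTTYPE == 32
		#define FNAME(name) paging##32_##name
	#elif PTTYPE == PTTYPE_EPT
		#define FNAME(name) ept_##name
	#endif

	/*
	 * Each template function is compiled once per include, so
	 * FNAME(page_fault) yields paging64_page_fault,
	 * paging32_page_fault, and ept_page_fault.
	 */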

sync_mmio_spte() is only used by paging_tmpl.h, so move it along with
the includes.
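
To illustrate why mmu.c still needs the generated names, the MMU
context setup wires them into struct kvm_mmu along these lines (a
sketch of the existing init-context code, not part of this patch):

	static void paging64_init_context(struct kvm_mmu *context)
	{
		context->page_fault = paging64_page_fault;
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->sync_page = paging64_sync_page;
		context->invlpg = paging64_invlpg;
	}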

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c         | 29 -----------------------------
 arch/x86/kvm/mmu/paging_tmpl.h | 11 +++++------
 arch/x86/kvm/mmu/shadow_mmu.c  | 30 ++++++++++++++++++++++++++++++
 arch/x86/kvm/mmu/shadow_mmu.h  | 25 ++++++++++++++++++++++++-
 4 files changed, 59 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a17e8a79e4df..dd97e346c786 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1699,35 +1699,6 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 	return kvm_read_cr3(vcpu);
 }
 
-static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			   unsigned int access)
-{
-	if (unlikely(is_mmio_spte(*sptep))) {
-		if (gfn != get_mmio_spte_gfn(*sptep)) {
-			mmu_spte_clear_no_track(sptep);
-			return true;
-		}
-
-		mark_mmio_spte(vcpu, sptep, gfn, access);
-		return true;
-	}
-
-	return false;
-}
-
-#define PTTYPE_EPT 18 /* arbitrary */
-#define PTTYPE PTTYPE_EPT
-#include "paging_tmpl.h"
-#undef PTTYPE
-
-#define PTTYPE 64
-#include "paging_tmpl.h"
-#undef PTTYPE
-
-#define PTTYPE 32
-#include "paging_tmpl.h"
-#undef PTTYPE
-
 static void
 __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
 			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 0f6455072055..2e3b2aca64ad 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -787,7 +787,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
  *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *           a negative value on error.
  */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct guest_walker walker;
 	int r;
@@ -897,7 +897,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
 	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
 }
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
+void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -957,9 +957,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 }
 
 /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			       gpa_t addr, u64 access,
-			       struct x86_exception *exception)
+gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t addr,
+			u64 access, struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = INVALID_GPA;
@@ -992,7 +991,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
  *   0: the sp is synced and no tlb flushing is required
  * > 0: the sp is synced and tlb flushing is required
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
 	int i;
diff --git a/arch/x86/kvm/mmu/shadow_mmu.c b/arch/x86/kvm/mmu/shadow_mmu.c
index 05d8f5be559d..86b5fb75d50a 100644
--- a/arch/x86/kvm/mmu/shadow_mmu.c
+++ b/arch/x86/kvm/mmu/shadow_mmu.c
@@ -10,6 +10,7 @@
  * Shadow MMU also supports TDP, it's just less scalable. The Shadow and TDP
  * MMUs can cooperate to support nested virtualization on hardware with TDP.
  */
+#include "ioapic.h"
 #include "mmu.h"
 #include "mmu_internal.h"
 #include "mmutrace.h"
@@ -2798,6 +2799,35 @@ void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 	walk_shadow_page_lockless_end(vcpu);
 }
 
+static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
+			   unsigned int access)
+{
+	if (unlikely(is_mmio_spte(*sptep))) {
+		if (gfn != get_mmio_spte_gfn(*sptep)) {
+			mmu_spte_clear_no_track(sptep);
+			return true;
+		}
+
+		mark_mmio_spte(vcpu, sptep, gfn, access);
+		return true;
+	}
+
+	return false;
+}
+
+#define PTTYPE_EPT 18 /* arbitrary */
+#define PTTYPE PTTYPE_EPT
+#include "paging_tmpl.h"
+#undef PTTYPE
+
+#define PTTYPE 64
+#include "paging_tmpl.h"
+#undef PTTYPE
+
+#define PTTYPE 32
+#include "paging_tmpl.h"
+#undef PTTYPE
+
 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
 {
 	struct kvm_mmu_page *sp;
diff --git a/arch/x86/kvm/mmu/shadow_mmu.h b/arch/x86/kvm/mmu/shadow_mmu.h
index 83876047c1f5..00d2f9abecf0 100644
--- a/arch/x86/kvm/mmu/shadow_mmu.h
+++ b/arch/x86/kvm/mmu/shadow_mmu.h
@@ -73,7 +73,6 @@ bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 		       int level, pte_t unused);
 
 void drop_parent_pte(struct kvm_mmu_page *sp, u64 *parent_pte);
-int nonpaging_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
 int mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *parent,
 		      bool can_yield);
 void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp);
@@ -150,4 +149,28 @@ void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
 				    const struct kvm_memory_slot *slot);
 
 unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc);
+
+/* Exports from paging_tmpl.h */
+gpa_t paging32_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			  gpa_t vaddr, u64 access,
+			  struct x86_exception *exception);
+gpa_t paging64_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			  gpa_t vaddr, u64 access,
+			  struct x86_exception *exception);
+gpa_t ept_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t vaddr,
+		     u64 access, struct x86_exception *exception);
+
+int paging32_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+int paging64_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+int ept_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+
+int paging32_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+int paging64_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+int ept_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+/* Defined in shadow_mmu.c. */
+int nonpaging_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+
+void paging32_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root);
+void paging64_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root);
+void ept_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root);
 #endif /* __KVM_X86_MMU_SHADOW_MMU_H */
-- 
2.39.0.314.g84b9a713c41-goog

