All of lore.kernel.org
 help / color / mirror / Atom feed
From: Wei Wang <wei.w.wang@intel.com>
To: seanjc@google.com, pbonzini@redhat.com, bgardon@google.com,
	dmatlack@google.com
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Wei Wang <wei.w.wang@intel.com>
Subject: [PATCH v1] KVM: x86/mmu: refactor kvm_tdp_mmu_map
Date: Wed,  2 Aug 2023 22:27:37 +0800	[thread overview]
Message-ID: <20230802142737.5572-1-wei.w.wang@intel.com> (raw)

The implementation of kvm_tdp_mmu_map is a bit long. It essentially does
three things:
1) adjust the leaf entry level (e.g. 4KB, 2MB or 1GB) to map according to
   the hugepage configurations;
2) map the nonleaf entries of the tdp page table; and
3) map the target leaf entry.

Improve the readability by moving the implementation of 2) above into a
subfunction, kvm_tdp_mmu_map_nonleafs, and removing the unnecessary
"goto"s. No functional changes intended.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 76 ++++++++++++++++++++------------------
 1 file changed, 41 insertions(+), 35 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 512163d52194..0b29a7f853b5 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1057,43 +1057,33 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
 				   struct kvm_mmu_page *sp, bool shared);
 
-/*
- * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
- * page tables and SPTEs to translate the faulting guest physical address.
- */
-int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int kvm_tdp_mmu_map_nonleafs(struct kvm_vcpu *vcpu,
+				    struct kvm_page_fault *fault,
+				    struct tdp_iter *iter)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	struct kvm *kvm = vcpu->kvm;
-	struct tdp_iter iter;
 	struct kvm_mmu_page *sp;
-	int ret = RET_PF_RETRY;
-
-	kvm_mmu_hugepage_adjust(vcpu, fault);
-
-	trace_kvm_mmu_spte_requested(fault);
-
-	rcu_read_lock();
-
-	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
-		int r;
+	int ret;
 
+	tdp_mmu_for_each_pte((*iter), mmu, fault->gfn, fault->gfn + 1) {
 		if (fault->nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
+			disallowed_hugepage_adjust(fault, iter->old_spte,
+						   iter->level);
 
 		/*
 		 * If SPTE has been frozen by another thread, just give up and
 		 * retry, avoiding unnecessary page table allocation and free.
 		 */
-		if (is_removed_spte(iter.old_spte))
-			goto retry;
+		if (is_removed_spte(iter->old_spte))
+			return RET_PF_RETRY;
 
-		if (iter.level == fault->goal_level)
-			goto map_target_level;
+		if (iter->level == fault->goal_level)
+			return RET_PF_CONTINUE;
 
 		/* Step down into the lower level page table if it exists. */
-		if (is_shadow_present_pte(iter.old_spte) &&
-		    !is_large_pte(iter.old_spte))
+		if (is_shadow_present_pte(iter->old_spte) &&
+		    !is_large_pte(iter->old_spte))
 			continue;
 
 		/*
@@ -1101,26 +1091,26 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * needs to be split.
 		 */
 		sp = tdp_mmu_alloc_sp(vcpu);
-		tdp_mmu_init_child_sp(sp, &iter);
+		tdp_mmu_init_child_sp(sp, iter);
 
 		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
 
-		if (is_shadow_present_pte(iter.old_spte))
-			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
+		if (is_shadow_present_pte(iter->old_spte))
+			ret = tdp_mmu_split_huge_page(kvm, iter, sp, true);
 		else
-			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
+			ret = tdp_mmu_link_sp(kvm, iter, sp, true);
 
 		/*
 		 * Force the guest to retry if installing an upper level SPTE
 		 * failed, e.g. because a different task modified the SPTE.
 		 */
-		if (r) {
+		if (ret) {
 			tdp_mmu_free_sp(sp);
-			goto retry;
+			return RET_PF_RETRY;
 		}
 
 		if (fault->huge_page_disallowed &&
-		    fault->req_level >= iter.level) {
+		    fault->req_level >= iter->level) {
 			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
 			if (sp->nx_huge_page_disallowed)
 				track_possible_nx_huge_page(kvm, sp);
@@ -1132,13 +1122,29 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	 * The walk aborted before reaching the target level, e.g. because the
 	 * iterator detected an upper level SPTE was frozen during traversal.
 	 */
-	WARN_ON_ONCE(iter.level == fault->goal_level);
-	goto retry;
+	WARN_ON_ONCE(iter->level == fault->goal_level);
+	return RET_PF_RETRY;
+}
 
-map_target_level:
-	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
+/*
+ * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
+ * page tables and SPTEs to translate the faulting guest physical address.
+ */
+int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+{
+	struct tdp_iter iter;
+	int ret;
+
+	kvm_mmu_hugepage_adjust(vcpu, fault);
+
+	trace_kvm_mmu_spte_requested(fault);
+
+	rcu_read_lock();
+
+	ret = kvm_tdp_mmu_map_nonleafs(vcpu, fault, &iter);
+	if (ret == RET_PF_CONTINUE)
+		ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
 
-retry:
 	rcu_read_unlock();
 	return ret;
 }
-- 
2.27.0


             reply	other threads:[~2023-08-02 14:38 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-08-02 14:27 Wei Wang [this message]
2023-08-02 15:14 ` [PATCH v1] KVM: x86/mmu: refactor kvm_tdp_mmu_map Sean Christopherson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230802142737.5572-1-wei.w.wang@intel.com \
    --to=wei.w.wang@intel.com \
    --cc=bgardon@google.com \
    --cc=dmatlack@google.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=seanjc@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.