From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 23/25] KVM: x86/mmu: replace direct_map with root_role.direct
Date: Mon, 21 Feb 2022 11:22:41 -0500
Message-ID: <20220221162243.683208-24-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>

direct_map is always equal to the direct field of the root page's role:

- for shadow paging, direct_map is true if CR0.PG=0, and root_role.direct
is copied from cpu_mode.base.direct, which is likewise true exactly when
CR0.PG=0

- for TDP, direct_map is always true and root_role.direct is also always
true

- for shadow EPT, direct_map is always false and root_role.direct is also
always false

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/mmu/mmu.c          | 27 ++++++++++++---------------
 arch/x86/kvm/x86.c              | 12 ++++++------
 3 files changed, 18 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ec89b1a488c5..af90d0653139 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -431,7 +431,6 @@ struct kvm_mmu {
 	struct kvm_mmu_root_info root;
 	union kvm_mmu_paging_mode cpu_mode;
 	union kvm_mmu_page_role root_role;
-	bool direct_map;
 	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
 
 	/*
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0c88d4206715..8eb2c0373309 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2029,7 +2029,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     int direct,
 					     unsigned int access)
 {
-	bool direct_mmu = vcpu->arch.mmu->direct_map;
+	bool direct_mmu = vcpu->arch.mmu->root_role.direct;
 	union kvm_mmu_page_role role;
 	struct hlist_head *sp_list;
 	unsigned quadrant;
@@ -2131,7 +2131,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
 
 	if (iterator->level >= PT64_ROOT_4LEVEL &&
 	    vcpu->arch.mmu->cpu_mode.base.level < PT64_ROOT_4LEVEL &&
-	    !vcpu->arch.mmu->direct_map)
+	    !vcpu->arch.mmu->root_role.direct)
 		iterator->level = PT32E_ROOT_LEVEL;
 
 	if (iterator->level == PT32E_ROOT_LEVEL) {
@@ -2507,7 +2507,7 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	if (vcpu->arch.mmu->direct_map)
+	if (vcpu->arch.mmu->root_role.direct)
 		return 0;
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -3545,7 +3545,8 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
 	 */
-	if (mmu->direct_map || mmu->cpu_mode.base.level >= PT64_ROOT_4LEVEL ||
+	if (mmu->root_role.direct ||
+	    mmu->cpu_mode.base.level >= PT64_ROOT_4LEVEL ||
 	    mmu->root_role.level < PT64_ROOT_4LEVEL)
 		return 0;
 
@@ -3634,7 +3635,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_mmu_page *sp;
 
-	if (vcpu->arch.mmu->direct_map)
+	if (vcpu->arch.mmu->root_role.direct)
 		return;
 
 	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
@@ -3854,7 +3855,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
 	arch.gfn = gfn;
-	arch.direct_map = mmu->direct_map;
+	arch.direct_map = mmu->root_role.direct;
 	arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, mmu);
 
 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
@@ -4072,7 +4073,6 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
-	context->direct_map = true;
 }
 
 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
@@ -4654,7 +4654,6 @@ static void paging64_init_context(struct kvm_mmu *context)
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->sync_page = paging64_sync_page;
 	context->invlpg = paging64_invlpg;
-	context->direct_map = false;
 }
 
 static void paging32_init_context(struct kvm_mmu *context)
@@ -4663,7 +4662,6 @@ static void paging32_init_context(struct kvm_mmu *context)
 	context->gva_to_gpa = paging32_gva_to_gpa;
 	context->sync_page = paging32_sync_page;
 	context->invlpg = paging32_invlpg;
-	context->direct_map = false;
 }
 
 static union kvm_mmu_paging_mode
@@ -4748,7 +4746,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_paging_mode cp
 	context->page_fault = kvm_tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
-	context->direct_map = true;
 	context->get_guest_pgd = kvm_get_guest_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
@@ -4872,7 +4869,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		context->gva_to_gpa = ept_gva_to_gpa;
 		context->sync_page = ept_sync_page;
 		context->invlpg = ept_invlpg;
-		context->direct_map = false;
+
 		update_permission_bitmask(context, true);
 		context->pkru_mask = 0;
 		reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
@@ -4987,13 +4984,13 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
+	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
 	if (r)
 		goto out;
 	r = mmu_alloc_special_roots(vcpu);
 	if (r)
 		goto out;
-	if (vcpu->arch.mmu->direct_map)
+	if (vcpu->arch.mmu->root_role.direct)
 		r = mmu_alloc_direct_roots(vcpu);
 	else
 		r = mmu_alloc_shadow_roots(vcpu);
@@ -5197,7 +5194,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len)
 {
 	int r, emulation_type = EMULTYPE_PF;
-	bool direct = vcpu->arch.mmu->direct_map;
+	bool direct = vcpu->arch.mmu->root_role.direct;
 
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
 		return RET_PF_RETRY;
@@ -5228,7 +5225,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 	 * paging in both guests. If true, we simply unprotect the page
 	 * and resume the guest.
 	 */
-	if (vcpu->arch.mmu->direct_map &&
+	if (vcpu->arch.mmu->root_role.direct &&
 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
 		return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1546a25a9307..53730e81ceb5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8016,7 +8016,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
 		return false;
 
-	if (!vcpu->arch.mmu->direct_map) {
+	if (!vcpu->arch.mmu->root_role.direct) {
 		/*
 		 * Write permission should be allowed since only
 		 * write access need to be emulated.
@@ -8049,7 +8049,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	kvm_release_pfn_clean(pfn);
 
 	/* The instructions are well-emulated on direct mmu. */
-	if (vcpu->arch.mmu->direct_map) {
+	if (vcpu->arch.mmu->root_role.direct) {
 		unsigned int indirect_shadow_pages;
 
 		write_lock(&vcpu->kvm->mmu_lock);
@@ -8117,7 +8117,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	vcpu->arch.last_retry_eip = ctxt->eip;
 	vcpu->arch.last_retry_addr = cr2_or_gpa;
 
-	if (!vcpu->arch.mmu->direct_map)
+	if (!vcpu->arch.mmu->root_role.direct)
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
 
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -8397,7 +8397,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		ctxt->exception.address = cr2_or_gpa;
 
 		/* With shadow page tables, cr2 contains a GVA or nGPA. */
-		if (vcpu->arch.mmu->direct_map) {
+		if (vcpu->arch.mmu->root_role.direct) {
 			ctxt->gpa_available = true;
 			ctxt->gpa_val = cr2_or_gpa;
 		}
@@ -12198,7 +12198,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
 	int r;
 
-	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
+	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
 	      work->wakeup_all)
 		return;
 
@@ -12206,7 +12206,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	if (unlikely(r))
 		return;
 
-	if (!vcpu->arch.mmu->direct_map &&
+	if (!vcpu->arch.mmu->root_role.direct &&
 	      work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
 		return;
 
-- 
2.31.1


