From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: seanjc@google.com
Subject: [PATCH 14/22] KVM: x86/mmu: remove extended bits from mmu_role, rename field
Date: Thu, 14 Apr 2022 03:39:52 -0400
Message-ID: <20220414074000.31438-15-pbonzini@redhat.com>
In-Reply-To: <20220414074000.31438-1-pbonzini@redhat.com>

mmu_role represents the role of the root of the page tables.  It does
not need any extended bits, because those bits only govern how KVM
walks the guest's page tables in software; the is_* functions used
for that walk always read the CPU role.
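
For context, the two unions involved look roughly like this (an
abbreviated sketch: the field lists are trimmed and the widths
illustrative, the full definitions live in
arch/x86/include/asm/kvm_host.h):

	union kvm_mmu_page_role {
		u32 word;
		struct {
			unsigned level:4;	/* never zero for a valid role */
			unsigned direct:1;
			unsigned efer_nx:1;
			/* ... the other bits that identify a shadow page ... */
		};
	};

	union kvm_mmu_role {
		u64 as_u64;
		struct {
			union kvm_mmu_page_role base;
			/* guest-walk bits such as cr4_la57, efer_lma, valid */
			union kvm_mmu_extended_role ext;
		};
	};

The 32-bit base alone identifies the root shadow page, so root_role
can be a plain kvm_mmu_page_role.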

ext.valid is no longer present in the MMU role, so it cannot be
cleared to mark the role stale; however, an all-zero MMU role is
impossible anyway, because the level field of a valid role is never
zero.  So just zap the whole mmu_role to zero in order to force
invalidation after CPUID is updated.
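
As a sketch of why zapping to zero suffices: every reinitialization
path (see init_kvm_tdp_mmu and shadow_mmu_init_context in the diff
below) bails out early when the cached roles match the freshly
computed ones, and a computed role always has a nonzero level:

	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
	    root_role.word == context->root_role.word)
		return;	/* never taken after root_role.word was zapped to 0 */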

Since this change requires touching almost every occurrence of
"mmu_role" anyway, take the opportunity to rename it to "root_role".

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/mmu/mmu.c          | 86 ++++++++++++++++-----------------
 arch/x86/kvm/mmu/paging_tmpl.h  |  4 +-
 arch/x86/kvm/mmu/tdp_mmu.c      |  2 +-
 4 files changed, 46 insertions(+), 48 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a299236cfde5..c81221d03a1b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -439,7 +439,7 @@ struct kvm_mmu {
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
 	struct kvm_mmu_root_info root;
 	union kvm_mmu_role cpu_role;
-	union kvm_mmu_role mmu_role;
+	union kvm_mmu_page_role root_role;
 	u8 root_level;
 	u8 shadow_root_level;
 	bool direct_map;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f491d3c47ac8..13eb2d40e0a3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -193,7 +193,7 @@ struct kvm_mmu_role_regs {
 
 /*
  * Yes, lot's of underscores.  They're a hint that you probably shouldn't be
- * reading from the role_regs.  Once the mmu_role is constructed, it becomes
+ * reading from the role_regs.  Once the root_role is constructed, it becomes
  * the single source of truth for the MMU's state.
  */
 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
@@ -2028,7 +2028,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	int collisions = 0;
 	LIST_HEAD(invalid_list);
 
-	role = vcpu->arch.mmu->mmu_role.base;
+	role = vcpu->arch.mmu->root_role;
 	role.level = level;
 	role.direct = direct;
 	role.access = access;
@@ -3272,7 +3272,7 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 	 * This should not be called while L2 is active, L2 can't invalidate
 	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
 	 */
-	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
+	WARN_ON_ONCE(mmu->root_role.guest_mode);
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		root_hpa = mmu->prev_roots[i].hpa;
@@ -4183,7 +4183,7 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
-	union kvm_mmu_page_role new_role = mmu->mmu_role.base;
+	union kvm_mmu_page_role new_role = mmu->root_role;
 
 	if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) {
 		/* kvm_mmu_ensure_valid_pgd will set up a new root.  */
@@ -4442,7 +4442,7 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 	shadow_zero_check = &context->shadow_zero_check;
 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
 				context->shadow_root_level,
-				context->mmu_role.base.efer_nx,
+				context->root_role.efer_nx,
 				guest_can_use_gbpages(vcpu), is_pse, is_amd);
 
 	if (!shadow_me_mask)
@@ -4738,22 +4738,21 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 	return max_tdp_level;
 }
 
-static union kvm_mmu_role
+static union kvm_mmu_page_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
 				union kvm_mmu_role cpu_role)
 {
-	union kvm_mmu_role role = {0};
+	union kvm_mmu_page_role role = {0};
 
-	role.base.access = ACC_ALL;
-	role.base.cr0_wp = true;
-	role.base.efer_nx = true;
-	role.base.smm = cpu_role.base.smm;
-	role.base.guest_mode = cpu_role.base.guest_mode;
-	role.base.ad_disabled = (shadow_accessed_mask == 0);
-	role.base.level = kvm_mmu_get_tdp_level(vcpu);
-	role.base.direct = true;
-	role.base.has_4_byte_gpte = false;
-	role.ext.valid = true;
+	role.access = ACC_ALL;
+	role.cr0_wp = true;
+	role.efer_nx = true;
+	role.smm = cpu_role.base.smm;
+	role.guest_mode = cpu_role.base.guest_mode;
+	role.ad_disabled = (shadow_accessed_mask == 0);
+	role.level = kvm_mmu_get_tdp_level(vcpu);
+	role.direct = true;
+	role.has_4_byte_gpte = false;
 
 	return role;
 }
@@ -4763,14 +4762,14 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
-	union kvm_mmu_role mmu_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
+	union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
 
 	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
-	    mmu_role.as_u64 == context->mmu_role.as_u64)
+	    root_role.word == context->root_role.word)
 		return;
 
 	context->cpu_role.as_u64 = cpu_role.as_u64;
-	context->mmu_role.as_u64 = mmu_role.as_u64;
+	context->root_role.word = root_role.word;
 	context->page_fault = kvm_tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
@@ -4792,19 +4791,19 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	reset_tdp_shadow_zero_bits_mask(context);
 }
 
-static union kvm_mmu_role
+static union kvm_mmu_page_role
 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
 				   union kvm_mmu_role cpu_role)
 {
-	union kvm_mmu_role role;
+	union kvm_mmu_page_role role;
 
-	role = cpu_role;
+	role = cpu_role.base;
 	if (!cpu_role.ext.efer_lma)
-		role.base.level = PT32E_ROOT_LEVEL;
+		role.level = PT32E_ROOT_LEVEL;
 	else if (cpu_role.ext.cr4_la57)
-		role.base.level = PT64_ROOT_5LEVEL;
+		role.level = PT64_ROOT_5LEVEL;
 	else
-		role.base.level = PT64_ROOT_4LEVEL;
+		role.level = PT64_ROOT_4LEVEL;
 
 	/*
 	 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
@@ -4815,20 +4814,20 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
 	 * MMU contexts.
 	 */
-	role.base.efer_nx = true;
+	role.efer_nx = true;
 	return role;
 }
 
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 				    union kvm_mmu_role cpu_role,
-				    union kvm_mmu_role mmu_role)
+				    union kvm_mmu_page_role root_role)
 {
 	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
-	    mmu_role.as_u64 == context->mmu_role.as_u64)
+	    root_role.word == context->root_role.word)
 		return;
 
 	context->cpu_role.as_u64 = cpu_role.as_u64;
-	context->mmu_role.as_u64 = mmu_role.as_u64;
+	context->root_role.word = root_role.word;
 
 	if (!is_cr0_pg(context))
 		nonpaging_init_context(context);
@@ -4839,7 +4838,7 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
 	context->root_level = cpu_role.base.level;
 
 	reset_guest_paging_metadata(vcpu, context);
-	context->shadow_root_level = mmu_role.base.level;
+	context->shadow_root_level = root_role.level;
 
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
@@ -4849,22 +4848,21 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
-	union kvm_mmu_role mmu_role =
+	union kvm_mmu_page_role root_role =
 		kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_role);
 
-	shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
+	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
 }
 
-static union kvm_mmu_role
+static union kvm_mmu_page_role
 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
 				   union kvm_mmu_role cpu_role)
 {
-	union kvm_mmu_role role;
+	union kvm_mmu_page_role role;
 
 	WARN_ON_ONCE(cpu_role.base.direct);
-	role = cpu_role;
-	role.base.level = kvm_mmu_get_tdp_level(vcpu);
-
+	role = cpu_role.base;
+	role.level = kvm_mmu_get_tdp_level(vcpu);
 	return role;
 }
 
@@ -4878,9 +4876,9 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 		.efer = efer,
 	};
 	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
-	union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_role);
+	union kvm_mmu_page_role root_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_role);
 
-	shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
+	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
 	kvm_mmu_new_pgd(vcpu, nested_cr3);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
@@ -4923,7 +4921,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	if (new_mode.as_u64 != context->cpu_role.as_u64) {
 		/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
 		context->cpu_role.as_u64 = new_mode.as_u64;
-		context->mmu_role.as_u64 = new_mode.as_u64;
+		context->root_role.word = new_mode.base.word;
 
 		context->shadow_root_level = level;
 
@@ -5023,9 +5021,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	 * problem is swept under the rug; KVM's CPUID API is horrific and
 	 * it's all but impossible to solve it without introducing a new API.
 	 */
-	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
-	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
-	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
+	vcpu->arch.root_mmu.root_role.word = 0;
+	vcpu->arch.guest_mmu.root_role.word = 0;
+	vcpu->arch.nested_mmu.root_role.word = 0;
 	vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
 	vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
 	vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 298e502286cf..24157f637bd7 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -988,7 +988,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-	union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
+	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
 	int i;
 	bool host_writable;
 	gpa_t first_pte_gpa;
@@ -1016,7 +1016,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	 * reserved bits checks will be wrong, etc...
 	 */
 	if (WARN_ON_ONCE(sp->role.direct ||
-			 (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
+			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
 		return -1;
 
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index c472769e0300..bbd2a6dc8c20 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -310,7 +310,7 @@ static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
 
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 {
-	union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
+	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_page *root;
 
-- 
2.31.1




Thread overview: 34+ messages
2022-04-14  7:39 [PATCH v3 00/22] https://www.spinics.net/lists/kvm/msg267878.html Paolo Bonzini
2022-04-14  7:39 ` [PATCH 01/22] KVM: x86/mmu: nested EPT cannot be used in SMM Paolo Bonzini
2022-04-14  7:39 ` [PATCH 02/22] KVM: x86/mmu: constify uses of struct kvm_mmu_role_regs Paolo Bonzini
2022-04-14  7:39 ` [PATCH 03/22] KVM: x86/mmu: pull computation of kvm_mmu_role_regs to kvm_init_mmu Paolo Bonzini
2022-04-14  7:39 ` [PATCH 04/22] KVM: x86/mmu: rephrase unclear comment Paolo Bonzini
2022-04-14  7:39 ` [PATCH 05/22] KVM: x86: Clean up and document nested #PF workaround Paolo Bonzini
2022-04-14  7:39 ` [PATCH 06/22] KVM: x86/mmu: remove "bool base_only" arguments Paolo Bonzini
2022-04-14  7:39 ` [PATCH 07/22] KVM: x86/mmu: split cpu_role from mmu_role Paolo Bonzini
2022-04-15  4:16   ` Like Xu
2022-04-14  7:39 ` [PATCH 08/22] KVM: x86/mmu: do not recompute root level from kvm_mmu_role_regs Paolo Bonzini
2022-04-14  7:39 ` [PATCH 09/22] KVM: x86/mmu: remove ept_ad field Paolo Bonzini
2022-04-14  7:39 ` [PATCH 10/22] KVM: x86/mmu: remove kvm_calc_shadow_root_page_role_common Paolo Bonzini
2022-04-14  7:39 ` [PATCH 11/22] KVM: x86/mmu: cleanup computation of MMU roles for two-dimensional paging Paolo Bonzini
2022-04-14  7:39 ` [PATCH 12/22] KVM: x86/mmu: cleanup computation of MMU roles for shadow paging Paolo Bonzini
2022-04-14  7:39 ` [PATCH 13/22] KVM: x86/mmu: store shadow EFER.NX in the MMU role Paolo Bonzini
2022-04-14  7:39 ` [PATCH 14/22] KVM: x86/mmu: remove extended bits from mmu_role, rename field Paolo Bonzini [this message]
2022-04-14  7:39 ` [PATCH 15/22] KVM: x86/mmu: rename kvm_mmu_role union Paolo Bonzini
2022-04-14  7:39 ` [PATCH 16/22] KVM: x86/mmu: remove redundant bits from extended role Paolo Bonzini
2022-04-14  8:27   ` Paolo Bonzini
2022-05-10  0:20   ` Sean Christopherson
2022-05-11 13:53     ` Paolo Bonzini
2022-05-12 13:59     ` Paolo Bonzini
2022-05-12 14:18       ` Sean Christopherson
2022-05-12 16:09         ` Paolo Bonzini
2022-05-12 21:34           ` Sean Christopherson
2022-04-14  7:39 ` [PATCH 17/22] KVM: x86/mmu: remove valid " Paolo Bonzini
2022-04-14  7:39 ` [PATCH 18/22] KVM: x86/mmu: simplify and/or inline computation of shadow MMU roles Paolo Bonzini
2022-04-14  7:39 ` [PATCH 19/22] KVM: x86/mmu: pull CPU mode computation to kvm_init_mmu Paolo Bonzini
2022-04-14  7:39 ` [PATCH 20/22] KVM: x86/mmu: replace shadow_root_level with root_role.level Paolo Bonzini
2022-04-14  7:39 ` [PATCH 21/22] KVM: x86/mmu: replace root_level with cpu_role.base.level Paolo Bonzini
2022-04-14  7:40 ` [PATCH 22/22] KVM: x86/mmu: replace direct_map with root_role.direct Paolo Bonzini
2022-04-14  7:47 ` [PATCH v3 00/22] https://www.spinics.net/lists/kvm/msg267878.html Paolo Bonzini
2022-04-14 15:02   ` Sean Christopherson
2022-04-14 15:09     ` Paolo Bonzini
