From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 08/25] KVM: x86/mmu: split cpu_mode from mmu_role
Date: Mon, 21 Feb 2022 11:22:26 -0500
Message-ID: <20220221162243.683208-9-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>

Snapshot the state of the processor registers that govern the page walk
into a new field of struct kvm_mmu.  This is a more natural representation
than keeping that state *mostly* in mmu_role but not exclusively; right
now the delta is carried in other fields, such as root_level.

The nested MMU now has only the CPU mode; in fact the new function
kvm_calc_cpu_mode is analogous to the previous kvm_calc_nested_mmu_role,
except that it sets role.base.direct to !CR0.PG instead of forcing it to
true.  It is not clear what the old code meant by setting role.base.direct
to true "to try to detect bogus usage of the nested MMU", so that behavior
is not preserved.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |   1 +
 arch/x86/kvm/mmu/mmu.c          | 107 ++++++++++++++++++++------------
 arch/x86/kvm/mmu/paging_tmpl.h  |   2 +-
 3 files changed, 68 insertions(+), 42 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 92855d3984a7..cc268116eb3f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -433,6 +433,7 @@ struct kvm_mmu {
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
 	struct kvm_mmu_root_info root;
+	union kvm_mmu_role cpu_mode;
 	union kvm_mmu_role mmu_role;
 	u8 root_level;
 	u8 shadow_root_level;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7c835253a330..1af898f0cf87 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -221,7 +221,7 @@ BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
 {								\
-	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
+	return !!(mmu->cpu_mode. base_or_ext . reg##_##name);	\
 }
 BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
@@ -4680,6 +4680,39 @@ static void paging32_init_context(struct kvm_mmu *context)
 	context->direct_map = false;
 }
 
+static union kvm_mmu_role
+kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
+{
+	union kvm_mmu_role role = {0};
+
+	role.base.access = ACC_ALL;
+	role.base.smm = is_smm(vcpu);
+	role.base.guest_mode = is_guest_mode(vcpu);
+	role.base.direct = !____is_cr0_pg(regs);
+	if (!role.base.direct) {
+		role.base.efer_nx = ____is_efer_nx(regs);
+		role.base.cr0_wp = ____is_cr0_wp(regs);
+		role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
+		role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
+		role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
+		role.base.level = role_regs_to_root_level(regs);
+
+		role.ext.cr0_pg = 1;
+		role.ext.cr4_pae = ____is_cr4_pae(regs);
+		role.ext.cr4_smep = ____is_cr4_smep(regs);
+		role.ext.cr4_smap = ____is_cr4_smap(regs);
+		role.ext.cr4_pse = ____is_cr4_pse(regs);
+
+		/* PKEY and LA57 are active iff long mode is active. */
+		role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
+		role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+		role.ext.efer_lma = ____is_efer_lma(regs);
+	}
+
+	role.ext.valid = 1;
+	return role;
+}
+
 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
 						   const struct kvm_mmu_role_regs *regs)
 {
@@ -4739,13 +4772,16 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	union kvm_mmu_role new_role =
+	union kvm_mmu_role cpu_mode = kvm_calc_cpu_mode(vcpu, regs);
+	union kvm_mmu_role mmu_role =
 		kvm_calc_tdp_mmu_root_page_role(vcpu, regs);
 
-	if (new_role.as_u64 == context->mmu_role.as_u64)
+	if (cpu_mode.as_u64 == context->cpu_mode.as_u64 &&
+	    mmu_role.as_u64 == context->mmu_role.as_u64)
 		return;
 
-	context->mmu_role.as_u64 = new_role.as_u64;
+	context->cpu_mode.as_u64 = cpu_mode.as_u64;
+	context->mmu_role.as_u64 = mmu_role.as_u64;
 	context->page_fault = kvm_tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
@@ -4800,13 +4836,15 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
 }
 
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-				    const struct kvm_mmu_role_regs *regs,
-				    union kvm_mmu_role new_role)
+				    union kvm_mmu_role cpu_mode,
+				    union kvm_mmu_role mmu_role)
 {
-	if (new_role.as_u64 == context->mmu_role.as_u64)
+	if (cpu_mode.as_u64 == context->cpu_mode.as_u64 &&
+	    mmu_role.as_u64 == context->mmu_role.as_u64)
 		return;
 
-	context->mmu_role.as_u64 = new_role.as_u64;
+	context->cpu_mode.as_u64 = cpu_mode.as_u64;
+	context->mmu_role.as_u64 = mmu_role.as_u64;
 
 	if (!is_cr0_pg(context))
 		nonpaging_init_context(context);
@@ -4814,10 +4852,10 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
 		paging64_init_context(context);
 	else
 		paging32_init_context(context);
-	context->root_level = role_regs_to_root_level(regs);
+	context->root_level = cpu_mode.base.level;
 
 	reset_guest_paging_metadata(vcpu, context);
-	context->shadow_root_level = new_role.base.level;
+	context->shadow_root_level = mmu_role.base.level;
 
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
@@ -4826,10 +4864,11 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 				const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	union kvm_mmu_role new_role =
+	union kvm_mmu_role cpu_mode = kvm_calc_cpu_mode(vcpu, regs);
+	union kvm_mmu_role mmu_role =
 		kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
 
-	shadow_mmu_init_context(vcpu, context, regs, new_role);
+	shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role);
 }
 
 static union kvm_mmu_role
@@ -4854,11 +4893,10 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 		.cr4 = cr4 & ~X86_CR4_PKE,
 		.efer = efer,
 	};
-	union kvm_mmu_role new_role;
+	union kvm_mmu_role cpu_mode = kvm_calc_cpu_mode(vcpu, &regs);
+	union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
 
-	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
-
-	shadow_mmu_init_context(vcpu, context, &regs, new_role);
+	shadow_mmu_init_context(vcpu, context, cpu_mode, mmu_role);
 	kvm_mmu_new_pgd(vcpu, nested_cr3);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
@@ -4881,7 +4919,6 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
 	role.base.guest_mode = true;
 	role.base.access = ACC_ALL;
 
-	/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
 	role.ext.word = 0;
 	role.ext.execonly = execonly;
 	role.ext.valid = 1;
@@ -4895,12 +4932,14 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 {
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
 	u8 level = vmx_eptp_page_walk_level(new_eptp);
-	union kvm_mmu_role new_role =
+	union kvm_mmu_role new_mode =
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
 						   execonly, level);
 
-	if (new_role.as_u64 != context->mmu_role.as_u64) {
-		context->mmu_role.as_u64 = new_role.as_u64;
+	if (new_mode.as_u64 != context->cpu_mode.as_u64) {
+		/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
+		context->cpu_mode.as_u64 = new_mode.as_u64;
+		context->mmu_role.as_u64 = new_mode.as_u64;
 
 		context->shadow_root_level = level;
 
@@ -4933,36 +4972,19 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
 	context->inject_page_fault = kvm_inject_page_fault_shadow;
 }
 
-static union kvm_mmu_role
-kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
-{
-	union kvm_mmu_role role;
-
-	role = kvm_calc_shadow_root_page_role_common(vcpu, regs);
-
-	/*
-	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
-	 * shadow pages of their own and so "direct" has no meaning.   Set it
-	 * to "true" to try to detect bogus usage of the nested MMU.
-	 */
-	role.base.direct = true;
-	role.base.level = role_regs_to_root_level(regs);
-	return role;
-}
-
 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
 {
-	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, regs);
+	union kvm_mmu_role new_mode = kvm_calc_cpu_mode(vcpu, regs);
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
-	if (new_role.as_u64 == g_context->mmu_role.as_u64)
+	if (new_mode.as_u64 == g_context->cpu_mode.as_u64)
 		return;
 
-	g_context->mmu_role.as_u64 = new_role.as_u64;
+	g_context->cpu_mode.as_u64   = new_mode.as_u64;
 	g_context->get_guest_pgd     = kvm_get_guest_cr3;
 	g_context->get_pdptr         = kvm_pdptr_read;
 	g_context->inject_page_fault = kvm_inject_page_fault;
-	g_context->root_level        = new_role.base.level;
+	g_context->root_level        = new_mode.base.level;
 
 	/*
 	 * L2 page tables are never shadowed, so there is no need to sync
@@ -5020,6 +5042,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
 	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
 	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
+	vcpu->arch.root_mmu.cpu_mode.ext.valid = 0;
+	vcpu->arch.guest_mmu.cpu_mode.ext.valid = 0;
+	vcpu->arch.nested_mmu.cpu_mode.ext.valid = 0;
 	kvm_mmu_reset_context(vcpu);
 
 	/*
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index d1d17d28e81b..e1c2ecb4ddee 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -323,7 +323,7 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
 	 * is not reserved and does not indicate a large page at this level,
 	 * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
 	 */
-	gpte &= level - (PT32_ROOT_LEVEL + mmu->mmu_role.ext.cr4_pse);
+	gpte &= level - (PT32_ROOT_LEVEL + mmu->cpu_mode.ext.cr4_pse);
 #endif
 	/*
 	 * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
-- 
2.31.1


