From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com, vkuznets@redhat.com
Subject: [PATCH 22/23] KVM: MMU: use cpu_role for root_level
Date: Fri,  4 Feb 2022 06:57:17 -0500
Message-ID: <20220204115718.14934-23-pbonzini@redhat.com>
In-Reply-To: <20220204115718.14934-1-pbonzini@redhat.com>

Remove another duplicate field of struct kvm_mmu.  This time it is
root_level, the level at which guest page table walks start: it was
already being initialized, in practice, to cpu_role.base.level, so the
separate field is redundant.  Drop it and read the level from cpu_role
at the remaining users.
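
The pattern being applied is "do not cache what the packed role union
already encodes".  As an illustration only -- a self-contained,
simplified sketch with made-up names, not the real KVM structures or
field layout -- the idea looks roughly like this:

  #include <stdio.h>

  /* Simplified stand-in for the role union; real KVM packs many more bits. */
  union mmu_role {
          struct {
                  unsigned level  : 4;  /* root level of the page table walk */
                  unsigned direct : 1;  /* other role bits elided */
          } base;
          unsigned word;                /* whole role, comparable as one value */
  };

  /* Before: a separate root_level field shadowed cpu_role.base.level. */
  struct mmu_before {
          union mmu_role cpu_role;
          unsigned char root_level;     /* duplicate that had to be kept in sync */
  };

  /* After: the duplicate is gone; callers read the role directly. */
  struct mmu_after {
          union mmu_role cpu_role;
  };

  static unsigned walker_top_level(const struct mmu_after *mmu)
  {
          /* What call sites do now instead of reading mmu->root_level. */
          return mmu->cpu_role.base.level;
  }

  int main(void)
  {
          struct mmu_after mmu = { .cpu_role.base.level = 4 };

          printf("top level = %u\n", walker_top_level(&mmu));
          return 0;
  }

Keeping a single source of truth means the two values can never
diverge, and it is one less field to initialize on every MMU
(re)configuration.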

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/mmu/mmu.c          | 22 +++++++++-------------
 arch/x86/kvm/mmu/mmu_audit.c    |  6 +++---
 arch/x86/kvm/mmu/paging_tmpl.h  |  4 ++--
 4 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 867fc82f1de5..c86a2beee92a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -432,7 +432,6 @@ struct kvm_mmu {
 	gpa_t root_pgd;
 	union kvm_mmu_role cpu_role;
 	union kvm_mmu_page_role mmu_role;
-	u8 root_level;
 	bool direct_map;
 	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4d1fa87718f8..5a6541d6a424 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2146,7 +2146,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
 	iterator->level = vcpu->arch.mmu->mmu_role.level;
 
 	if (iterator->level >= PT64_ROOT_4LEVEL &&
-	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
+	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
 	    !vcpu->arch.mmu->direct_map)
 		iterator->level = PT32E_ROOT_LEVEL;
 
@@ -3255,7 +3255,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 
 	if (free_active_root) {
 		if (mmu->mmu_role.level >= PT64_ROOT_4LEVEL &&
-		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
+		    (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
 		} else if (mmu->pae_root) {
 			for (i = 0; i < 4; ++i) {
@@ -3453,7 +3453,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * On SVM, reading PDPTRs might access guest memory, which might fault
 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
 	 */
-	if (mmu->root_level == PT32E_ROOT_LEVEL) {
+	if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
 		for (i = 0; i < 4; ++i) {
 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
 			if (!(pdptrs[i] & PT_PRESENT_MASK))
@@ -3477,7 +3477,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * Do we shadow a long mode page table? If so we need to
 	 * write-protect the guests page table root.
 	 */
-	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
+	if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
 		root = mmu_alloc_root(vcpu, root_gfn, 0,
 				      mmu->mmu_role.level, false);
 		mmu->root_hpa = root;
@@ -3516,7 +3516,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	for (i = 0; i < 4; ++i) {
 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
 
-		if (mmu->root_level == PT32E_ROOT_LEVEL) {
+		if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
 				mmu->pae_root[i] = INVALID_PAE_ROOT;
 				continue;
@@ -3558,7 +3558,7 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
 	 */
-	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
+	if (mmu->direct_map || mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
 	    mmu->mmu_role.level < PT64_ROOT_4LEVEL)
 		return 0;
 
@@ -3655,7 +3655,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 
 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 
-	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu->root_hpa;
 		sp = to_shadow_page(root);
 
@@ -4146,7 +4146,7 @@ static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 	 * later if necessary.
 	 */
 	if (mmu->mmu_role.level >= PT64_ROOT_4LEVEL &&
-	    mmu->root_level >= PT64_ROOT_4LEVEL)
+	    mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL)
 		return cached_root_available(vcpu, new_pgd, new_role);
 
 	return false;
@@ -4335,7 +4335,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 {
 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
 				vcpu->arch.reserved_gpa_bits,
-				context->root_level, is_efer_nx(context),
+				context->cpu_role.base.level, is_efer_nx(context),
 				guest_can_use_gbpages(vcpu),
 				is_cr4_pse(context),
 				guest_cpuid_is_amd_or_hygon(vcpu));
@@ -4739,7 +4739,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_role cpu_role)
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
-	context->root_level = cpu_role.base.level;
 
 	if (!is_cr0_pg(context))
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -4769,7 +4768,6 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
 		paging64_init_context(context);
 	else
 		paging32_init_context(context);
-	context->root_level = cpu_role.base.level;
 
 	reset_guest_paging_metadata(vcpu, context);
 }
@@ -4854,7 +4852,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		context->gva_to_gpa = ept_gva_to_gpa;
 		context->sync_page = ept_sync_page;
 		context->invlpg = ept_invlpg;
-		context->root_level = level;
 		context->direct_map = false;
 		update_permission_bitmask(context, true);
 		context->pkru_mask = 0;
@@ -4889,7 +4886,6 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_role new_ro
 	g_context->get_guest_pgd     = get_cr3;
 	g_context->get_pdptr         = kvm_pdptr_read;
 	g_context->inject_page_fault = kvm_inject_page_fault;
-	g_context->root_level        = new_role.base.level;
 
 	/*
 	 * L2 page tables are never shadowed, so there is no need to sync
diff --git a/arch/x86/kvm/mmu/mmu_audit.c b/arch/x86/kvm/mmu/mmu_audit.c
index f31fdb874f1f..eb9c59fcb957 100644
--- a/arch/x86/kvm/mmu/mmu_audit.c
+++ b/arch/x86/kvm/mmu/mmu_audit.c
@@ -59,11 +59,11 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 		return;
 
-	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu->root_hpa;
 
 		sp = to_shadow_page(root);
-		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
+		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->cpu_role.base.level);
 		return;
 	}
 
@@ -119,7 +119,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 	hpa =  pfn << PAGE_SHIFT;
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
 		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
-			     "ent %llxn", vcpu->arch.mmu->root_level, pfn,
+			     "ent %llxn", vcpu->arch.mmu->cpu_role.base.level, pfn,
 			     hpa, *sptep);
 }
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 847c4339e4d9..dd0b6f83171f 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -361,7 +361,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 
 	trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
-	walker->level = mmu->root_level;
+	walker->level = mmu->cpu_role.base.level;
 	pte           = mmu->get_guest_pgd(vcpu);
 	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);
 
@@ -656,7 +656,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	WARN_ON_ONCE(gw->gfn != base_gfn);
 	direct_access = gw->pte_access;
 
-	top_level = vcpu->arch.mmu->root_level;
+	top_level = vcpu->arch.mmu->cpu_role.base.level;
 	if (top_level == PT32E_ROOT_LEVEL)
 		top_level = PT32_ROOT_LEVEL;
 	/*
-- 
2.31.1


