From: Vitaly Kuznetsov <vkuznets@redhat.com>
To: kvm@vger.kernel.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Jim Mattson" <jmattson@google.com>,
	"Liran Alon" <liran.alon@oracle.com>,
	linux-kernel@vger.kernel.org
Subject: [PATCH v1 RESEND 6/9] x86/kvm/mmu: make space for source data caching in struct kvm_mmu
Date: Tue, 18 Sep 2018 18:09:03 +0200
Message-ID: <20180918160906.9241-7-vkuznets@redhat.com>
In-Reply-To: <20180918160906.9241-1-vkuznets@redhat.com>

In preparation for MMU reconfiguration avoidance we need space to
cache source data. As this partially intersects with kvm_mmu_page_role,
create a 64-bit union kvm_mmu_role holding both base_role and the
extended data. No functional change.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
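
Note for reviewers (not part of the commit message): a compile-time
sketch of the intended layout. base_role and the scache word are both
32 bits wide, so the anonymous struct overlays as_u64 exactly and the
whole role can be compared in a single 64-bit load. Type names are from
the patch, but the uint64_t spelling and the assertion are illustrative
only:

	#include <stdint.h>

	/* Illustration only: both sub-unions are 32 bits wide. */
	union kvm_mmu_page_role { uint32_t word; /* bitfields elided */ };
	union kvm_mmu_scache    { uint32_t word; };

	union kvm_mmu_role {
		uint64_t as_u64;
		struct {
			union kvm_mmu_page_role base_role;
			union kvm_mmu_scache scache;
		};
	};

	/* The two 32-bit halves must pack into the 64-bit view. */
	_Static_assert(sizeof(union kvm_mmu_role) == sizeof(uint64_t),
		       "base_role + scache must pack into 64 bits");

Later patches in the series can then skip a full MMU reconfiguration
with a single comparison along the lines of
"if (new_role.as_u64 == mmu->mmu_role.as_u64)".
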
 arch/x86/include/asm/kvm_host.h | 14 +++++++++++++-
 arch/x86/kvm/mmu.c              | 19 ++++++++++++-------
 arch/x86/kvm/vmx.c              |  2 +-
 3 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 527aaf45eba6..6ca7d28d57e9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -273,6 +273,18 @@ union kvm_mmu_page_role {
 	};
 };
 
+union kvm_mmu_scache {
+	unsigned int word;
+};
+
+union kvm_mmu_role {
+	unsigned long as_u64;
+	struct {
+		union kvm_mmu_page_role base_role;
+		union kvm_mmu_scache scache;
+	};
+};
+
 struct kvm_rmap_head {
 	unsigned long val;
 };
@@ -360,7 +372,7 @@ struct kvm_mmu {
 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
-	union kvm_mmu_page_role base_role;
+	union kvm_mmu_role mmu_role;
 	u8 root_level;
 	u8 shadow_root_level;
 	u8 ept_ad;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5f167823c50d..8d8e6fa75fa3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2359,7 +2359,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	int collisions = 0;
 	LIST_HEAD(invalid_list);
 
-	role = vcpu->arch.mmu->base_role;
+	role = vcpu->arch.mmu->mmu_role.base_role;
 	role.level = level;
 	role.direct = direct;
 	if (role.direct)
@@ -4407,7 +4407,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
-	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+	bool uses_nx = context->nx ||
+		context->mmu_role.base_role.smep_andnot_wp;
 	struct rsvd_bits_validate *shadow_zero_check;
 	int i;
 
@@ -4726,7 +4727,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
 
-	context->base_role.word = mmu_base_role_mask.word &
+	context->mmu_role.base_role.word = mmu_base_role_mask.word &
 				  kvm_calc_tdp_mmu_root_page_role(vcpu).word;
 	context->page_fault = tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
@@ -4807,7 +4808,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	else
 		paging32_init_context(vcpu, context);
 
-	context->base_role.word = mmu_base_role_mask.word &
+	context->mmu_role.base_role.word = mmu_base_role_mask.word &
 				  kvm_calc_shadow_mmu_root_page_role(vcpu).word;
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
@@ -4816,7 +4817,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 static union kvm_mmu_page_role
 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
 {
-	union kvm_mmu_page_role role = vcpu->arch.mmu->base_role;
+	union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base_role;
 
 	role.level = PT64_ROOT_4LEVEL;
 	role.direct = false;
@@ -4846,7 +4847,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->update_pte = ept_update_pte;
 	context->root_level = PT64_ROOT_4LEVEL;
 	context->direct_map = false;
-	context->base_role.word = root_page_role.word & mmu_base_role_mask.word;
+	context->mmu_role.base_role.word =
+		root_page_role.word & mmu_base_role_mask.word;
 	context->get_pdptr = kvm_pdptr_read;
 
 	update_permission_bitmask(vcpu, context, true);
@@ -5161,10 +5163,13 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 		local_flush = true;
 		while (npte--) {
+			unsigned int base_role =
+				vcpu->arch.mmu->mmu_role.base_role.word;
+
 			entry = *spte;
 			mmu_page_zap_pte(vcpu->kvm, sp, spte);
 			if (gentry &&
-			      !((sp->role.word ^ vcpu->arch.mmu->base_role.word)
+			      !((sp->role.word ^ base_role)
 			      & mmu_base_role_mask.word) && rmap_can_add(vcpu))
 				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 			if (need_remote_flush(entry, *spte))
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 29af40b9239f..79e0b0570dd1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9290,7 +9290,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
 
 		kvm_mmu_unload(vcpu);
 		mmu->ept_ad = accessed_dirty;
-		mmu->base_role.ad_disabled = !accessed_dirty;
+		mmu->mmu_role.base_role.ad_disabled = !accessed_dirty;
 		vmcs12->ept_pointer = address;
 		/*
 		 * TODO: Check what's the correct approach in case
-- 
2.14.4



Thread overview: 12+ messages
2018-09-18 16:08 [PATCH v1 RESEND 0/9] x86/kvm/nVMX: optimize MMU switch between L1 and L2 Vitaly Kuznetsov
2018-09-18 16:08 ` [PATCH v1 RESEND 1/9] x86/kvm/mmu: make vcpu->mmu a pointer to the current MMU Vitaly Kuznetsov
2018-09-18 16:08 ` [PATCH v1 RESEND 2/9] x86/kvm/mmu.c: set get_pdptr hook in kvm_init_shadow_ept_mmu() Vitaly Kuznetsov
2018-09-18 16:09 ` [PATCH v1 RESEND 3/9] x86/kvm/mmu.c: add kvm_mmu parameter to kvm_mmu_free_roots() Vitaly Kuznetsov
2018-09-18 16:09 ` [PATCH v1 RESEND 4/9] x86/kvm/mmu: introduce guest_mmu Vitaly Kuznetsov
2018-09-19 15:08   ` Sean Christopherson
2018-09-18 16:09 ` [PATCH v1 RESEND 5/9] x86/kvm/mmu: get rid of redundant kvm_mmu_setup() Vitaly Kuznetsov
2018-09-18 16:09 ` Vitaly Kuznetsov [this message]
2018-09-19 15:30   ` [PATCH v1 RESEND 6/9] x86/kvm/mmu: make space for source data caching in struct kvm_mmu Sean Christopherson
2018-09-18 16:09 ` [PATCH v1 RESEND 7/9] x86/kvm/nVMX: introduce scache for kvm_init_shadow_ept_mmu Vitaly Kuznetsov
2018-09-18 16:09 ` [PATCH v1 RESEND 8/9] x86/kvm/mmu: check if tdp/shadow MMU reconfiguration is needed Vitaly Kuznetsov
2018-09-18 16:09 ` [PATCH v1 RESEND 9/9] x86/kvm/mmu: check if MMU reconfiguration is needed in init_kvm_nested_mmu() Vitaly Kuznetsov
