From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
	vkuznets@redhat.com
Subject: [PATCH 35/43] KVM: VMX: Shadow VMCS secondary execution controls
Date: Thu, 13 Jun 2019 19:03:21 +0200
Message-ID: <1560445409-17363-36-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1560445409-17363-1-git-send-email-pbonzini@redhat.com>

From: Sean Christopherson <sean.j.christopherson@intel.com>

Prepare to shadow all major control fields on a per-VMCS basis, which
allows KVM to avoid costly VMWRITEs when switching between vmcs01 and
vmcs02.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/vmx/nested.c | 12 ++++++------
 arch/x86/kvm/vmx/vmx.c    | 40 ++++++++++++++++++++--------------------
 arch/x86/kvm/vmx/vmx.h    |  2 ++
 3 files changed, 28 insertions(+), 26 deletions(-)
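
The secondary_exec_controls_*() helpers used throughout this patch are
generated by the BUILD_CONTROLS_SHADOW() builder added earlier in the
series ("KVM: VMX: Add builder macros for shadowing controls").  As a
minimal sketch of the mechanism (the member name secondary_exec matches
the vmx.h hunk below; where the shadow struct is embedded and the exact
helper bodies are assumptions, not the literal macro expansion), the
idea is to cache the last value written so that a VMWRITE is issued
only when a control actually changes, and reads avoid VMREAD entirely:

/*
 * Illustrative expansion of BUILD_CONTROLS_SHADOW(secondary_exec,
 * SECONDARY_VM_EXEC_CONTROL); names and placement are assumptions.
 */
static inline void secondary_exec_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	/* Unconditional VMWRITE, and prime the shadow copy. */
	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, val);
	vmx->controls_shadow.secondary_exec = val;
}

static inline void secondary_exec_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	/* Skip the VMWRITE if the shadowed value is already up to date. */
	if (vmx->controls_shadow.secondary_exec != val) {
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, val);
		vmx->controls_shadow.secondary_exec = val;
	}
}

static inline u32 secondary_exec_controls_get(struct vcpu_vmx *vmx)
{
	/* Serve reads from the cache instead of doing a VMREAD. */
	return vmx->controls_shadow.secondary_exec;
}

static inline void secondary_exec_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	secondary_exec_controls_set(vmx, secondary_exec_controls_get(vmx) | val);
}

static inline void secondary_exec_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	secondary_exec_controls_set(vmx, secondary_exec_controls_get(vmx) & ~val);
}

static inline void secondary_exec_controls_reset_shadow(struct vcpu_vmx *vmx)
{
	/* Re-sync the cache from the newly loaded VMCS on a VMCS switch. */
	vmx->controls_shadow.secondary_exec =
		vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
}

With every call site funnelled through these helpers, e.g.
secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS) instead
of a raw vmcs_set_bits(), the cached value stays coherent, which is what
lets the series elide redundant VMWRITEs when switching between vmcs01
and vmcs02.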

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index a754a2ed6295..9703fcf19837 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -192,7 +192,7 @@ static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
 
 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 {
-	vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
+	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
 	vmcs_write64(VMCS_LINK_POINTER, -1ull);
 }
 
@@ -287,6 +287,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 	vm_exit_controls_reset_shadow(vmx);
 	pin_controls_reset_shadow(vmx);
 	exec_controls_reset_shadow(vmx);
+	secondary_exec_controls_reset_shadow(vmx);
 	vmx_segment_cache_clear(vmx);
 }
 
@@ -2083,7 +2084,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 			vmcs_write16(GUEST_INTR_STATUS,
 				vmcs12->guest_intr_status);
 
-		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+		secondary_exec_controls_init(vmx, exec_control);
 	}
 
 	/*
@@ -2853,8 +2854,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 			hpa = page_to_phys(vmx->nested.apic_access_page);
 			vmcs_write64(APIC_ACCESS_ADDR, hpa);
 		} else {
-			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
-					SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+			secondary_exec_controls_clearbit(vmx,
+				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
 		}
 	}
 
@@ -4667,8 +4668,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
 {
 	vmx->nested.current_vmptr = vmptr;
 	if (enable_shadow_vmcs) {
-		vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
-			      SECONDARY_EXEC_SHADOW_VMCS);
+		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
 		vmcs_write64(VMCS_LINK_POINTER,
 			     __pa(vmx->vmcs01.shadow_vmcs));
 		vmx->nested.need_vmcs12_to_shadow_sync = true;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index fcb1a80270bc..1104f52d281c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2909,6 +2909,7 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	/*
 	 * Pass through host's Machine Check Enable value to hw_cr4, which
 	 * is in force while we are in guest mode.  Do not let guests control
@@ -2919,20 +2920,19 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
 	if (enable_unrestricted_guest)
 		hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
-	else if (to_vmx(vcpu)->rmode.vm86_active)
+	else if (vmx->rmode.vm86_active)
 		hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
 	else
 		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
 
 	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
 		if (cr4 & X86_CR4_UMIP) {
-			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
-				SECONDARY_EXEC_DESC);
+			secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
 			hw_cr4 &= ~X86_CR4_UMIP;
 		} else if (!is_guest_mode(vcpu) ||
-			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
-			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
-					SECONDARY_EXEC_DESC);
+			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
+			secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
+		}
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
@@ -2947,7 +2947,7 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return 1;
 	}
 
-	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
+	if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
 		return 1;
 
 	vcpu->arch.cr4 = cr4;
@@ -3565,7 +3565,7 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
 	u8 mode = 0;
 
 	if (cpu_has_secondary_exec_ctrls() &&
-	    (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
+	    (secondary_exec_controls_get(to_vmx(vcpu)) &
 	     SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
 		mode |= MSR_BITMAP_MODE_X2APIC;
 		if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
@@ -3845,11 +3845,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
 	if (cpu_has_secondary_exec_ctrls()) {
 		if (kvm_vcpu_apicv_active(vcpu))
-			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+			secondary_exec_controls_setbit(vmx,
 				      SECONDARY_EXEC_APIC_REGISTER_VIRT |
 				      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 		else
-			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+			secondary_exec_controls_clearbit(vmx,
 					SECONDARY_EXEC_APIC_REGISTER_VIRT |
 					SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 	}
@@ -4047,8 +4047,7 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	if (cpu_has_secondary_exec_ctrls()) {
 		vmx_compute_secondary_exec_control(vmx);
-		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
-			     vmx->secondary_exec_control);
+		secondary_exec_controls_init(vmx, vmx->secondary_exec_control);
 	}
 
 	if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
@@ -5969,6 +5968,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 
 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 sec_exec_control;
 
 	if (!lapic_in_kernel(vcpu))
@@ -5980,11 +5980,11 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 
 	/* Postpone execution until vmcs01 is the current VMCS. */
 	if (is_guest_mode(vcpu)) {
-		to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
+		vmx->nested.change_vmcs01_virtual_apic_mode = true;
 		return;
 	}
 
-	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+	sec_exec_control = secondary_exec_controls_get(vmx);
 	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
 
@@ -6006,7 +6006,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		break;
 	}
-	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
+	secondary_exec_controls_set(vmx, sec_exec_control);
 
 	vmx_update_msr_bitmap(vcpu);
 }
@@ -6827,7 +6827,7 @@ static int vmx_get_lpage_level(void)
 		return PT_PDPE_LEVEL;
 }
 
-static void vmcs_set_secondary_exec_control(u32 new_ctl)
+static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx)
 {
 	/*
 	 * These bits in the secondary execution controls field
@@ -6841,10 +6841,10 @@ static void vmcs_set_secondary_exec_control(u32 new_ctl)
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 		SECONDARY_EXEC_DESC;
 
-	u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+	u32 new_ctl = vmx->secondary_exec_control;
+	u32 cur_ctl = secondary_exec_controls_get(vmx);
 
-	vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
-		     (new_ctl & ~mask) | (cur_ctl & mask));
+	secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
 }
 
 /*
@@ -6982,7 +6982,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 
 	if (cpu_has_secondary_exec_ctrls()) {
 		vmx_compute_secondary_exec_control(vmx);
-		vmcs_set_secondary_exec_control(vmx->secondary_exec_control);
+		vmcs_set_secondary_exec_control(vmx);
 	}
 
 	if (nested_vmx_allowed(vcpu))
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 0bb0f75ebcb9..b4d5684b0be9 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -90,6 +90,7 @@ struct vmx_controls_shadow {
 	u32 vm_exit;
 	u32 pin;
 	u32 exec;
+	u32 secondary_exec;
 };
 
 /*
@@ -427,6 +428,7 @@ static inline u8 vmx_get_rvi(void)
 BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
 BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
 BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
+BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
 
 static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 {
-- 
1.8.3.1