* [PATCH 00/24] KVM: nSVM: event fixes and migration support
@ 2020-05-20 17:21 Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 01/24] KVM: nSVM: fix condition for filtering async PF Paolo Bonzini
                   ` (24 more replies)
  0 siblings, 25 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Large parts of this series were posted before (patches 1, 3-4-5 and
6-7-8-12-13-14).  This is basically what I'd like to get into 5.8 as
far as nested SVM is concerned; the fix for exception vmexits is related
to migration support, because it gets rid of the exit_required flag
and therefore consolidates the SVM migration format.

There are a couple more bugfixes (2 and 21), the latter of which actually
affects VMX as well.

The SVM migration data consists of:

- the GIF state

- the guest mode and nested-run-pending flags

- the host state from before VMRUN

- the nested VMCB control state

The last two items are conveniently packaged in VMCB format.  Compared
to the previous prototype, HF_HIF_MASK is removed since it is part of
"the host state from before VMRUN".

The patch has been tested with the QEMU changes after my signature,
where it also fixes system_reset while x86/svm.flat runs.

Paolo

Paolo Bonzini (24):
  KVM: nSVM: fix condition for filtering async PF
  KVM: nSVM: leave ASID aside in copy_vmcb_control_area
  KVM: nSVM: inject exceptions via svm_check_nested_events
  KVM: nSVM: remove exit_required
  KVM: nSVM: correctly inject INIT vmexits
  KVM: nSVM: move map argument out of enter_svm_guest_mode
  KVM: nSVM: extract load_nested_vmcb_control
  KVM: nSVM: extract preparation of VMCB for nested run
  KVM: nSVM: clean up tsc_offset update
  KVM: nSVM: pass vmcb_control_area to copy_vmcb_control_area
  KVM: nSVM: remove trailing padding for struct vmcb_control_area
  KVM: nSVM: save all control fields in svm->nested
  KVM: nSVM: do not reload pause filter fields from VMCB
  KVM: nSVM: remove HF_VINTR_MASK
  KVM: nSVM: remove HF_HIF_MASK
  KVM: nSVM: split nested_vmcb_check_controls
  KVM: nSVM: do all MMU switch work in init/uninit functions
  KVM: nSVM: leave guest mode when clearing EFER.SVME
  KVM: nSVM: extract svm_set_gif
  KVM: MMU: pass arbitrary CR0/CR4/EFER to kvm_init_shadow_mmu
  KVM: x86: always update CR3 in VMCB
  uaccess: add memzero_user
  selftests: kvm: add a SVM version of state-test
  KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE

 arch/x86/include/asm/kvm_host.h               |   2 -
 arch/x86/include/asm/svm.h                    |   9 +-
 arch/x86/include/uapi/asm/kvm.h               |  17 +-
 arch/x86/kvm/cpuid.h                          |   5 +
 arch/x86/kvm/irq.c                            |   1 +
 arch/x86/kvm/mmu.h                            |   2 +-
 arch/x86/kvm/mmu/mmu.c                        |  14 +-
 arch/x86/kvm/svm/nested.c                     | 525 +++++++++++-------
 arch/x86/kvm/svm/svm.c                        | 107 ++--
 arch/x86/kvm/svm/svm.h                        |  32 +-
 arch/x86/kvm/vmx/nested.c                     |   5 -
 arch/x86/kvm/vmx/vmx.c                        |   5 +-
 arch/x86/kvm/x86.c                            |   3 +-
 include/linux/uaccess.h                       |   1 +
 lib/usercopy.c                                |  63 +++
 .../testing/selftests/kvm/x86_64/state_test.c |  65 ++-
 16 files changed, 549 insertions(+), 307 deletions(-)

-- 
2.18.2


diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h
index 3f3f780c8c..c4a8c10e2d 100644
--- a/linux-headers/asm-x86/kvm.h
+++ b/linux-headers/asm-x86/kvm.h
@@ -385,18 +385,22 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
 
 #define KVM_STATE_NESTED_FORMAT_VMX	0
-#define KVM_STATE_NESTED_FORMAT_SVM	1	/* unused */
+#define KVM_STATE_NESTED_FORMAT_SVM	1
 
 #define KVM_STATE_NESTED_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING	0x00000002
 #define KVM_STATE_NESTED_EVMCS		0x00000004
 #define KVM_STATE_NESTED_MTF_PENDING	0x00000008
+#define KVM_STATE_NESTED_GIF_SET	0x00000100
 
 #define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON	0x00000002
 
 #define KVM_STATE_NESTED_VMX_VMCS_SIZE	0x1000
 
+#define KVM_STATE_NESTED_SVM_VMCB_SIZE	0x1000
+
+
 struct kvm_vmx_nested_state_data {
 	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
 	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
@@ -411,6 +415,15 @@ struct kvm_vmx_nested_state_hdr {
 	} smm;
 };
 
+struct kvm_svm_nested_state_data {
+	/* Save area only used if KVM_STATE_NESTED_RUN_PENDING.  */
+	__u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
+};
+
+struct kvm_svm_nested_state_hdr {
+	__u64 vmcb_pa;
+};
+
 /* for KVM_CAP_NESTED_STATE */
 struct kvm_nested_state {
 	__u16 flags;
@@ -419,6 +432,7 @@ struct kvm_nested_state {
 
 	union {
 		struct kvm_vmx_nested_state_hdr vmx;
+		struct kvm_svm_nested_state_hdr svm;
 
 		/* Pad the header to 128 bytes.  */
 		__u8 pad[120];
@@ -431,6 +445,7 @@ struct kvm_nested_state {
 	 */
 	union {
 		struct kvm_vmx_nested_state_data vmx[0];
+		struct kvm_svm_nested_state_data svm[0];
 	} data;
 };
 
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index 9804495a46..f4ff71da0b 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -1017,6 +1017,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_VCPU_RESETS 179
 #define KVM_CAP_S390_PROTECTED 180
 #define KVM_CAP_PPC_SECURE_GUEST 181
+#define KVM_CAP_HALT_POLL 182
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index e818fc712a..9627f88ebf 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2103,6 +2103,11 @@ static inline bool cpu_has_vmx(CPUX86State *env)
     return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
 }
 
+static inline bool cpu_has_svm(CPUX86State *env)
+{
+    return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
+}
+
 /*
  * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
  * Since it was set, CR4.VMXE must remain set as long as vCPU is in
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 9c256ab159..6833400191 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5939,6 +5939,7 @@ static void x86_cpu_reset(DeviceState *dev)
     /* init to reset state */
 
     env->hflags2 |= HF2_GIF_MASK;
+    env->hflags &= ~HF_GUEST_MASK;
 
     cpu_x86_update_cr0(env, 0x60000010);
     env->a20_mask = ~0x0;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 4901c6dd74..599a34b49d 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -1834,16 +1834,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
     if (max_nested_state_len > 0) {
         assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
 
-        if (cpu_has_vmx(env)) {
+        if (cpu_has_vmx(env) || cpu_has_svm(env)) {
             struct kvm_vmx_nested_state_hdr *vmx_hdr;
 
             env->nested_state = g_malloc0(max_nested_state_len);
             env->nested_state->size = max_nested_state_len;
             env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
 
-            vmx_hdr = &env->nested_state->hdr.vmx;
-            vmx_hdr->vmxon_pa = -1ull;
-            vmx_hdr->vmcs12_pa = -1ull;
+            if (cpu_has_vmx(env)) {
+                    vmx_hdr = &env->nested_state->hdr.vmx;
+                    vmx_hdr->vmxon_pa = -1ull;
+                    vmx_hdr->vmcs12_pa = -1ull;
+            }
         }
     }
 
@@ -3847,6 +3849,20 @@ static int kvm_put_nested_state(X86CPU *cpu)
         return 0;
     }
 
+    /*
+     * Copy flags that are affected by reset from env->hflags and env->hflags2.
+     */
+    if (env->hflags & HF_GUEST_MASK) {
+        env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
+    } else {
+        env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
+    }
+    if (env->hflags2 & HF2_GIF_MASK) {
+        env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
+    } else {
+        env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
+    }
+
     assert(env->nested_state->size <= max_nested_state_len);
     return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
 }
@@ -3875,11 +3891,19 @@ static int kvm_get_nested_state(X86CPU *cpu)
         return ret;
     }
 
+    /*
+     * Copy flags that are affected by reset to env->hflags and env->hflags2.
+     */
     if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
         env->hflags |= HF_GUEST_MASK;
     } else {
         env->hflags &= ~HF_GUEST_MASK;
     }
+    if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
+        env->hflags2 |= HF2_GIF_MASK;
+    } else {
+        env->hflags2 &= ~HF2_GIF_MASK;
+    }
 
     return ret;
 }
@@ -3891,6 +3915,12 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
 
     assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
 
+    /* must be before kvm_put_nested_state so that EFER.SVME is set */
+    ret = kvm_put_sregs(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+
     if (level >= KVM_PUT_RESET_STATE) {
         ret = kvm_put_nested_state(x86_cpu);
         if (ret < 0) {
@@ -3924,10 +3954,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
     if (ret < 0) {
         return ret;
     }
-    ret = kvm_put_sregs(x86_cpu);
-    if (ret < 0) {
-        return ret;
-    }
     /* must be before kvm_put_msrs */
     ret = kvm_inject_mce_oldstyle(x86_cpu);
     if (ret < 0) {
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 0c96531a56..8684a247c1 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1071,13 +1071,40 @@ static const VMStateDescription vmstate_vmx_nested_state = {
     }
 };
 
+static bool svm_nested_state_needed(void *opaque)
+{
+    struct kvm_nested_state *nested_state = opaque;
+
+    /*
+     * HF2_GIF_MASK is relevant for non-guest mode but it is already
+     * serialized via hflags2.
+     */
+    return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM &&
+            nested_state->size > offsetof(struct kvm_nested_state, data));
+}
+
+static const VMStateDescription vmstate_svm_nested_state = {
+    .name = "cpu/kvm_nested_state/svm",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = svm_nested_state_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
+        VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
+                            struct kvm_nested_state,
+                            KVM_STATE_NESTED_SVM_VMCB_SIZE),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool nested_state_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
     CPUX86State *env = &cpu->env;
 
     return (env->nested_state &&
-            vmx_nested_state_needed(env->nested_state));
+            (vmx_nested_state_needed(env->nested_state) ||
+             svm_nested_state_needed(env->nested_state)));
 }
 
 static int nested_state_post_load(void *opaque, int version_id)
@@ -1139,6 +1166,7 @@ static const VMStateDescription vmstate_kvm_nested_state = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_vmx_nested_state,
+        &vmstate_svm_nested_state,
         NULL
     }
 };





* [PATCH 01/24] KVM: nSVM: fix condition for filtering async PF
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-26  0:23   ` Sasha Levin
  2020-05-20 17:21 ` [PATCH 02/24] KVM: nSVM: leave ASID aside in copy_vmcb_control_area Paolo Bonzini
                   ` (23 subsequent siblings)
  24 siblings, 1 reply; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel, stable

Async page faults have to be trapped in the host (L1 in this case),
since the APF reason was passed from L0 to L1 and stored in the L1 APF
data page.  This was completely reversed: the page faults were passed
to the guest, an L2 hypervisor.
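
Stated as a standalone predicate (an illustration of the two-line change
below, not code from the patch):

    #include <stdbool.h>

    /*
     * Sketch: should a #PF intercept taken while L2 runs be handled by
     * KVM itself (L1 here) instead of being reflected to the guest
     * hypervisor?
     */
    static bool pf_exit_stays_in_host(bool npt_enabled,
                                      unsigned int host_apf_reason)
    {
            /* With shadow paging every #PF stays in the host; with NPT
             * only async PFs (host_apf_reason != 0) do. */
            return !npt_enabled || host_apf_reason != 0;
    }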

Cc: stable@vger.kernel.org
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index a89a166d1cb8..f4cd2d0cc360 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -880,8 +880,8 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
-		/* When we're shadowing, trap PFs, but not async PF */
-		if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
+		/* Trap async PF even if not shadowing */
+		if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason)
 			return NESTED_EXIT_HOST;
 		break;
 	default:
-- 
2.18.2




* [PATCH 02/24] KVM: nSVM: leave ASID aside in copy_vmcb_control_area
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 01/24] KVM: nSVM: fix condition for filtering async PF Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-26  0:23   ` Sasha Levin
  2020-05-20 17:21 ` [PATCH 03/24] KVM: nSVM: inject exceptions via svm_check_nested_events Paolo Bonzini
                   ` (22 subsequent siblings)
  24 siblings, 1 reply; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel, stable

Restoring the ASID from the hsave area on VMEXIT is wrong, because its
value depends on the handling of TLB flushes.  Just skipping the field in
copy_vmcb_control_area will do.

Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index f4cd2d0cc360..d544cce4f964 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -150,7 +150,7 @@ static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb
 	dst->iopm_base_pa         = from->iopm_base_pa;
 	dst->msrpm_base_pa        = from->msrpm_base_pa;
 	dst->tsc_offset           = from->tsc_offset;
-	dst->asid                 = from->asid;
+	/* asid not copied, it is handled manually for svm->vmcb.  */
 	dst->tlb_ctl              = from->tlb_ctl;
 	dst->int_ctl              = from->int_ctl;
 	dst->int_vector           = from->int_vector;
-- 
2.18.2




* [PATCH 03/24] KVM: nSVM: inject exceptions via svm_check_nested_events
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 01/24] KVM: nSVM: fix condition for filtering async PF Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 02/24] KVM: nSVM: leave ASID aside in copy_vmcb_control_area Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 04/24] KVM: nSVM: remove exit_required Paolo Bonzini
                   ` (21 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

This allows exceptions injected by the emulator to be properly delivered
as vmexits.  The code also becomes simpler, because we can just let all
L0-intercepted exceptions go through the usual path.  In particular, our
emulation of DR6 for intercepted #DB exits (SVM's counterpart of the VMX
#DB exit qualification) is very much simplified, because the vmexit
injection path can use kvm_deliver_exception_payload to update it.
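
As a condensed illustration of the new flow (the real logic is in the
diff below):

    /* Sketch: does L1 intercept exception vector nr?  This mirrors the
     * new nested_exit_on_exception(). */
    static int l1_intercepts_exception(unsigned int intercept_exceptions,
                                       unsigned int nr)
    {
            return !!(intercept_exceptions & (1u << nr));
    }

    /*
     * If it does, svm_check_nested_events() synthesizes the vmexit with:
     *   exit_code   = SVM_EXIT_EXCP_BASE + nr
     *   exit_info_1 = the error code, when the exception has one
     *   exit_info_2 = CR2 or the async-PF token (#PF only)
     * Other payloads (e.g. the #DB bits destined for DR6) are applied via
     * kvm_deliver_exception_payload() before nested_svm_vmexit().
     */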

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 129 ++++++++++++++------------------------
 arch/x86/kvm/svm/svm.c    |   9 ---
 arch/x86/kvm/svm/svm.h    |   1 +
 3 files changed, 47 insertions(+), 92 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index d544cce4f964..e80349132ea1 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -111,6 +111,8 @@ void recalc_intercepts(struct vcpu_svm *svm)
 	h = &svm->nested.hsave->control;
 	g = &svm->nested;
 
+	svm->nested.host_intercept_exceptions = h->intercept_exceptions;
+
 	c->intercept_cr = h->intercept_cr;
 	c->intercept_dr = h->intercept_dr;
 	c->intercept_exceptions = h->intercept_exceptions;
@@ -616,50 +618,6 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
 }
 
-/* DB exceptions for our internal use must not cause vmexit */
-static int nested_svm_intercept_db(struct vcpu_svm *svm)
-{
-	unsigned long dr6 = svm->vmcb->save.dr6;
-
-	/* Always catch it and pass it to userspace if debugging.  */
-	if (svm->vcpu.guest_debug &
-	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-		return NESTED_EXIT_HOST;
-
-	/* if we're not singlestepping, it's not ours */
-	if (!svm->nmi_singlestep)
-		goto reflected_db;
-
-	/* if it's not a singlestep exception, it's not ours */
-	if (!(dr6 & DR6_BS))
-		goto reflected_db;
-
-	/* if the guest is singlestepping, it should get the vmexit */
-	if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
-		disable_nmi_singlestep(svm);
-		goto reflected_db;
-	}
-
-	/* it's ours, the nested hypervisor must not see this one */
-	return NESTED_EXIT_HOST;
-
-reflected_db:
-	/*
-	 * Synchronize guest DR6 here just like in kvm_deliver_exception_payload;
-	 * it will be moved into the nested VMCB by nested_svm_vmexit.  Once
-	 * exceptions will be moved to svm_check_nested_events, all this stuff
-	 * will just go away and we could just return NESTED_EXIT_HOST
-	 * unconditionally.  db_interception will queue the exception, which
-	 * will be processed by svm_check_nested_events if a nested vmexit is
-	 * required, and we will just use kvm_deliver_exception_payload to copy
-	 * the payload to DR6 before vmexit.
-	 */
-	WARN_ON(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT);
-	svm->vcpu.arch.dr6 &= ~(DR_TRAP_BITS | DR6_RTM);
-	svm->vcpu.arch.dr6 |= dr6 & ~DR6_FIXED_1;
-	return NESTED_EXIT_DONE;
-}
-
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 {
 	unsigned port, size, iopm_len;
@@ -710,20 +668,12 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 		break;
 	}
 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
-		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
-		if (svm->nested.intercept_exceptions & excp_bits) {
-			if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
-				vmexit = nested_svm_intercept_db(svm);
-			else if (exit_code == SVM_EXIT_EXCP_BASE + BP_VECTOR &&
-				 svm->vcpu.guest_debug & KVM_GUESTDBG_USE_SW_BP)
-				vmexit = NESTED_EXIT_HOST;
-			else
-				vmexit = NESTED_EXIT_DONE;
-		}
-		/* async page fault always cause vmexit */
-		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
-			 svm->vcpu.arch.exception.nested_apf != 0)
-			vmexit = NESTED_EXIT_DONE;
+		/*
+		 * Host-intercepted exceptions have been checked already in
+		 * nested_svm_exit_special.  There is nothing to do here,
+		 * the vmexit is injected by svm_check_nested_events.
+		 */
+		vmexit = NESTED_EXIT_DONE;
 		break;
 	}
 	case SVM_EXIT_ERR: {
@@ -768,35 +718,38 @@ int nested_svm_check_permissions(struct vcpu_svm *svm)
 	return 0;
 }
 
-int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
-			       bool has_error_code, u32 error_code)
+static bool nested_exit_on_exception(struct vcpu_svm *svm)
 {
-	int vmexit;
+	unsigned int nr = svm->vcpu.arch.exception.nr;
 
-	if (!is_guest_mode(&svm->vcpu))
-		return 0;
+	return (svm->nested.intercept_exceptions & (1 << nr));
+}
 
-	vmexit = nested_svm_intercept(svm);
-	if (vmexit != NESTED_EXIT_DONE)
-		return 0;
+static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
+{
+	unsigned int nr = svm->vcpu.arch.exception.nr;
 
 	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
 	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = error_code;
+
+	if (svm->vcpu.arch.exception.has_error_code)
+		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
 
 	/*
 	 * EXITINFO2 is undefined for all exception intercepts other
 	 * than #PF.
 	 */
-	if (svm->vcpu.arch.exception.nested_apf)
-		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
-	else if (svm->vcpu.arch.exception.has_payload)
-		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
-	else
-		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
+	if (nr == PF_VECTOR) {
+		if (svm->vcpu.arch.exception.nested_apf)
+			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
+		else if (svm->vcpu.arch.exception.has_payload)
+			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
+		else
+			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
+	} else if (svm->vcpu.arch.exception.has_payload)
+		kvm_deliver_exception_payload(&svm->vcpu);
 
-	svm->nested.exit_required = true;
-	return vmexit;
+	nested_svm_vmexit(svm);
 }
 
 static void nested_svm_smi(struct vcpu_svm *svm)
@@ -835,6 +788,15 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 		kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
 		svm->nested.nested_run_pending;
 
+	if (vcpu->arch.exception.pending) {
+		if (block_nested_events)
+                        return -EBUSY;
+		if (!nested_exit_on_exception(svm))
+			return 0;
+		nested_svm_inject_exception_vmexit(svm);
+		return 0;
+	}
+
 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
 		if (block_nested_events)
 			return -EBUSY;
@@ -872,18 +834,19 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
 	switch (exit_code) {
 	case SVM_EXIT_INTR:
 	case SVM_EXIT_NMI:
-	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
-		return NESTED_EXIT_HOST;
 	case SVM_EXIT_NPF:
-		/* For now we are always handling NPFs when using them */
-		if (npt_enabled)
+		return NESTED_EXIT_HOST;
+	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
+		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
+
+		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
 			return NESTED_EXIT_HOST;
-		break;
-	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
-		/* Trap async PF even if not shadowing */
-		if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason)
+		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
+			 svm->vcpu.arch.apf.host_apf_reason)
+			/* Trap async PF even if not shadowing */
 			return NESTED_EXIT_HOST;
 		break;
+	}
 	default:
 		break;
 	}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9987f6fe9d88..9da4e5b6d724 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -331,17 +331,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	unsigned nr = vcpu->arch.exception.nr;
 	bool has_error_code = vcpu->arch.exception.has_error_code;
-	bool reinject = vcpu->arch.exception.injected;
 	u32 error_code = vcpu->arch.exception.error_code;
 
-	/*
-	 * If we are within a nested VM we'd better #VMEXIT and let the guest
-	 * handle the exception
-	 */
-	if (!reinject &&
-	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
-		return;
-
 	kvm_deliver_exception_payload(&svm->vcpu);
 
 	if (nr == BP_VECTOR && !nrips) {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5cc559ab862d..8342032291fc 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -86,6 +86,7 @@ struct nested_state {
 	u64 hsave_msr;
 	u64 vm_cr_msr;
 	u64 vmcb;
+	u32 host_intercept_exceptions;
 
 	/* These are the merged vectors */
 	u32 *msrpm;
-- 
2.18.2




* [PATCH 04/24] KVM: nSVM: remove exit_required
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (2 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 03/24] KVM: nSVM: inject exceptions via svm_check_nested_events Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 05/24] KVM: nSVM: correctly inject INIT vmexits Paolo Bonzini
                   ` (20 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

All events now inject vmexits before vmentry rather than after vmexit.  Therefore,
exit_required is not set anymore and we can remove it.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c |  3 +--
 arch/x86/kvm/svm/svm.c    | 14 --------------
 arch/x86/kvm/svm/svm.h    |  3 ---
 3 files changed, 1 insertion(+), 19 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index e80349132ea1..dd2868dd6129 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -785,8 +785,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	bool block_nested_events =
-		kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
-		svm->nested.nested_run_pending;
+		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
 
 	if (vcpu->arch.exception.pending) {
 		if (block_nested_events)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9da4e5b6d724..04332d0efa5f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2889,13 +2889,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
 
-	if (unlikely(svm->nested.exit_required)) {
-		nested_svm_vmexit(svm);
-		svm->nested.exit_required = false;
-
-		return 1;
-	}
-
 	if (is_guest_mode(vcpu)) {
 		int vmexit;
 
@@ -3327,13 +3320,6 @@ static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
-	/*
-	 * A vmexit emulation is required before the vcpu can be executed
-	 * again.
-	 */
-	if (unlikely(svm->nested.exit_required))
-		return EXIT_FASTPATH_NONE;
-
 	/*
 	 * Disable singlestep if we're injecting an interrupt/exception.
 	 * We don't want our modified rflags to be pushed on the stack where
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 8342032291fc..89fab75dd4f5 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -95,9 +95,6 @@ struct nested_state {
 	u64 vmcb_msrpm;
 	u64 vmcb_iopm;
 
-	/* A VMEXIT is required but not yet emulated */
-	bool exit_required;
-
 	/* A VMRUN has started but has not yet been performed, so
 	 * we cannot inject a nested vmexit yet.  */
 	bool nested_run_pending;
-- 
2.18.2




* [PATCH 05/24] KVM: nSVM: correctly inject INIT vmexits
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (3 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 04/24] KVM: nSVM: remove exit_required Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 06/24] KVM: nSVM: move map argument out of enter_svm_guest_mode Paolo Bonzini
                   ` (19 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

The usual drill at this point, except there is no code to remove because this
case was not handled at all.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index dd2868dd6129..7efefceb5f2f 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -25,6 +25,7 @@
 #include "trace.h"
 #include "mmu.h"
 #include "x86.h"
+#include "lapic.h"
 #include "svm.h"
 
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -781,11 +782,37 @@ static void nested_svm_intr(struct vcpu_svm *svm)
 	nested_svm_vmexit(svm);
 }
 
+static inline bool nested_exit_on_init(struct vcpu_svm *svm)
+{
+	return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
+}
+
+static void nested_svm_init(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
+	svm->vmcb->control.exit_info_1 = 0;
+	svm->vmcb->control.exit_info_2 = 0;
+
+	nested_svm_vmexit(svm);
+}
+
+
 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	bool block_nested_events =
 		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	if (lapic_in_kernel(vcpu) &&
+	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
+		if (block_nested_events)
+			return -EBUSY;
+		if (!nested_exit_on_init(svm))
+			return 0;
+		nested_svm_init(svm);
+		return 0;
+	}
 
 	if (vcpu->arch.exception.pending) {
 		if (block_nested_events)
-- 
2.18.2




* [PATCH 06/24] KVM: nSVM: move map argument out of enter_svm_guest_mode
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (4 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 05/24] KVM: nSVM: correctly inject INIT vmexits Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 07/24] KVM: nSVM: extract load_nested_vmcb_control Paolo Bonzini
                   ` (18 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Unmapping the nested VMCB in enter_svm_guest_mode is a bit of a wart,
since the map is not used elsewhere in the function.  There are
just two callers, so move the unmapping to them.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 14 ++++++--------
 arch/x86/kvm/svm/svm.c    |  3 ++-
 arch/x86/kvm/svm/svm.h    |  2 +-
 3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 7efefceb5f2f..083f11d5e3fa 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -229,7 +229,7 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 }
 
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-			  struct vmcb *nested_vmcb, struct kvm_host_map *map)
+			  struct vmcb *nested_vmcb)
 {
 	bool evaluate_pending_interrupts =
 		is_intercept(svm, INTERCEPT_VINTR) ||
@@ -308,8 +308,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vmcb->control.pause_filter_thresh =
 		nested_vmcb->control.pause_filter_thresh;
 
-	kvm_vcpu_unmap(&svm->vcpu, map, true);
-
 	/* Enter Guest-Mode */
 	enter_guest_mode(&svm->vcpu);
 
@@ -372,10 +370,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 		nested_vmcb->control.exit_code_hi = 0;
 		nested_vmcb->control.exit_info_1  = 0;
 		nested_vmcb->control.exit_info_2  = 0;
-
-		kvm_vcpu_unmap(&svm->vcpu, &map, true);
-
-		return ret;
+		goto out;
 	}
 
 	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
@@ -418,7 +413,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	copy_vmcb_control_area(hsave, vmcb);
 
 	svm->nested.nested_run_pending = 1;
-	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
+	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);
 
 	if (!nested_svm_vmrun_msrpm(svm)) {
 		svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
@@ -429,6 +424,9 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 		nested_svm_vmexit(svm);
 	}
 
+out:
+	kvm_vcpu_unmap(&svm->vcpu, &map, true);
+
 	return ret;
 }
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 04332d0efa5f..47c565338426 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3820,7 +3820,8 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 		if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
 			return 1;
 		nested_vmcb = map.hva;
-		enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb);
+		kvm_vcpu_unmap(&svm->vcpu, &map, true);
 	}
 	return 0;
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 89fab75dd4f5..33e3f09d7a8e 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -395,7 +395,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 }
 
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-			  struct vmcb *nested_vmcb, struct kvm_host_map *map);
+			  struct vmcb *nested_vmcb);
 int nested_svm_vmrun(struct vcpu_svm *svm);
 void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);
-- 
2.18.2




* [PATCH 07/24] KVM: nSVM: extract load_nested_vmcb_control
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (5 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 06/24] KVM: nSVM: move map argument out of enter_svm_guest_mode Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 08/24] KVM: nSVM: extract preparation of VMCB for nested run Paolo Bonzini
                   ` (17 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

When restoring SVM nested state, the control state cache in svm->nested
will have to be filled, but the save state will not have to be moved
into svm->vmcb.  Therefore, pull the code that handles the control area
into a separate function.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 38 ++++++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 083f11d5e3fa..b1d0d0519664 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -228,6 +228,23 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 	return true;
 }
 
+static void load_nested_vmcb_control(struct vcpu_svm *svm,
+				     struct vmcb_control_area *control)
+{
+	svm->nested.nested_cr3 = control->nested_cr3;
+
+	svm->nested.vmcb_msrpm = control->msrpm_base_pa & ~0x0fffULL;
+	svm->nested.vmcb_iopm  = control->iopm_base_pa  & ~0x0fffULL;
+
+	/* cache intercepts */
+	svm->nested.intercept_cr         = control->intercept_cr;
+	svm->nested.intercept_dr         = control->intercept_dr;
+	svm->nested.intercept_exceptions = control->intercept_exceptions;
+	svm->nested.intercept            = control->intercept;
+
+	svm->vcpu.arch.tsc_offset += control->tsc_offset;
+}
+
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 			  struct vmcb *nested_vmcb)
 {
@@ -235,15 +252,16 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 		is_intercept(svm, INTERCEPT_VINTR) ||
 		is_intercept(svm, INTERCEPT_IRET);
 
+	svm->nested.vmcb = vmcb_gpa;
 	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
 		svm->vcpu.arch.hflags |= HF_HIF_MASK;
 	else
 		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
 
-	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
-		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
+	load_nested_vmcb_control(svm, &nested_vmcb->control);
+
+	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
 		nested_svm_init_mmu_context(&svm->vcpu);
-	}
 
 	/* Load the nested guest state */
 	svm->vmcb->save.es = nested_vmcb->save.es;
@@ -278,25 +296,15 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
 	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
 
-	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
-	svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
-
-	/* cache intercepts */
-	svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
-	svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
-	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
-	svm->nested.intercept            = nested_vmcb->control.intercept;
-
 	svm_flush_tlb(&svm->vcpu);
-	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
 	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
 	else
 		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
 
-	svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
 
+	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
 	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
 	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
 	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
@@ -317,8 +325,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	 */
 	recalc_intercepts(svm);
 
-	svm->nested.vmcb = vmcb_gpa;
-
 	/*
 	 * If L1 had a pending IRQ/NMI before executing VMRUN,
 	 * which wasn't delivered because it was disallowed (e.g.
-- 
2.18.2




* [PATCH 08/24] KVM: nSVM: extract preparation of VMCB for nested run
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (6 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 07/24] KVM: nSVM: extract load_nested_vmcb_control Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 09/24] KVM: nSVM: clean up tsc_offset update Paolo Bonzini
                   ` (16 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Split out filling svm->vmcb.save and svm->vmcb.control before VMRUN.
Only the latter will be useful when restoring nested SVM state.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 52 ++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b1d0d0519664..4f81c2196bf6 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -245,24 +245,8 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm,
 	svm->vcpu.arch.tsc_offset += control->tsc_offset;
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-			  struct vmcb *nested_vmcb)
+static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
 {
-	bool evaluate_pending_interrupts =
-		is_intercept(svm, INTERCEPT_VINTR) ||
-		is_intercept(svm, INTERCEPT_IRET);
-
-	svm->nested.vmcb = vmcb_gpa;
-	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
-		svm->vcpu.arch.hflags |= HF_HIF_MASK;
-	else
-		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
-
-	load_nested_vmcb_control(svm, &nested_vmcb->control);
-
-	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
-		nested_svm_init_mmu_context(&svm->vcpu);
-
 	/* Load the nested guest state */
 	svm->vmcb->save.es = nested_vmcb->save.es;
 	svm->vmcb->save.cs = nested_vmcb->save.cs;
@@ -280,9 +264,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	} else
 		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
 
-	/* Guest paging mode is active - reset mmu */
-	kvm_mmu_reset_context(&svm->vcpu);
-
 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
 	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
 	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
@@ -295,6 +276,15 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
 	svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
 	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
+}
+
+static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+{
+	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
+		nested_svm_init_mmu_context(&svm->vcpu);
+
+	/* Guest paging mode is active - reset mmu */
+	kvm_mmu_reset_context(&svm->vcpu);
 
 	svm_flush_tlb(&svm->vcpu);
 	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
@@ -325,6 +315,26 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	 */
 	recalc_intercepts(svm);
 
+	mark_all_dirty(svm->vmcb);
+}
+
+void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+			  struct vmcb *nested_vmcb)
+{
+	bool evaluate_pending_interrupts =
+		is_intercept(svm, INTERCEPT_VINTR) ||
+		is_intercept(svm, INTERCEPT_IRET);
+
+	svm->nested.vmcb = vmcb_gpa;
+	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
+		svm->vcpu.arch.hflags |= HF_HIF_MASK;
+	else
+		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
+
+	load_nested_vmcb_control(svm, &nested_vmcb->control);
+	nested_prepare_vmcb_save(svm, nested_vmcb);
+	nested_prepare_vmcb_control(svm, nested_vmcb);
+
 	/*
 	 * If L1 had a pending IRQ/NMI before executing VMRUN,
 	 * which wasn't delivered because it was disallowed (e.g.
@@ -340,8 +350,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	enable_gif(svm);
 	if (unlikely(evaluate_pending_interrupts))
 		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
-
-	mark_all_dirty(svm->vmcb);
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)
-- 
2.18.2




* [PATCH 09/24] KVM: nSVM: clean up tsc_offset update
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (7 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 08/24] KVM: nSVM: extract preparation of VMCB for nested run Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 10/24] KVM: nSVM: pass vmcb_control_area to copy_vmcb_control_area Paolo Bonzini
                   ` (15 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Use l1_tsc_offset to compute svm->vcpu.arch.tsc_offset and
svm->vmcb->control.tsc_offset, instead of relying on hsave.
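
Restated with local names (illustration only), the two hunks below
establish:

    /* TSC offset applied while L2 runs: L1's own offset plus the offset
     * requested in the nested VMCB. */
    static unsigned long long l2_tsc_offset(unsigned long long l1_tsc_offset,
                                            unsigned long long vmcb12_tsc_offset)
    {
            return l1_tsc_offset + vmcb12_tsc_offset;
    }

    /* On nested vmexit, both vcpu->arch.tsc_offset and
     * vmcb->control.tsc_offset go back to plain l1_tsc_offset. */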

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 4f81c2196bf6..2aaa539482ae 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -241,8 +241,6 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm,
 	svm->nested.intercept_dr         = control->intercept_dr;
 	svm->nested.intercept_exceptions = control->intercept_exceptions;
 	svm->nested.intercept            = control->intercept;
-
-	svm->vcpu.arch.tsc_offset += control->tsc_offset;
 }
 
 static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
@@ -292,7 +290,8 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *neste
 	else
 		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
 
-	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
+	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
+		svm->vcpu.arch.l1_tsc_offset + nested_vmcb->control.tsc_offset;
 
 	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
 	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
@@ -557,7 +556,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	/* Restore the original control entries */
 	copy_vmcb_control_area(vmcb, hsave);
 
-	svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
+	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
+		svm->vcpu.arch.l1_tsc_offset;
+
 	kvm_clear_exception_queue(&svm->vcpu);
 	kvm_clear_interrupt_queue(&svm->vcpu);
 
-- 
2.18.2




* [PATCH 10/24] KVM: nSVM: pass vmcb_control_area to copy_vmcb_control_area
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (8 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 09/24] KVM: nSVM: clean up tsc_offset update Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 11/24] KVM: nSVM: remove trailing padding for struct vmcb_control_area Paolo Bonzini
                   ` (14 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

This will come in handy when we put a struct vmcb_control_area in
svm->nested.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 2aaa539482ae..c759124ed6af 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -141,11 +141,9 @@ void recalc_intercepts(struct vcpu_svm *svm)
 	c->intercept |= g->intercept;
 }
 
-static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
+static void copy_vmcb_control_area(struct vmcb_control_area *dst,
+				   struct vmcb_control_area *from)
 {
-	struct vmcb_control_area *dst  = &dst_vmcb->control;
-	struct vmcb_control_area *from = &from_vmcb->control;
-
 	dst->intercept_cr         = from->intercept_cr;
 	dst->intercept_dr         = from->intercept_dr;
 	dst->intercept_exceptions = from->intercept_exceptions;
@@ -423,7 +421,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	else
 		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
 
-	copy_vmcb_control_area(hsave, vmcb);
+	copy_vmcb_control_area(&hsave->control, &vmcb->control);
 
 	svm->nested.nested_run_pending = 1;
 	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);
@@ -554,7 +552,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
 
 	/* Restore the original control entries */
-	copy_vmcb_control_area(vmcb, hsave);
+	copy_vmcb_control_area(&vmcb->control, &hsave->control);
 
 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
 		svm->vcpu.arch.l1_tsc_offset;
-- 
2.18.2




* [PATCH 11/24] KVM: nSVM: remove trailing padding for struct vmcb_control_area
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (9 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 10/24] KVM: nSVM: pass vmcb_control_area to copy_vmcb_control_area Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 12/24] KVM: nSVM: save all control fields in svm->nested Paolo Bonzini
                   ` (13 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Allow placing the VMCB structs on the stack or in other structs without
wasting too much space.  Add BUILD_BUG_ON as a quick safeguard against typos.
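
Note that the padding does not disappear from struct vmcb itself: the new
reserved_control[] array keeps the save area at its architectural offset
of 0x400.  If desired, that could be asserted explicitly as well, e.g. (a
sketch, not part of the patch):

    BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);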

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/svm.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 6ece8561ba66..8a1f5382a4ea 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -96,7 +96,6 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u8 reserved_6[8];	/* Offset 0xe8 */
 	u64 avic_logical_id;	/* Offset 0xf0 */
 	u64 avic_physical_id;	/* Offset 0xf8 */
-	u8 reserved_7[768];
 };
 
 
@@ -203,8 +202,16 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
 	u64 last_excp_to;
 };
 
+
+static inline void __unused_size_checks(void)
+{
+	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != 0x298);
+	BUILD_BUG_ON(sizeof(struct vmcb_control_area) != 256);
+}
+
 struct __attribute__ ((__packed__)) vmcb {
 	struct vmcb_control_area control;
+	u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
 	struct vmcb_save_area save;
 };
 
-- 
2.18.2




* [PATCH 12/24] KVM: nSVM: save all control fields in svm->nested
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (10 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 11/24] KVM: nSVM: remove trailing padding for struct vmcb_control_area Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 13/24] KVM: nSVM: do not reload pause filter fields from VMCB Paolo Bonzini
                   ` (12 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

In preparation for nested SVM save/restore, store all data that matters
from the VMCB control area into svm->nested.  It will then become part
of the nested SVM state that is saved by KVM_GET_NESTED_STATE and
restored by KVM_SET_NESTED_STATE, just like the cached vmcs12 for nVMX.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 73 +++++++++++++++++----------------------
 arch/x86/kvm/svm/svm.c    | 10 ++++--
 arch/x86/kvm/svm/svm.h    | 20 +++--------
 3 files changed, 45 insertions(+), 58 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index c759124ed6af..9999bce9adcf 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -60,7 +60,7 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u64 cr3 = svm->nested.nested_cr3;
+	u64 cr3 = svm->nested.ctl.nested_cr3;
 	u64 pdpte;
 	int ret;
 
@@ -75,7 +75,7 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	return svm->nested.nested_cr3;
+	return svm->nested.ctl.nested_cr3;
 }
 
 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
@@ -100,8 +100,7 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 
 void recalc_intercepts(struct vcpu_svm *svm)
 {
-	struct vmcb_control_area *c, *h;
-	struct nested_state *g;
+	struct vmcb_control_area *c, *h, *g;
 
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 
@@ -110,7 +109,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
 
 	c = &svm->vmcb->control;
 	h = &svm->nested.hsave->control;
-	g = &svm->nested;
+	g = &svm->nested.ctl;
 
 	svm->nested.host_intercept_exceptions = h->intercept_exceptions;
 
@@ -180,7 +179,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 	 */
 	int i;
 
-	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
 		return true;
 
 	for (i = 0; i < MSRPM_OFFSETS; i++) {
@@ -191,7 +190,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 			break;
 
 		p      = msrpm_offsets[i];
-		offset = svm->nested.vmcb_msrpm + (p * 4);
+		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
 
 		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
 			return false;
@@ -229,16 +228,10 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
 				     struct vmcb_control_area *control)
 {
-	svm->nested.nested_cr3 = control->nested_cr3;
+	copy_vmcb_control_area(&svm->nested.ctl, control);
 
-	svm->nested.vmcb_msrpm = control->msrpm_base_pa & ~0x0fffULL;
-	svm->nested.vmcb_iopm  = control->iopm_base_pa  & ~0x0fffULL;
-
-	/* cache intercepts */
-	svm->nested.intercept_cr         = control->intercept_cr;
-	svm->nested.intercept_dr         = control->intercept_dr;
-	svm->nested.intercept_exceptions = control->intercept_exceptions;
-	svm->nested.intercept            = control->intercept;
+	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
+	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
 }
 
 static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
@@ -274,34 +267,32 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
 	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
 }
 
-static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 {
-	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
+	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
 		nested_svm_init_mmu_context(&svm->vcpu);
 
 	/* Guest paging mode is active - reset mmu */
 	kvm_mmu_reset_context(&svm->vcpu);
 
 	svm_flush_tlb(&svm->vcpu);
-	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
+	if (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
 	else
 		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
 
 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
-		svm->vcpu.arch.l1_tsc_offset + nested_vmcb->control.tsc_offset;
+		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
 
-	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
-	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
-	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
-	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
-	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
-	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
+	svm->vmcb->control.int_ctl             = svm->nested.ctl.int_ctl | V_INTR_MASKING_MASK;
+	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
+	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
+	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
+	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
+	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;
 
-	svm->vmcb->control.pause_filter_count =
-		nested_vmcb->control.pause_filter_count;
-	svm->vmcb->control.pause_filter_thresh =
-		nested_vmcb->control.pause_filter_thresh;
+	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
+	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
 
 	/* Enter Guest-Mode */
 	enter_guest_mode(&svm->vcpu);
@@ -330,7 +321,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 
 	load_nested_vmcb_control(svm, &nested_vmcb->control);
 	nested_prepare_vmcb_save(svm, nested_vmcb);
-	nested_prepare_vmcb_control(svm, nested_vmcb);
+	nested_prepare_vmcb_control(svm);
 
 	/*
 	 * If L1 had a pending IRQ/NMI before executing VMRUN,
@@ -560,7 +551,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	kvm_clear_exception_queue(&svm->vcpu);
 	kvm_clear_interrupt_queue(&svm->vcpu);
 
-	svm->nested.nested_cr3 = 0;
+	svm->nested.ctl.nested_cr3 = 0;
 
 	/* Restore selected save entries */
 	svm->vmcb->save.es = hsave->save.es;
@@ -610,7 +601,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	u32 offset, msr, value;
 	int write, mask;
 
-	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
 		return NESTED_EXIT_HOST;
 
 	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
@@ -624,7 +615,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	/* Offset is in 32 bit units but need in 8 bit units */
 	offset *= 4;
 
-	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
+	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
 		return NESTED_EXIT_DONE;
 
 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
@@ -637,13 +628,13 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 	u8 start_bit;
 	u64 gpa;
 
-	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
+	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
 		return NESTED_EXIT_HOST;
 
 	port = svm->vmcb->control.exit_info_1 >> 16;
 	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
 		SVM_IOIO_SIZE_SHIFT;
-	gpa  = svm->nested.vmcb_iopm + (port / 8);
+	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
 	start_bit = port % 8;
 	iopm_len = (start_bit + size > 8) ? 2 : 1;
 	mask = (0xf >> (4 - size)) << start_bit;
@@ -669,13 +660,13 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 		break;
 	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
 		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
-		if (svm->nested.intercept_cr & bit)
+		if (svm->nested.ctl.intercept_cr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
 	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
 		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
-		if (svm->nested.intercept_dr & bit)
+		if (svm->nested.ctl.intercept_dr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
@@ -694,7 +685,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 	}
 	default: {
 		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
-		if (svm->nested.intercept & exit_bits)
+		if (svm->nested.ctl.intercept & exit_bits)
 			vmexit = NESTED_EXIT_DONE;
 	}
 	}
@@ -734,7 +725,7 @@ static bool nested_exit_on_exception(struct vcpu_svm *svm)
 {
 	unsigned int nr = svm->vcpu.arch.exception.nr;
 
-	return (svm->nested.intercept_exceptions & (1 << nr));
+	return (svm->nested.ctl.intercept_exceptions & (1 << nr));
 }
 
 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
@@ -795,7 +786,7 @@ static void nested_svm_intr(struct vcpu_svm *svm)
 
 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
 {
-	return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
+	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT));
 }
 
 static void nested_svm_init(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 47c565338426..ffb349739e0c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2173,7 +2173,7 @@ static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
 	bool ret = false;
 	u64 intercept;
 
-	intercept = svm->nested.intercept;
+	intercept = svm->nested.ctl.intercept;
 
 	if (!is_guest_mode(&svm->vcpu) ||
 	    (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
@@ -3320,6 +3320,12 @@ static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
+	if (unlikely(svm->nested.nested_run_pending)) {
+		/* After this vmentry, these fields will be used up.  */
+		svm->nested.ctl.event_inj     = 0;
+		svm->nested.ctl.event_inj_err = 0;
+	}
+
 	/*
 	 * Disable singlestep if we're injecting an interrupt/exception.
 	 * We don't want our modified rflags to be pushed on the stack where
@@ -3655,7 +3661,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 		    info->intercept == x86_intercept_clts)
 			break;
 
-		intercept = svm->nested.intercept;
+		intercept = svm->nested.ctl.intercept;
 
 		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
 			break;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 33e3f09d7a8e..dd5418f20256 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -91,22 +91,12 @@ struct nested_state {
 	/* These are the merged vectors */
 	u32 *msrpm;
 
-	/* gpa pointers to the real vectors */
-	u64 vmcb_msrpm;
-	u64 vmcb_iopm;
-
 	/* A VMRUN has started but has not yet been performed, so
 	 * we cannot inject a nested vmexit yet.  */
 	bool nested_run_pending;
 
-	/* cache for intercepts of the guest */
-	u32 intercept_cr;
-	u32 intercept_dr;
-	u32 intercept_exceptions;
-	u64 intercept;
-
-	/* Nested Paging related state */
-	u64 nested_cr3;
+	/* cache for control fields of the guest */
+	struct vmcb_control_area ctl;
 };
 
 struct vcpu_svm {
@@ -381,17 +371,17 @@ static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 
 static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
 {
-	return (svm->nested.intercept & (1ULL << INTERCEPT_SMI));
+	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_SMI));
 }
 
 static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
 {
-	return (svm->nested.intercept & (1ULL << INTERCEPT_INTR));
+	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INTR));
 }
 
 static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 {
-	return (svm->nested.intercept & (1ULL << INTERCEPT_NMI));
+	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_NMI));
 }
 
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 13/24] KVM: nSVM: do not reload pause filter fields from VMCB
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (11 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 12/24] KVM: nSVM: save all control fields in svm->nested Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 14/24] KVM: nSVM: remove HF_VINTR_MASK Paolo Bonzini
                   ` (11 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

These fields do not change from VMRUN to VMEXIT; there is no need
to reload them on nested VMEXIT.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 9999bce9adcf..b6a1e1ff271e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -533,11 +533,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	nested_vmcb->control.event_inj         = 0;
 	nested_vmcb->control.event_inj_err     = 0;
 
-	nested_vmcb->control.pause_filter_count =
-		svm->vmcb->control.pause_filter_count;
-	nested_vmcb->control.pause_filter_thresh =
-		svm->vmcb->control.pause_filter_thresh;
-
 	/* We always set V_INTR_MASKING and remember the old value in hflags */
 	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
 		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 14/24] KVM: nSVM: remove HF_VINTR_MASK
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (12 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 13/24] KVM: nSVM: do not reload pause filter fields from VMCB Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 15/24] KVM: nSVM: remove HF_HIF_MASK Paolo Bonzini
                   ` (10 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Now that the int_ctl field is stored in svm->nested.ctl.int_ctl, we can
use it instead of vcpu->arch.hflags to check whether L2 is running
in V_INTR_MASKING mode.
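
As an aside (not part of the patch): the check now reads straight from
the cached controls.  A minimal sketch of such a predicate, with a
made-up helper name but the same field and mask as in the hunks below:

	/* Did L1 request V_INTR_MASKING for the L2 guest? */
	static inline bool nested_vintr_masking(struct vcpu_svm *svm)
	{
		return svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK;
	}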

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/svm/nested.c       | 10 +++-------
 arch/x86/kvm/svm/svm.c          |  2 +-
 arch/x86/kvm/svm/svm.h          |  4 +++-
 4 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index db261da578f3..f1c4cb2d0541 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1594,7 +1594,6 @@ enum {
 
 #define HF_GIF_MASK		(1 << 0)
 #define HF_HIF_MASK		(1 << 1)
-#define HF_VINTR_MASK		(1 << 2)
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
 #define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b6a1e1ff271e..9e4d24acadd7 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -118,7 +118,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
 	c->intercept_exceptions = h->intercept_exceptions;
 	c->intercept = h->intercept;
 
-	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+	if (g->int_ctl & V_INTR_MASKING_MASK) {
 		/* We only want the cr8 intercept bits of L1 */
 		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
 		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);
@@ -276,10 +276,6 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 	kvm_mmu_reset_context(&svm->vcpu);
 
 	svm_flush_tlb(&svm->vcpu);
-	if (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
-		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
-	else
-		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
 
 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
 		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
@@ -533,8 +529,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	nested_vmcb->control.event_inj         = 0;
 	nested_vmcb->control.event_inj_err     = 0;
 
-	/* We always set V_INTR_MASKING and remember the old value in hflags */
-	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
+	/* We always set V_INTR_MASKING and remember the old value in svm->nested */
+	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK))
 		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
 
 	/* Restore the original control entries */
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index ffb349739e0c..10c80c13b679 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3080,7 +3080,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 
 	if (is_guest_mode(vcpu)) {
 		/* As long as interrupts are being delivered...  */
-		if ((svm->vcpu.arch.hflags & HF_VINTR_MASK)
+		if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
 		    ? !(svm->vcpu.arch.hflags & HF_HIF_MASK)
 		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
 			return true;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index dd5418f20256..73986f96ba2a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -366,7 +366,9 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
 
 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 {
-	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
 }
 
 static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 15/24] KVM: nSVM: remove HF_HIF_MASK
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (13 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 14/24] KVM: nSVM: remove HF_VINTR_MASK Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 16/24] KVM: nSVM: split nested_vmcb_check_controls Paolo Bonzini
                   ` (9 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

The L1 interrupt flag can be found in the save area of svm->nested.hsave
(as part of the saved RFLAGS); fish it from there so that there is one
fewer thing to migrate.
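
For illustration only: "did L1 have IF set at VMRUN time" is now
answered from the saved L1 RFLAGS instead of hflags.  A minimal sketch
of such a check, with a hypothetical helper name but the same field
access that svm_interrupt_blocked uses below:

	static inline bool nested_l1_intr_enabled(struct vcpu_svm *svm)
	{
		/* L1's RFLAGS.IF as saved in the host save area at VMRUN */
		return svm->nested.hsave->save.rflags & X86_EFLAGS_IF;
	}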

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 1 -
 arch/x86/kvm/svm/nested.c       | 5 -----
 arch/x86/kvm/svm/svm.c          | 2 +-
 3 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f1c4cb2d0541..019dafaddc1f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1593,7 +1593,6 @@ enum {
 };
 
 #define HF_GIF_MASK		(1 << 0)
-#define HF_HIF_MASK		(1 << 1)
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
 #define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 9e4d24acadd7..cbe96a08b080 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -310,11 +310,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 		is_intercept(svm, INTERCEPT_IRET);
 
 	svm->nested.vmcb = vmcb_gpa;
-	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
-		svm->vcpu.arch.hflags |= HF_HIF_MASK;
-	else
-		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
-
 	load_nested_vmcb_control(svm, &nested_vmcb->control);
 	nested_prepare_vmcb_save(svm, nested_vmcb);
 	nested_prepare_vmcb_control(svm);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 10c80c13b679..6a19cb3e3b1f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3081,7 +3081,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu)) {
 		/* As long as interrupts are being delivered...  */
 		if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
-		    ? !(svm->vcpu.arch.hflags & HF_HIF_MASK)
+		    ? !(svm->nested.hsave->save.rflags & X86_EFLAGS_IF)
 		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
 			return true;
 
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 16/24] KVM: nSVM: split nested_vmcb_check_controls
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (14 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 15/24] KVM: nSVM: remove HF_HIF_MASK Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 17/24] KVM: nSVM: do all MMU switch work in init/uninit functions Paolo Bonzini
                   ` (8 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Once the vcpu is in guest mode, the authoritative state no longer comes
from the VMCB; however, KVM_SET_NESTED_STATE can still perform checks on
the SVM controls that L1 provided, because they come straight from
userspace.

Therefore, split those checks out into a separate function.
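
A sketch of the intended caller on the KVM_SET_NESTED_STATE path (this
is essentially what patch 24 does; user_vmcb there is the VMCB pointer
provided by userspace):

	struct vmcb_control_area ctl;

	if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
		return -EFAULT;
	if (!nested_vmcb_check_controls(&ctl))
		return -EINVAL;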

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index cbe96a08b080..024e27bebba3 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -203,26 +203,31 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 	return true;
 }
 
-static bool nested_vmcb_checks(struct vmcb *vmcb)
+static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 {
-	if ((vmcb->save.efer & EFER_SVME) == 0)
+	if ((control->intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
 		return false;
 
-	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
-	    (vmcb->save.cr0 & X86_CR0_NW))
+	if (control->asid == 0)
 		return false;
 
-	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
+	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
+	    !npt_enabled)
 		return false;
 
-	if (vmcb->control.asid == 0)
+	return true;
+}
+
+static bool nested_vmcb_checks(struct vmcb *vmcb)
+{
+	if ((vmcb->save.efer & EFER_SVME) == 0)
 		return false;
 
-	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
-	    !npt_enabled)
+	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
+	    (vmcb->save.cr0 & X86_CR0_NW))
 		return false;
 
-	return true;
+	return nested_vmcb_check_controls(&vmcb->control);
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 17/24] KVM: nSVM: do all MMU switch work in init/uninit functions
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (15 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 16/24] KVM: nSVM: split nested_vmcb_check_controls Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 18/24] KVM: nSVM: leave guest mode when clearing EFER.SVME Paolo Bonzini
                   ` (7 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Move the kvm_mmu_reset_context calls to nested_svm_init_mmu_context and
nested_svm_uninit_mmu_context, so that the state of the MMU is consistent
with the vcpu->arch.mmu and vcpu->arch.walk_mmu state.  Remove an
unnecessary kvm_mmu_load, which can wait until the first vcpu_run.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 024e27bebba3..54a3384a60f8 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -90,12 +90,17 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
 	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
+
+	/* Guest paging mode is active - reset mmu */
+	kvm_mmu_reset_context(vcpu);
 }
 
 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+
+	kvm_mmu_reset_context(vcpu);
 }
 
 void recalc_intercepts(struct vcpu_svm *svm)
@@ -277,9 +282,6 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
 		nested_svm_init_mmu_context(&svm->vcpu);
 
-	/* Guest paging mode is active - reset mmu */
-	kvm_mmu_reset_context(&svm->vcpu);
-
 	svm_flush_tlb(&svm->vcpu);
 
 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
@@ -573,8 +575,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	kvm_vcpu_unmap(&svm->vcpu, &map, true);
 
 	nested_svm_uninit_mmu_context(&svm->vcpu);
-	kvm_mmu_reset_context(&svm->vcpu);
-	kvm_mmu_load(&svm->vcpu);
 
 	/*
 	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 18/24] KVM: nSVM: leave guest mode when clearing EFER.SVME
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (16 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 17/24] KVM: nSVM: do all MMU switch work in init/uninit functions Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 19/24] KVM: nSVM: extract svm_set_gif Paolo Bonzini
                   ` (6 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

According to the AMD manual, the effect of turning off EFER.SVME while a
guest is running is undefined.  We make it leave guest mode immediately,
similar to the effect of clearing the VMX bit in MSR_IA32_FEAT_CTL.
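
The new svm_leave_nested helper is reused later in the series; for
example, patch 24 calls it when userspace clears the guest-mode flag:

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		goto out_set_gif;
	}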

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 16 ++++++++++++++++
 arch/x86/kvm/svm/svm.c    |  8 ++++++--
 arch/x86/kvm/svm/svm.h    |  1 +
 3 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 54a3384a60f8..3e37410d0b94 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -587,6 +587,22 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	return 0;
 }
 
+/*
+ * Forcibly leave nested mode in order to be able to reset the VCPU later on.
+ */
+void svm_leave_nested(struct vcpu_svm *svm)
+{
+	if (is_guest_mode(&svm->vcpu)) {
+		struct vmcb *hsave = svm->nested.hsave;
+		struct vmcb *vmcb = svm->vmcb;
+
+		svm->nested.nested_run_pending = 0;
+		leave_guest_mode(&svm->vcpu);
+		copy_vmcb_control_area(&vmcb->control, &hsave->control);
+		nested_svm_uninit_mmu_context(&svm->vcpu);
+	}
+}
+
 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 {
 	u32 offset, msr, value;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6a19cb3e3b1f..09b345892fc9 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -265,6 +265,7 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
 
 void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	vcpu->arch.efer = efer;
 
 	if (!npt_enabled) {
@@ -275,8 +276,11 @@ void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 			efer &= ~EFER_LME;
 	}
 
-	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
-	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+	if (!(efer & EFER_SVME))
+		svm_leave_nested(svm);
+
+	svm->vmcb->save.efer = efer | EFER_SVME;
+	mark_dirty(svm->vmcb, VMCB_CR);
 }
 
 static int is_external_interrupt(u32 info)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 73986f96ba2a..4d57270cac3f 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -388,6 +388,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 			  struct vmcb *nested_vmcb);
+void svm_leave_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct vcpu_svm *svm);
 void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 19/24] KVM: nSVM: extract svm_set_gif
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (17 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 18/24] KVM: nSVM: leave guest mode when clearing EFER.SVME Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 20/24] KVM: MMU: pass arbitrary CR0/CR4/EFER to kvm_init_shadow_mmu Paolo Bonzini
                   ` (5 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Extract the code that is needed to implement CLGI and STGI,
so that we can run it from VMRUN and vmexit (and in the future,
KVM_SET_NESTED_STATE).  Skip the request for KVM_REQ_EVENT unless needed,
subsuming the evaluate_pending_interrupts optimization that is found
in enter_svm_guest_mode.
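
For example, the KVM_SET_NESTED_STATE implementation at the end of this
series restores the GIF through the same helper:

	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));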

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/irq.c        |  1 +
 arch/x86/kvm/svm/nested.c | 22 ++------------------
 arch/x86/kvm/svm/svm.c    | 44 +++++++++++++++++++++++----------------
 arch/x86/kvm/svm/svm.h    |  1 +
 4 files changed, 30 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 54f7ea68083b..99d118ffc67d 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -83,6 +83,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 
 	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
 }
+EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
 
 /*
  * check if there is pending interrupt without
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 3e37410d0b94..a4a9516ff8b5 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -312,30 +312,12 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 			  struct vmcb *nested_vmcb)
 {
-	bool evaluate_pending_interrupts =
-		is_intercept(svm, INTERCEPT_VINTR) ||
-		is_intercept(svm, INTERCEPT_IRET);
-
 	svm->nested.vmcb = vmcb_gpa;
 	load_nested_vmcb_control(svm, &nested_vmcb->control);
 	nested_prepare_vmcb_save(svm, nested_vmcb);
 	nested_prepare_vmcb_control(svm);
 
-	/*
-	 * If L1 had a pending IRQ/NMI before executing VMRUN,
-	 * which wasn't delivered because it was disallowed (e.g.
-	 * interrupts disabled), L0 needs to evaluate if this pending
-	 * event should cause an exit from L2 to L1 or be delivered
-	 * directly to L2.
-	 *
-	 * Usually this would be handled by the processor noticing an
-	 * IRQ/NMI window request.  However, VMRUN can unblock interrupts
-	 * by implicitly setting GIF, so force L0 to perform pending event
-	 * evaluation by requesting a KVM_REQ_EVENT.
-	 */
-	enable_gif(svm);
-	if (unlikely(evaluate_pending_interrupts))
-		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	svm_set_gif(svm, true);
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)
@@ -478,7 +460,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
 	/* Give the current vmcb to the guest */
-	disable_gif(svm);
+	svm_set_gif(svm, false);
 
 	nested_vmcb->save.es     = vmcb->save.es;
 	nested_vmcb->save.cs     = vmcb->save.cs;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 09b345892fc9..d8187d25fe04 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1977,6 +1977,30 @@ static int vmrun_interception(struct vcpu_svm *svm)
 	return nested_svm_vmrun(svm);
 }
 
+void svm_set_gif(struct vcpu_svm *svm, bool value)
+{
+	if (value) {
+		/*
+		 * If VGIF is enabled, the STGI intercept is only added to
+		 * detect the opening of the SMI/NMI window; remove it now.
+		 */
+		if (vgif_enabled(svm))
+			clr_intercept(svm, INTERCEPT_STGI);
+
+		enable_gif(svm);
+		if (svm->vcpu.arch.smi_pending ||
+		    svm->vcpu.arch.nmi_pending ||
+		    kvm_cpu_has_injectable_intr(&svm->vcpu))
+			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	} else {
+		disable_gif(svm);
+
+		/* After a CLGI no interrupts should come */
+		if (!kvm_vcpu_apicv_active(&svm->vcpu))
+			svm_clear_vintr(svm);
+	}
+}
+
 static int stgi_interception(struct vcpu_svm *svm)
 {
 	int ret;
@@ -1984,18 +2008,8 @@ static int stgi_interception(struct vcpu_svm *svm)
 	if (nested_svm_check_permissions(svm))
 		return 1;
 
-	/*
-	 * If VGIF is enabled, the STGI intercept is only added to
-	 * detect the opening of the SMI/NMI window; remove it now.
-	 */
-	if (vgif_enabled(svm))
-		clr_intercept(svm, INTERCEPT_STGI);
-
 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
-	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
-
-	enable_gif(svm);
-
+	svm_set_gif(svm, true);
 	return ret;
 }
 
@@ -2007,13 +2021,7 @@ static int clgi_interception(struct vcpu_svm *svm)
 		return 1;
 
 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
-
-	disable_gif(svm);
-
-	/* After a CLGI no interrupts should come */
-	if (!kvm_vcpu_apicv_active(&svm->vcpu))
-		svm_clear_vintr(svm);
-
+	svm_set_gif(svm, false);
 	return ret;
 }
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 4d57270cac3f..6733f9036499 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -357,6 +357,7 @@ void disable_nmi_singlestep(struct vcpu_svm *svm);
 bool svm_smi_blocked(struct kvm_vcpu *vcpu);
 bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
+void svm_set_gif(struct vcpu_svm *svm, bool value);
 
 /* nested.c */
 
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 20/24] KVM: MMU: pass arbitrary CR0/CR4/EFER to kvm_init_shadow_mmu
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (18 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 19/24] KVM: nSVM: extract svm_set_gif Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 21/24] KVM: x86: always update CR3 in VMCB Paolo Bonzini
                   ` (4 subsequent siblings)
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

This allows fetching the registers from the hsave area when setting
up the NPT shadow MMU, and is needed for KVM_SET_NESTED_STATE (which
runs long after the CR0, CR4 and EFER values in vcpu have been switched
to hold L2 guest state).

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu.h        |  2 +-
 arch/x86/kvm/mmu/mmu.c    | 14 +++++++++-----
 arch/x86/kvm/svm/nested.c |  5 ++++-
 3 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 8a3b1bce722a..45c1ae872a34 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -57,7 +57,7 @@ void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d93cb3ad8f03..50ae99ee32df 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4954,7 +4954,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 	return role;
 }
 
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
 	union kvm_mmu_role new_role =
@@ -4963,11 +4963,11 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
 
-	if (!is_paging(vcpu))
+	if (!(cr0 & X86_CR0_PG))
 		nonpaging_init_context(vcpu, context);
-	else if (is_long_mode(vcpu))
+	else if (efer & EFER_LMA)
 		paging64_init_context(vcpu, context);
-	else if (is_pae(vcpu))
+	else if (cr4 & X86_CR4_PAE)
 		paging32E_init_context(vcpu, context);
 	else
 		paging32_init_context(vcpu, context);
@@ -5045,7 +5045,11 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
 
-	kvm_init_shadow_mmu(vcpu);
+	kvm_init_shadow_mmu(vcpu,
+			    kvm_read_cr0_bits(vcpu, X86_CR0_PG),
+			    kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
+			    vcpu->arch.efer);
+
 	context->get_guest_pgd     = get_cr3;
 	context->get_pdptr         = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index a4a9516ff8b5..19b6a7c954e8 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -80,10 +80,13 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
 
 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *hsave = svm->nested.hsave;
+
 	WARN_ON(mmu_is_nested(vcpu));
 
 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
-	kvm_init_shadow_mmu(vcpu);
+	kvm_init_shadow_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer);
 	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
 	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 21/24] KVM: x86: always update CR3 in VMCB
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (19 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 20/24] KVM: MMU: pass arbitrary CR0/CR4/EFER to kvm_init_shadow_mmu Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 18:22   ` Sean Christopherson
  2020-05-20 18:24   ` Sean Christopherson
  2020-05-20 17:21 ` [PATCH 22/24] uaccess: add memzero_user Paolo Bonzini
                   ` (3 subsequent siblings)
  24 siblings, 2 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

vmx_load_mmu_pgd defers the write of GUEST_CR3, leaving it to
prepare_vmcs02 as an optimization, but this is only correct before the
nested vmentry.  If userspace modifies CR3 with KVM_SET_SREGS after the
VM has already been put in guest mode, the value of CR3 will not be
updated.  Remove the optimization, which almost never triggers anyway.

This also applies to SVM, where the code was added in commit 689f3bf21628
("KVM: x86: unify callbacks to load paging root", 2020-03-16) just to keep the
two vendor-specific modules closer.

Fixes: 04f11ef45810 ("KVM: nVMX: Always write vmcs02.GUEST_CR3 during nested VM-Enter")
Fixes: 689f3bf21628 ("KVM: x86: unify callbacks to load paging root")
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c |  6 +-----
 arch/x86/kvm/svm/svm.c    | 16 +++++-----------
 arch/x86/kvm/vmx/vmx.c    |  5 +----
 3 files changed, 7 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 19b6a7c954e8..087a04ae74e4 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -260,11 +260,7 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
 	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
 	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
 	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
-	if (npt_enabled) {
-		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
-		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
-	} else
-		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
 
 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
 	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d8187d25fe04..56be704ffe95 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3465,7 +3465,6 @@ static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	bool update_guest_cr3 = true;
 	unsigned long cr3;
 
 	cr3 = __sme_set(root);
@@ -3474,18 +3473,13 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
 		mark_dirty(svm->vmcb, VMCB_NPT);
 
 		/* Loading L2's CR3 is handled by enter_svm_guest_mode.  */
-		if (is_guest_mode(vcpu))
-			update_guest_cr3 = false;
-		else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
-			cr3 = vcpu->arch.cr3;
-		else /* CR3 is already up-to-date.  */
-			update_guest_cr3 = false;
+		if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+			return;
+		cr3 = vcpu->arch.cr3;
 	}
 
-	if (update_guest_cr3) {
-		svm->vmcb->save.cr3 = cr3;
-		mark_dirty(svm->vmcb, VMCB_CR);
-	}
+	svm->vmcb->save.cr3 = cr3;
+	mark_dirty(svm->vmcb, VMCB_CR);
 }
 
 static int is_disabled(void)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 55712dd86baf..7daf6a50e774 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3085,10 +3085,7 @@ void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd)
 			spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
 		}
 
-		/* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */
-		if (is_guest_mode(vcpu))
-			update_guest_cr3 = false;
-		else if (!enable_unrestricted_guest && !is_paging(vcpu))
+		if (!enable_unrestricted_guest && !is_paging(vcpu))
 			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
 		else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
 			guest_cr3 = vcpu->arch.cr3;
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 22/24] uaccess: add memzero_user
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (20 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 21/24] KVM: x86: always update CR3 in VMCB Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 20:40   ` Christoph Hellwig
  2020-05-20 17:21 ` [PATCH 23/24] selftests: kvm: add a SVM version of state-test Paolo Bonzini
                   ` (2 subsequent siblings)
  24 siblings, 1 reply; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

This will be used from KVM.  Add it to lib/ so that everyone can use it.
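
The KVM user appears in patch 24; the pattern there is to zero the full
destination buffer first and then copy out only the structs that are
actually filled in:

	if (memzero_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;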

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 include/linux/uaccess.h |  1 +
 lib/usercopy.c          | 63 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)

diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 67f016010aad..bd8c85b50e67 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -232,6 +232,7 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 #endif		/* ARCH_HAS_NOCACHE_UACCESS */
 
 extern __must_check int check_zeroed_user(const void __user *from, size_t size);
+extern __must_check int memzero_user(void __user *from, size_t size);
 
 /**
  * copy_struct_from_user: copy a struct from userspace
diff --git a/lib/usercopy.c b/lib/usercopy.c
index cbb4d9ec00f2..82997862bf02 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -33,6 +33,69 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 EXPORT_SYMBOL(_copy_to_user);
 #endif
 
+/**
+ * memzero_user: write zero bytes to a userspace buffer
+ * @from: Source address, in userspace.
+ * @size: Size of buffer.
+ *
+ * This is effectively shorthand for "memset(from, 0, size)" for
+ * userspace addresses.
+ *
+ * Returns:
+ *  * 0: zeroes have been written to the buffer
+ *  * -EFAULT: access to userspace failed.
+ */
+int memzero_user(void __user *from, size_t size)
+{
+	unsigned long val = 0;
+	unsigned long mask = 0;
+	uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
+
+	if (unlikely(size == 0))
+		return 0;
+
+	from -= align;
+	size += align;
+
+	if (!user_access_begin(from, ALIGN_UP(size, sizeof(unsigned long))))
+		return -EFAULT;
+
+	if (align) {
+		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+		/* Prepare a mask to keep the first "align" bytes.  */
+		mask = aligned_byte_mask(align);
+	}
+
+	if (size >= sizeof(unsigned long)) {
+		/* The mask only applies to the first full word.  */
+		val &= mask;
+		mask = 0;
+		do {
+			unsafe_put_user(val, (unsigned long __user *) from, err_fault);
+			from += sizeof(unsigned long);
+			size -= sizeof(unsigned long);
+			val = 0;
+		} while (size >= sizeof(unsigned long));
+
+		if (!size)
+			goto done;
+		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+	}
+
+	/* Bytes after the first "size" have to be kept too. */
+	mask |= ~aligned_byte_mask(size);
+	val &= mask;
+	unsafe_put_user(val, (unsigned long __user *) from, err_fault);
+
+done:
+	user_access_end();
+	return 0;
+err_fault:
+	user_access_end();
+	return -EFAULT;
+}
+EXPORT_SYMBOL(memzero_user);
+
 /**
  * check_zeroed_user: check if a userspace buffer only contains zero bytes
  * @from: Source address, in userspace.
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 23/24] selftests: kvm: add a SVM version of state-test
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (21 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 22/24] uaccess: add memzero_user Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 17:21 ` [PATCH 24/24] KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE Paolo Bonzini
  2020-05-20 19:24 ` [PATCH 00/24] KVM: nSVM: event fixes and migration support Maxim Levitsky
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

The test is similar to the existing one for VMX, but simpler because we
don't have to test shadow VMCS or vmptrld/vmptrst/vmclear.
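
Assuming the usual kvm selftests workflow (paths not re-verified here),
the test can be built and run with something like:

	make -C tools/testing/selftests TARGETS=kvm
	./tools/testing/selftests/kvm/x86_64/state_test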

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 .../testing/selftests/kvm/x86_64/state_test.c | 69 +++++++++++++++----
 1 file changed, 57 insertions(+), 12 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 5b1a016edf55..af8b6df6a13e 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -18,14 +18,46 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include "vmx.h"
+#include "svm_util.h"
 
 #define VCPU_ID		5
+#define L2_GUEST_STACK_SIZE 256
 
-void l2_guest_code(void)
+void svm_l2_guest_code(void)
+{
+	GUEST_SYNC(4);
+	/* Exit to L1 */
+	vmcall();
+	GUEST_SYNC(6);
+	/* Done, exit to L1 and never come back.  */
+	vmcall();
+}
+
+static void svm_l1_guest_code(struct svm_test_data *svm)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+
+	GUEST_ASSERT(svm->vmcb_gpa);
+	/* Prepare for L2 execution. */
+	generic_svm_setup(svm, svm_l2_guest_code,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	GUEST_SYNC(3);
+	run_guest(vmcb, svm->vmcb_gpa);
+	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	GUEST_SYNC(5);
+	vmcb->save.rip += 3;
+	run_guest(vmcb, svm->vmcb_gpa);
+	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	GUEST_SYNC(7);
+}
+
+void vmx_l2_guest_code(void)
 {
 	GUEST_SYNC(6);
 
-        /* Exit to L1 */
+	/* Exit to L1 */
 	vmcall();
 
 	/* L1 has now set up a shadow VMCS for us.  */
@@ -42,10 +74,9 @@ void l2_guest_code(void)
 	vmcall();
 }
 
-void l1_guest_code(struct vmx_pages *vmx_pages)
+static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
 {
-#define L2_GUEST_STACK_SIZE 64
-        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
 	GUEST_ASSERT(vmx_pages->vmcs_gpa);
 	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
@@ -56,7 +87,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_SYNC(4);
 	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 
-	prepare_vmcs(vmx_pages, l2_guest_code,
+	prepare_vmcs(vmx_pages, vmx_l2_guest_code,
 		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 
 	GUEST_SYNC(5);
@@ -106,20 +137,31 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	GUEST_ASSERT(vmresume());
 }
 
-void guest_code(struct vmx_pages *vmx_pages)
+static u32 cpuid_ecx(u32 eax)
+{
+	u32 ecx;
+	asm volatile("cpuid" : "=a" (eax), "=c" (ecx) : "0" (eax) : "ebx", "edx");
+	return ecx;
+}
+
+static void __attribute__((__flatten__)) guest_code(void *arg)
 {
 	GUEST_SYNC(1);
 	GUEST_SYNC(2);
 
-	if (vmx_pages)
-		l1_guest_code(vmx_pages);
+	if (arg) {
+		if (cpuid_ecx(0x80000001) & CPUID_SVM)
+			svm_l1_guest_code(arg);
+		else
+			vmx_l1_guest_code(arg);
+	}
 
 	GUEST_DONE();
 }
 
 int main(int argc, char *argv[])
 {
-	vm_vaddr_t vmx_pages_gva = 0;
+	vm_vaddr_t nested_gva = 0;
 
 	struct kvm_regs regs1, regs2;
 	struct kvm_vm *vm;
@@ -136,8 +178,11 @@ int main(int argc, char *argv[])
 	vcpu_regs_get(vm, VCPU_ID, &regs1);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		vcpu_alloc_vmx(vm, &vmx_pages_gva);
-		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+		if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+			vcpu_alloc_svm(vm, &nested_gva);
+		else
+			vcpu_alloc_vmx(vm, &nested_gva);
+		vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
 	} else {
 		pr_info("will skip nested state checks\n");
 		vcpu_args_set(vm, VCPU_ID, 1, 0);
-- 
2.18.2



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* [PATCH 24/24] KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (22 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 23/24] selftests: kvm: add a SVM version of state-test Paolo Bonzini
@ 2020-05-20 17:21 ` Paolo Bonzini
  2020-05-20 19:24 ` [PATCH 00/24] KVM: nSVM: event fixes and migration support Maxim Levitsky
  24 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 17:21 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

Similar to VMX, the state that is captured through the currently available
IOCTLs is a mix of L1 and L2 state, dependent on whether the L2 guest was
running at the moment when the process was interrupted to save its state.

In particular, the SVM-specific state for nested virtualization includes
the L1 saved state (including the interrupt flag), the cached L2 controls,
and the GIF.
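
From the VMM side, usage is the same as for VMX nested state.  A rough,
hypothetical sketch (kvm_fd/vcpu_fd and the error handling are
placeholders; the buffer size comes from
KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE)):

	int max_size = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	struct kvm_nested_state *state = calloc(1, max_size);

	/* Save on the source... */
	state->size = max_size;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0)
		err(1, "KVM_GET_NESTED_STATE");

	/* ...and restore on the destination, after the register state. */
	if (ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state) < 0)
		err(1, "KVM_SET_NESTED_STATE");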

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/uapi/asm/kvm.h |  17 +++-
 arch/x86/kvm/cpuid.h            |   5 ++
 arch/x86/kvm/svm/nested.c       | 147 ++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c          |   1 +
 arch/x86/kvm/vmx/nested.c       |   5 --
 arch/x86/kvm/x86.c              |   3 +-
 6 files changed, 171 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 3f3f780c8c65..12075a9de1c1 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -385,18 +385,22 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
 
 #define KVM_STATE_NESTED_FORMAT_VMX	0
-#define KVM_STATE_NESTED_FORMAT_SVM	1	/* unused */
+#define KVM_STATE_NESTED_FORMAT_SVM	1
 
 #define KVM_STATE_NESTED_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING	0x00000002
 #define KVM_STATE_NESTED_EVMCS		0x00000004
 #define KVM_STATE_NESTED_MTF_PENDING	0x00000008
+#define KVM_STATE_NESTED_GIF_SET	0x00000100
 
 #define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON	0x00000002
 
 #define KVM_STATE_NESTED_VMX_VMCS_SIZE	0x1000
 
+#define KVM_STATE_NESTED_SVM_VMCB_SIZE	0x1000
+
+
 struct kvm_vmx_nested_state_data {
 	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
 	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
@@ -411,6 +415,15 @@ struct kvm_vmx_nested_state_hdr {
 	} smm;
 };
 
+struct kvm_svm_nested_state_data {
+	/* Save area only used if KVM_STATE_NESTED_RUN_PENDING.  */
+	__u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
+};
+
+struct kvm_svm_nested_state_hdr {
+	__u64 vmcb_pa;
+};
+
 /* for KVM_CAP_NESTED_STATE */
 struct kvm_nested_state {
 	__u16 flags;
@@ -419,6 +432,7 @@ struct kvm_nested_state {
 
 	union {
 		struct kvm_vmx_nested_state_hdr vmx;
+		struct kvm_svm_nested_state_hdr svm;
 
 		/* Pad the header to 128 bytes.  */
 		__u8 pad[120];
@@ -431,6 +445,7 @@ struct kvm_nested_state {
 	 */
 	union {
 		struct kvm_vmx_nested_state_data vmx[0];
+		struct kvm_svm_nested_state_data svm[0];
 	} data;
 };
 
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 63a70f6a3df3..05434cd9342f 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -303,4 +303,9 @@ static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
 		kvm_cpu_cap_set(x86_feature);
 }
 
+static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
+}
+
 #endif
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 087a04ae74e4..001d1f830076 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -25,6 +25,7 @@
 #include "trace.h"
 #include "mmu.h"
 #include "x86.h"
+#include "cpuid.h"
 #include "lapic.h"
 #include "svm.h"
 
@@ -243,6 +244,8 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm,
 {
 	copy_vmcb_control_area(&svm->nested.ctl, control);
 
+	/* Copy it here because nested_svm_check_controls will check it.  */
+	svm->nested.ctl.asid           = control->asid;
 	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
 	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
 }
@@ -870,6 +873,150 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
 	return NESTED_EXIT_CONTINUE;
 }
 
+static int svm_get_nested_state(struct kvm_vcpu *vcpu,
+				struct kvm_nested_state __user *user_kvm_nested_state,
+				u32 user_data_size)
+{
+	struct vcpu_svm *svm;
+	struct kvm_nested_state kvm_state = {
+		.flags = 0,
+		.format = KVM_STATE_NESTED_FORMAT_SVM,
+		.size = sizeof(kvm_state),
+	};
+	struct vmcb __user *user_vmcb = (struct vmcb __user *)
+		&user_kvm_nested_state->data.svm[0];
+
+	if (!vcpu)
+		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
+
+	svm = to_svm(vcpu);
+
+	if (user_data_size < kvm_state.size)
+		goto out;
+
+	/* First fill in the header and copy it out.  */
+	if (is_guest_mode(vcpu)) {
+		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb;
+		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
+		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
+
+		if (svm->nested.nested_run_pending)
+			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
+	}
+
+	if (gif_set(svm))
+		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
+
+	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
+		return -EFAULT;
+
+	if (!is_guest_mode(vcpu))
+		goto out;
+
+	/*
+	 * Copy over the full size of the VMCB rather than just the size
+	 * of the struct.
+	 */
+	if (memzero_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
+		return -EFAULT;
+	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
+			 sizeof(user_vmcb->control)))
+		return -EFAULT;
+	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
+			 sizeof(user_vmcb->save)))
+		return -EFAULT;
+
+out:
+	return kvm_state.size;
+}
+
+static int svm_set_nested_state(struct kvm_vcpu *vcpu,
+				struct kvm_nested_state __user *user_kvm_nested_state,
+				struct kvm_nested_state *kvm_state)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *hsave = svm->nested.hsave;
+	struct vmcb __user *user_vmcb = (struct vmcb __user *)
+		&user_kvm_nested_state->data.svm[0];
+	struct vmcb_control_area ctl;
+	struct vmcb_save_area save;
+	u32 cr0;
+
+	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
+		return -EINVAL;
+
+	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
+				 KVM_STATE_NESTED_RUN_PENDING |
+				 KVM_STATE_NESTED_GIF_SET))
+		return -EINVAL;
+
+	/*
+	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
+	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
+	 */
+	if (!(vcpu->arch.efer & EFER_SVME)) {
+		/* GIF=1 and no guest mode are required if SVME=0.  */
+		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
+			return -EINVAL;
+	}
+
+	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
+	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+		return -EINVAL;
+
+	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
+		svm_leave_nested(svm);
+		goto out_set_gif;
+	}
+
+	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
+		return -EINVAL;
+	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
+		return -EINVAL;
+	if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
+		return -EFAULT;
+	if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
+		return -EFAULT;
+
+	if (!nested_vmcb_check_controls(&ctl))
+		return -EINVAL;
+
+	/*
+	 * Processor state contains L2 state.  Check that it is
+	 * valid for guest mode (see nested_vmcb_checks).
+	 */
+	cr0 = kvm_read_cr0(vcpu);
+        if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
+                return -EINVAL;
+
+	/*
+	 * Validate host state saved from before VMRUN (see
+	 * nested_svm_check_permissions).
+	 * TODO: validate reserved bits for all saved state.
+	 */
+	if (!(save.cr0 & X86_CR0_PG))
+		return -EINVAL;
+
+	/*
+	 * All checks done, we can enter guest mode.  L1 control fields
+	 * come from the nested save state.  Guest state is already
+	 * in the registers, the save area of the nested state instead
+	 * contains saved L1 state.
+	 */
+	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
+	hsave->save = save;
+
+	svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
+	load_nested_vmcb_control(svm, &ctl);
+	nested_prepare_vmcb_control(svm);
+
+out_set_gif:
+	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
+	return 0;
+}
+
 struct kvm_x86_nested_ops svm_nested_ops = {
 	.check_events = svm_check_nested_events,
+	.get_state = svm_get_nested_state,
+	.set_state = svm_set_nested_state,
 };
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 56be704ffe95..f64d071715d2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1191,6 +1191,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 		svm->avic_is_running = true;
 
 	svm->nested.hsave = page_address(hsave_page);
+	clear_page(svm->nested.hsave);
 
 	svm->msrpm = page_address(msrpm_pages);
 	svm_vcpu_init_msrpm(svm->msrpm);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 51ebb60e1533..106fc6fceb97 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -437,11 +437,6 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 	}
 }
 
-static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
-{
-	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
-}
-
 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
 					       struct vmcs12 *vmcs12)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0001b2addc66..3e9252737f9d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4626,7 +4626,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 
 		if (kvm_state.flags &
 		    ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
-		      | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING))
+		      | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING
+		      | KVM_STATE_NESTED_GIF_SET))
 			break;
 
 		/* nested_run_pending implies guest_mode.  */
-- 
2.18.2


^ permalink raw reply related	[flat|nested] 41+ messages in thread

* Re: [PATCH 21/24] KVM: x86: always update CR3 in VMCB
  2020-05-20 17:21 ` [PATCH 21/24] KVM: x86: always update CR3 in VMCB Paolo Bonzini
@ 2020-05-20 18:22   ` Sean Christopherson
  2020-05-20 20:14     ` Paolo Bonzini
  2020-05-20 18:24   ` Sean Christopherson
  1 sibling, 1 reply; 41+ messages in thread
From: Sean Christopherson @ 2020-05-20 18:22 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm, vkuznets, Joerg Roedel

On Wed, May 20, 2020 at 01:21:42PM -0400, Paolo Bonzini wrote:
> vmx_load_mmu_pgd is delaying the write of GUEST_CR3 to prepare_vmcs02 as
> an optimization, but this is only correct before the nested vmentry.
> If userspace is modifying CR3 with KVM_SET_SREGS after the VM has
> already been put in guest mode, the value of CR3 will not be updated.
> Remove the optimization, which almost never triggers anyway.
> 
> This also applies to SVM, where the code was added in commit 689f3bf21628
> ("KVM: x86: unify callbacks to load paging root", 2020-03-16) just to keep the
> two vendor-specific modules closer.
> 
> Fixes: 04f11ef45810 ("KVM: nVMX: Always write vmcs02.GUEST_CR3 during nested VM-Enter")
> Fixes: 689f3bf21628 ("KVM: x86: unify callbacks to load paging root")
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---

...

> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 55712dd86baf..7daf6a50e774 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -3085,10 +3085,7 @@ void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd)
>  			spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
>  		}
>  
> -		/* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */
> -		if (is_guest_mode(vcpu))
> -			update_guest_cr3 = false;
> -		else if (!enable_unrestricted_guest && !is_paging(vcpu))
> +		if (!enable_unrestricted_guest && !is_paging(vcpu))
>  			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
>  		else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))

As an alternative fix, what about marking VCPU_EXREG_CR3 dirty in
__set_sregs()?  E.g.

		/*
		 * Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter, but
		 * it can be explicitly dirtied by KVM_SET_SREGS.
		 */
		if (is_guest_mode(vcpu) &&
		    !test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_dirty))

There's already a dependency on __set_sregs() doing
kvm_register_mark_available() before kvm_mmu_reset_context(), i.e. the
code is already a bit kludgy.  The dirty check would make the kludge less
subtle and provide explicit documentation.

>  			guest_cr3 = vcpu->arch.cr3;

The comment that's just below the context is now stale, e.g. replace
vmcs01.GUEST_CR3 with vmcs.GUEST_CR3.

> -- 
> 2.18.2
> 
> 

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 21/24] KVM: x86: always update CR3 in VMCB
  2020-05-20 17:21 ` [PATCH 21/24] KVM: x86: always update CR3 in VMCB Paolo Bonzini
  2020-05-20 18:22   ` Sean Christopherson
@ 2020-05-20 18:24   ` Sean Christopherson
  1 sibling, 0 replies; 41+ messages in thread
From: Sean Christopherson @ 2020-05-20 18:24 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm, vkuznets, Joerg Roedel

Oh, and it'd be nice to do s/VMCB/VMCB\/VMCS/ in the subject; I almost
glossed over this patch because it explicitly said VMCB :-)

On Wed, May 20, 2020 at 01:21:42PM -0400, Paolo Bonzini wrote:
> vmx_load_mmu_pgd is delaying the write of GUEST_CR3 to prepare_vmcs02 as
> an optimization, but this is only correct before the nested vmentry.
> If userspace is modifying CR3 with KVM_SET_SREGS after the VM has
> already been put in guest mode, the value of CR3 will not be updated.
> Remove the optimization, which almost never triggers anyway.
> 
> This also applies to SVM, where the code was added in commit 689f3bf21628
> ("KVM: x86: unify callbacks to load paging root", 2020-03-16) just to keep the
> two vendor-specific modules closer.
> 
> Fixes: 04f11ef45810 ("KVM: nVMX: Always write vmcs02.GUEST_CR3 during nested VM-Enter")
> Fixes: 689f3bf21628 ("KVM: x86: unify callbacks to load paging root")
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 00/24] KVM: nSVM: event fixes and migration support
  2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
                   ` (23 preceding siblings ...)
  2020-05-20 17:21 ` [PATCH 24/24] KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE Paolo Bonzini
@ 2020-05-20 19:24 ` Maxim Levitsky
  2020-05-20 20:42   ` Paolo Bonzini
  24 siblings, 1 reply; 41+ messages in thread
From: Maxim Levitsky @ 2020-05-20 19:24 UTC (permalink / raw)
  To: Paolo Bonzini, linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

On Wed, 2020-05-20 at 13:21 -0400, Paolo Bonzini wrote:
> Large parts of this series were posted before (patches 1, 3-4-5 and
> 6-7-8-12-13-14).  This is basically what I'd like to get into 5.8 as
> far as nested SVM is concerned; the fix for exception vmexits is related
> to migration support, because it gets rid of the exit_required flag
> and therefore consolidates the SVM migration format.
> 
> There are a couple more bugfixes (2 and 21), the latter of which actually
> affects VMX as well.
> 
> The SVM migration data consists of:
> 
> - the GIF state
> 
> - the guest mode and nested-run-pending flags
> 
> - the host state from before VMRUN
> 
> - the nested VMCB control state
> 
> The last two items are conveniently packaged in VMCB format.  Compared
> to the previous prototype, HF_HIF_MASK is removed since it is part of
> "the host state from before VMRUN".
> 
> The patch has been tested with the QEMU changes after my signature,
> where it also fixes system_reset while x86/svm.flat runs.
> 
> Paolo
> 
> Paolo Bonzini (24):
>   KVM: nSVM: fix condition for filtering async PF
>   KVM: nSVM: leave ASID aside in copy_vmcb_control_area
>   KVM: nSVM: inject exceptions via svm_check_nested_events
>   KVM: nSVM: remove exit_required
>   KVM: nSVM: correctly inject INIT vmexits
>   KVM: nSVM: move map argument out of enter_svm_guest_mode
>   KVM: nSVM: extract load_nested_vmcb_control
>   KVM: nSVM: extract preparation of VMCB for nested run
>   KVM: nSVM: clean up tsc_offset update
>   KVM: nSVM: pass vmcb_control_area to copy_vmcb_control_area
>   KVM: nSVM: remove trailing padding for struct vmcb_control_area
>   KVM: nSVM: save all control fields in svm->nested
>   KVM: nSVM: do not reload pause filter fields from VMCB
>   KVM: nSVM: remove HF_VINTR_MASK
>   KVM: nSVM: remove HF_HIF_MASK
>   KVM: nSVM: split nested_vmcb_check_controls
>   KVM: nSVM: do all MMU switch work in init/uninit functions
>   KVM: nSVM: leave guest mode when clearing EFER.SVME
>   KVM: nSVM: extract svm_set_gif
>   KVM: MMU: pass arbitrary CR0/CR4/EFER to kvm_init_shadow_mmu
>   KVM: x86: always update CR3 in VMCB
>   uaccess: add memzero_user
>   selftests: kvm: add a SVM version of state-test
>   KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE
> 
>  arch/x86/include/asm/kvm_host.h               |   2 -
>  arch/x86/include/asm/svm.h                    |   9 +-
>  arch/x86/include/uapi/asm/kvm.h               |  17 +-
>  arch/x86/kvm/cpuid.h                          |   5 +
>  arch/x86/kvm/irq.c                            |   1 +
>  arch/x86/kvm/mmu.h                            |   2 +-
>  arch/x86/kvm/mmu/mmu.c                        |  14 +-
>  arch/x86/kvm/svm/nested.c                     | 525 +++++++++++-------
>  arch/x86/kvm/svm/svm.c                        | 107 ++--
>  arch/x86/kvm/svm/svm.h                        |  32 +-
>  arch/x86/kvm/vmx/nested.c                     |   5 -
>  arch/x86/kvm/vmx/vmx.c                        |   5 +-
>  arch/x86/kvm/x86.c                            |   3 +-
>  include/linux/uaccess.h                       |   1 +
>  lib/usercopy.c                                |  63 +++
>  .../testing/selftests/kvm/x86_64/state_test.c |  65 ++-
>  16 files changed, 549 insertions(+), 307 deletions(-)
> 

I just smoke-tested this patch series on my system.

Patch 24 doesn't apply cleanly on top of kvm/queue; I applied it manually,
due to the missing KVM_STATE_NESTED_MTF_PENDING bit.

Also, patch 22 needs ALIGN_UP, which is not in mainline.
Probably in linux-next?

With these fixes, I no longer see #DE exceptions in the nested guest I try to run;
however, it still hangs, right around the time it tries to access the PS/2 keyboard/mouse.

Best regards,
	Maxim Levitsky


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 21/24] KVM: x86: always update CR3 in VMCB
  2020-05-20 18:22   ` Sean Christopherson
@ 2020-05-20 20:14     ` Paolo Bonzini
  2020-05-22 22:47       ` Sean Christopherson
  0 siblings, 1 reply; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 20:14 UTC (permalink / raw)
  To: Sean Christopherson; +Cc: linux-kernel, kvm, vkuznets, Joerg Roedel

On 20/05/20 20:22, Sean Christopherson wrote:
> As an alternative fix, what about marking VCPU_EXREG_CR3 dirty in
> __set_sregs()?  E.g.
> 
> 		/*
> 		 * Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter, but
> 		 * it can be explicitly dirtied by KVM_SET_SREGS.
> 		 */
> 		if (is_guest_mode(vcpu) &&
> 		    !test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_dirty))
> 
> There's already a dependency on __set_sregs() doing
> kvm_register_mark_available() before kvm_mmu_reset_context(), i.e. the
> code is already a bit kludgy.  The dirty check would make the kludge less
> subtle and provide explicit documentation.

A comment in __set_sregs is certainly a good idea.  But checking for
dirty seems worse since the caching of CR3 is a bit special in this
respect (it's never marked dirty).
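
(If I remember the code correctly, today __set_sregs only does
something along the lines of

	vcpu->arch.cr3 = sregs->cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

so for CR3 only regs_avail is ever touched, never regs_dirty, which is
why keying off the dirty bit looks fragile to me.)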

This patch should probably be split too, so that the Fixes tags are
separate for Intel and AMD.

Paolo

>>  			guest_cr3 = vcpu->arch.cr3;
> 
> The comment that's just below the context is now stale, e.g. replace
> vmcs01.GUEST_CR3 with vmcs.GUEST_CR3.
> 
>> -- 
>> 2.18.2
>>
>>
> 


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 22/24] uaccess: add memzero_user
  2020-05-20 17:21 ` [PATCH 22/24] uaccess: add memzero_user Paolo Bonzini
@ 2020-05-20 20:40   ` Christoph Hellwig
  2020-05-20 20:50     ` Al Viro
  2020-05-20 21:13     ` Paolo Bonzini
  0 siblings, 2 replies; 41+ messages in thread
From: Christoph Hellwig @ 2020-05-20 20:40 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm, vkuznets, Joerg Roedel

On Wed, May 20, 2020 at 01:21:43PM -0400, Paolo Bonzini wrote:
> +			unsafe_put_user(val, (unsigned long __user *) from, err_fault);

This adds a way too long line.  In many ways it would be much nicer if
you used an unsigned long __user * variable internally, as that would
remove all these crazy casts and actually make the code readable.
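
Something like this, perhaps (completely untested, just to illustrate;
the variable name is made up and the surrounding user_access_begin()
section is assumed to already be there):

	unsigned long __user *p = from;

	/* zero the bulk of the range with word-sized stores */
	while (size >= sizeof(unsigned long)) {
		unsafe_put_user(0ul, p, err_fault);
		p++;
		size -= sizeof(unsigned long);
	}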

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 00/24] KVM: nSVM: event fixes and migration support
  2020-05-20 19:24 ` [PATCH 00/24] KVM: nSVM: event fixes and migration support Maxim Levitsky
@ 2020-05-20 20:42   ` Paolo Bonzini
  2020-05-20 21:08     ` Maxim Levitsky
  0 siblings, 1 reply; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 20:42 UTC (permalink / raw)
  To: Maxim Levitsky, linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

On 20/05/20 21:24, Maxim Levitsky wrote:
> Patch 24 doesn't apply cleanly on top of kvm/queue; I applied it manually,
> due to the missing KVM_STATE_NESTED_MTF_PENDING bit.
> 
> Also, patch 22 needs ALIGN_UP, which is not in mainline.
> Probably in linux-next?

Just replace it with ALIGN.  (I tested it with memzero_user in
arch/x86/kvm/ for convenience, and the lib/ patch ended up out of sync
with the actual code).

> With these fixes, I no longer see #DE exceptions in the nested guest I try to run;
> however, it still hangs, right around the time it tries to access the PS/2 keyboard/mouse.

IIRC you said that the bug appeared with the vintr rework, and then went
from hang to #DE and now back to hang?  And the hang is reported by L2,
not L1?

In order to debug the hang, a good start would be to understand if it
also happens with vgif=0.  This is because with vgif=1 we use VINTR
intercepts even while GIF=0, so the whole thing is a bit more complicated.

Paolo


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 22/24] uaccess: add memzero_user
  2020-05-20 20:40   ` Christoph Hellwig
@ 2020-05-20 20:50     ` Al Viro
  2020-05-20 21:13     ` Paolo Bonzini
  1 sibling, 0 replies; 41+ messages in thread
From: Al Viro @ 2020-05-20 20:50 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Paolo Bonzini, linux-kernel, kvm, vkuznets, Joerg Roedel

On Wed, May 20, 2020 at 01:40:36PM -0700, Christoph Hellwig wrote:
> On Wed, May 20, 2020 at 01:21:43PM -0400, Paolo Bonzini wrote:
> > +			unsafe_put_user(val, (unsigned long __user *) from, err_fault);
> 
> This adds a way too long line.  In many ways it would be much nicer if
> you used an unsigned long __user * variable internally, as that would
> remove all these crazy casts and actually make the code readable.

Er...  what's wrong with clear_user(), anyway?

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 00/24] KVM: nSVM: event fixes and migration support
  2020-05-20 20:42   ` Paolo Bonzini
@ 2020-05-20 21:08     ` Maxim Levitsky
  2020-05-20 21:15       ` Paolo Bonzini
  0 siblings, 1 reply; 41+ messages in thread
From: Maxim Levitsky @ 2020-05-20 21:08 UTC (permalink / raw)
  To: Paolo Bonzini, linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

On Wed, 2020-05-20 at 22:42 +0200, Paolo Bonzini wrote:
> On 20/05/20 21:24, Maxim Levitsky wrote:
> > Patch 24 doesn't apply cleanly on top of kvm/queue; I applied it manually,
> > due to the missing KVM_STATE_NESTED_MTF_PENDING bit.
> > 
> > Also, patch 22 needs ALIGN_UP, which is not in mainline.
> > Probably in linux-next?
> 
> Just replace it with ALIGN.  (I tested it with memzero_user in
> arch/x86/kvm/ for convenience, and the lib/ patch ended up out of sync
> with the actual code).
That is exactly what I did.

> 
> > With these fixes, I no longer see #DE exceptions in the nested guest I try to run;
> > however, it still hangs, right around the time it tries to access the PS/2 keyboard/mouse.
> 
> IIRC you said that the bug appeared with the vintr rework, and then went
> from hang to #DE and now back to hang?  And the hang is reported by L2,
> not L1?
Yes, and now the hang appears to be deterministic. The initial hang could happen
randomly; I remember that once the nested guest even got to the getty prompt and hung
on shutdown. Now the hang is deterministic and happens when the kernel prints something
about PS/2. I will re-check this tomorrow.

> 
> In order to debug the hang, a good start would be to understand if it
> also happens with vgif=0.  This is because with vgif=1 we use VINTR
> intercepts even while GIF=0, so the whole thing is a bit more complicated.
I will check this tomorrow as well.


Best regards,
	Maxim Levitsky

> 
> Paolo
> 



^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 22/24] uaccess: add memzero_user
  2020-05-20 20:40   ` Christoph Hellwig
  2020-05-20 20:50     ` Al Viro
@ 2020-05-20 21:13     ` Paolo Bonzini
  2020-05-20 21:28       ` Al Viro
  1 sibling, 1 reply; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 21:13 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-kernel, kvm, vkuznets, Joerg Roedel

On 20/05/20 22:40, Christoph Hellwig wrote:
> On Wed, May 20, 2020 at 01:21:43PM -0400, Paolo Bonzini wrote:
>> +			unsafe_put_user(val, (unsigned long __user *) from, err_fault);
> This adds a way too long line.  In many ways it would be much nicer if
> you used an unsigned long __user * variable internally, as that would
> remove all these crazy casts and actually make the code readable.
> 

Good idea, thanks.

Paolo


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 00/24] KVM: nSVM: event fixes and migration support
  2020-05-20 21:08     ` Maxim Levitsky
@ 2020-05-20 21:15       ` Paolo Bonzini
  0 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 21:15 UTC (permalink / raw)
  To: Maxim Levitsky, linux-kernel, kvm; +Cc: vkuznets, Joerg Roedel

On 20/05/20 23:08, Maxim Levitsky wrote:
>> IIRC you said that the bug appeared with the vintr rework, and then went
>> from hang to #DE and now back to hang?  And the hang is reported by L2,
>> not L1?
> Yes, and now the hang appears to be deterministic.

Ok, that's actually progress.  Also because we can write a testcase.

Paolo


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 22/24] uaccess: add memzero_user
  2020-05-20 21:13     ` Paolo Bonzini
@ 2020-05-20 21:28       ` Al Viro
  2020-05-20 21:33         ` Paolo Bonzini
  0 siblings, 1 reply; 41+ messages in thread
From: Al Viro @ 2020-05-20 21:28 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Christoph Hellwig, linux-kernel, kvm, vkuznets, Joerg Roedel

On Wed, May 20, 2020 at 11:13:36PM +0200, Paolo Bonzini wrote:
> On 20/05/20 22:40, Christoph Hellwig wrote:
> > On Wed, May 20, 2020 at 01:21:43PM -0400, Paolo Bonzini wrote:
> >> +			unsafe_put_user(val, (unsigned long __user *) from, err_fault);
> > This adds a way too long line.  In many ways it would be much nicer if
> > you used an unsigned long __user * variable internally, as that would
> > remove all these crazy casts and actually make the code readable.
> > 
> 
> Good idea, thanks.

Unless I'm seriously misreading that patch, it could've been done as

static inline __must_check int memzero_user(void __user *addr, size_t size)
{
	return clear_user(addr, size) ? -EFAULT : 0;
}

What am I missing?

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 22/24] uaccess: add memzero_user
  2020-05-20 21:28       ` Al Viro
@ 2020-05-20 21:33         ` Paolo Bonzini
  0 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-20 21:33 UTC (permalink / raw)
  To: Al Viro; +Cc: Christoph Hellwig, linux-kernel, kvm, vkuznets, Joerg Roedel

On 20/05/20 23:28, Al Viro wrote:
> Unless I'm seriously misreading that patch, it could've been done as
> 
> static inline __must_check int memzero_user(void __user *addr, size_t size)
> {
> 	return clear_user(addr, size) ? -EFAULT : 0;
> }
> 
> What am I missing?

Ok, I have a serious tunnel vision problem.  Thanks very much, Al.

Since I have your attention, would you take a look at
https://lore.kernel.org/lkml/20200421135119.30007-1-eesposit@redhat.com/
please?

Thanks,

Paolo


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 21/24] KVM: x86: always update CR3 in VMCB
  2020-05-20 20:14     ` Paolo Bonzini
@ 2020-05-22 22:47       ` Sean Christopherson
  2020-05-23  7:07         ` Paolo Bonzini
  0 siblings, 1 reply; 41+ messages in thread
From: Sean Christopherson @ 2020-05-22 22:47 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm, vkuznets, Joerg Roedel

On Wed, May 20, 2020 at 10:14:47PM +0200, Paolo Bonzini wrote:
> On 20/05/20 20:22, Sean Christopherson wrote:
> > As an alternative fix, what about marking VCPU_EXREG_CR3 dirty in
> > __set_sregs()?  E.g.
> > 
> > 		/*
> > 		 * Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter, but
> > 		 * it can be explicitly dirtied by KVM_SET_SREGS.
> > 		 */
> > 		if (is_guest_mode(vcpu) &&
> > 		    !test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_dirty))
> > 
> > There's already a dependency on __set_sregs() doing
> > kvm_register_mark_available() before kvm_mmu_reset_context(), i.e. the
> > code is already a bit kludgy.  The dirty check would make the kludge less
> > subtle and provide explicit documentation.
> 
> A comment in __set_sregs is certainly a good idea.  But checking for
> dirty seems worse since the caching of CR3 is a bit special in this
> respect (it's never marked dirty).

That's why I thought it was so clever :-)

> This patch should probably be split too, so that the Fixes tags are
> separate for Intel and AMD.

That would be nice.

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 21/24] KVM: x86: always update CR3 in VMCB
  2020-05-22 22:47       ` Sean Christopherson
@ 2020-05-23  7:07         ` Paolo Bonzini
  0 siblings, 0 replies; 41+ messages in thread
From: Paolo Bonzini @ 2020-05-23  7:07 UTC (permalink / raw)
  To: Sean Christopherson; +Cc: linux-kernel, kvm, vkuznets, Joerg Roedel

On 23/05/20 00:47, Sean Christopherson wrote:
> On Wed, May 20, 2020 at 10:14:47PM +0200, Paolo Bonzini wrote:
>> This patch should probably be split too, so that the Fixes tags are
>> separate for Intel and AMD.
> 
> That would be nice.

Will do.  Anyway, this series will be quite different in v2, and there
will be a couple more changes to common code to avoid repeated calls to
kvm_cpu_has_injectable_intr (because on AMD I'd like to avoid
unnecessary calls to enable_irq_window; it is already complicated enough
without those).

Paolo


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 02/24] KVM: nSVM: leave ASID aside in copy_vmcb_control_area
  2020-05-20 17:21 ` [PATCH 02/24] KVM: nSVM: leave ASID aside in copy_vmcb_control_area Paolo Bonzini
@ 2020-05-26  0:23   ` Sasha Levin
  0 siblings, 0 replies; 41+ messages in thread
From: Sasha Levin @ 2020-05-26  0:23 UTC (permalink / raw)
  To: Sasha Levin, Paolo Bonzini, linux-kernel, kvm
  Cc: vkuznets, Joerg Roedel, stable, stable

Hi

[This is an automated email]

This commit has been processed because it contains a -stable tag.
The stable tag indicates that it's relevant for the following trees: all

The bot has tested the following trees: v5.6.14, v5.4.42, v4.19.124, v4.14.181, v4.9.224, v4.4.224.

v5.6.14: Failed to apply! Possible dependencies:
    320debe5ef6d ("x86/kvm: Convert to new CPU match macros")
    46a010dd6896 ("kVM SVM: Move SVM related files to own sub-directory")
    64b5bd270426 ("KVM: nSVM: ignore L1 interrupt window while running L2 with V_INTR_MASKING=1")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    b5ec2e020b70 ("KVM: nSVM: do not change host intercepts while nested VM is running")
    ba5bade4cc0d ("x86/devicetable: Move x86 specific macro out of generic code")

v5.4.42: Failed to apply! Possible dependencies:
    320debe5ef6d ("x86/kvm: Convert to new CPU match macros")
    33af3a7ef9e6 ("KVM: SVM: Reduce WBINVD/DF_FLUSH invocations")
    46a010dd6896 ("kVM SVM: Move SVM related files to own sub-directory")
    4be946728f65 ("KVM: x86/vPMU: Declare kvm_pmu->reprogram_pmi field using DECLARE_BITMAP")
    4e19c36f2df8 ("kvm: x86: Introduce APICv inhibit reason bits")
    575b255c1663 ("KVM: x86: allow compiling as non-module with W=1")
    83af5e65a895 ("KVM: SVM: Guard against DEACTIVATE when performing WBINVD/DF_FLUSH")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    98ff80f5b788 ("KVM: x86/vPMU: Rename pmu_ops callbacks from msr_idx to rdpmc_ecx")
    9a0bf0543069 ("svm: Deactivate AVIC when launching guest with nested SVM support")
    a6da0d77e98e ("KVM: x86/vPMU: Reuse perf_event to avoid unnecessary pmc_reprogram_counter")
    b35e5548b411 ("KVM: x86/vPMU: Add lazy mechanism to release perf_event per vPMC")
    ba5bade4cc0d ("x86/devicetable: Move x86 specific macro out of generic code")
    c900c156c518 ("KVM: x86/vPMU: Introduce a new kvm_pmu_ops->msr_idx_to_pmc callback")
    e3b9a9e147db ("KVM: SVM: Serialize access to the SEV ASID bitmap")
    f3515dc3bef8 ("svm: Temporarily deactivate AVIC during ExtINT handling")
    f4fdc0a2edf4 ("kvm: x86: hyperv: Use APICv update request interface")

v4.19.124: Failed to apply! Possible dependencies:
    09abb5e3e5e5 ("KVM: nVMX: call kvm_skip_emulated_instruction in nested_vmx_{fail,succeed}")
    09abe3200266 ("KVM: nVMX: split pieces of prepare_vmcs02() to prepare_vmcs02_early()")
    1438921c6dc1 ("KVM: nVMX: Flush TLB entries tagged by dest EPTP on L1<->L2 transitions")
    199b118ab3d5 ("KVM: VMX: Alphabetize the includes in vmx.c")
    1abf23fb42f5 ("KVM: nVMX: use vm_exit_controls_init() to write exit controls for vmcs02")
    327c072187f7 ("KVM: nVMX: Flush linear and combined mappings on VPID02 related flushes")
    3d5bdae8b164 ("KVM: nVMX: Use correct VPID02 when emulating L1 INVVPID")
    3df5c37e55c8 ("KVM: nVMX: try to set EFER bits correctly when initializing controls")
    453eafbe65f7 ("KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines")
    55d2375e58a6 ("KVM: nVMX: Move nested code to dedicated files")
    5b8ba41dafd7 ("KVM: nVMX: move vmcs12 EPTP consistency check to check_vmentry_prereqs()")
    609363cf81fc ("KVM: nVMX: Move vmcs12 code to dedicated files")
    75edce8a4548 ("KVM: VMX: Move eVMCS code to dedicated files")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    945679e301ea ("KVM: nVMX: add enlightened VMCS state")
    a633e41e7362 ("KVM: nVMX: assimilate nested_vmx_entry_failure() into nested_vmx_enter_non_root_mode()")
    a821bab2d1ee ("KVM: VMX: Move VMX specific files to a "vmx" subdirectory")
    b8bbab928fb1 ("KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR")
    d63907dc7dd1 ("KVM: nVMX: rename enter_vmx_non_root_mode to nested_vmx_enter_non_root_mode")
    efebf0aaec3d ("KVM: nVMX: Do not flush TLB on L1<->L2 transitions if L1 uses VPID and EPT")

v4.14.181: Failed to apply! Possible dependencies:
    0234bf885236 ("KVM: x86: introduce ISA specific SMM entry/exit callbacks")
    199b118ab3d5 ("KVM: VMX: Alphabetize the includes in vmx.c")
    44900ba65e16 ("KVM: VMX: optimize shadow VMCS copying")
    453eafbe65f7 ("KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines")
    55d2375e58a6 ("KVM: nVMX: Move nested code to dedicated files")
    5b15706dbf5b ("kvm: vmx: Introduce VMCS12_MAX_FIELD_INDEX")
    609363cf81fc ("KVM: nVMX: Move vmcs12 code to dedicated files")
    6677f3dad874 ("KVM: nVMX: introduce struct nested_vmx_msrs")
    72d7b374b14d ("KVM: x86: introduce ISA specific smi_allowed callback")
    72e9cbdb4338 ("KVM: nVMX: fix SMI injection in guest mode")
    736fdf72518b ("KVM: VMX: rename RDSEED and RDRAND vmx ctrls to reflect exiting")
    74a497fae754 ("KVM: nVMX: track dirty state of non-shadowed VMCS fields")
    75edce8a4548 ("KVM: VMX: Move eVMCS code to dedicated files")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    945679e301ea ("KVM: nVMX: add enlightened VMCS state")
    a821bab2d1ee ("KVM: VMX: Move VMX specific files to a "vmx" subdirectory")
    b8bbab928fb1 ("KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR")
    c5d167b27e00 ("KVM: vmx: shadow more fields that are read/written on every vmexits")
    c9e9deae76b8 ("KVM: VMX: split list of shadowed VMCS field to a separate file")
    f4160e459c57 ("kvm: nVMX: Add support for "VMWRITE to any supported field"")

v4.9.224: Failed to apply! Possible dependencies:
    1279a6b124e4 ("KVM: nVMX: single function for switching between vmcs")
    199b118ab3d5 ("KVM: VMX: Alphabetize the includes in vmx.c")
    27c42a1bb867 ("KVM: nVMX: Enable VMFUNC for the L1 hypervisor")
    2a499e49c2ec ("KVM: vmx: Enable VMFUNCs")
    3899152ccbf4 ("KVM: nVMX: fix checks on CR{0,4} during virtual VMX operation")
    44900ba65e16 ("KVM: VMX: optimize shadow VMCS copying")
    453eafbe65f7 ("KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines")
    4f350c6dbcb9 ("kvm: nVMX: Handle deferred early VMLAUNCH/VMRESUME failure properly")
    55d2375e58a6 ("KVM: nVMX: Move nested code to dedicated files")
    5a6a9748b4b4 ("KVM: nVMX: load GUEST_EFER after GUEST_CR0 during emulated VM-entry")
    609363cf81fc ("KVM: nVMX: Move vmcs12 code to dedicated files")
    62cc6b9dc61e ("KVM: nVMX: support restore of VMX capability MSRs")
    72e9cbdb4338 ("KVM: nVMX: fix SMI injection in guest mode")
    74a497fae754 ("KVM: nVMX: track dirty state of non-shadowed VMCS fields")
    75edce8a4548 ("KVM: VMX: Move eVMCS code to dedicated files")
    7ca29de21362 ("KVM: nVMX: fix CR3 load if L2 uses PAE paging and EPT")
    858e25c06fb0 ("kvm: nVMX: Refactor nested_vmx_run()")
    85fd514e2423 ("kvm: nVMX: Shadow "high" parts of shadowed 64-bit VMCS fields")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    945679e301ea ("KVM: nVMX: add enlightened VMCS state")
    9ed38ffad473 ("KVM: nVMX: introduce nested_vmx_load_cr3 and call it on vmentry")
    a821bab2d1ee ("KVM: VMX: Move VMX specific files to a "vmx" subdirectory")
    a8bc284eb70f ("kvm: nVMX: Refactor handle_vmptrld()")
    b8bbab928fb1 ("KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR")
    c5d167b27e00 ("KVM: vmx: shadow more fields that are read/written on every vmexits")
    c7c2c709b60e ("kvm: nVMX: Validate CR3 target count on nested VM-entry")
    c9e9deae76b8 ("KVM: VMX: split list of shadowed VMCS field to a separate file")
    ca0bde28f2ed ("kvm: nVMX: Split VMCS checks from nested_vmx_run()")
    cf3215d9394a ("kvm: nVMX: Fetch VM_INSTRUCTION_ERROR from vmcs02 on vmx->fail")
    ee146c1c100d ("KVM: nVMX: propagate errors from prepare_vmcs02")

v4.4.224: Failed to apply! Possible dependencies:
    199b118ab3d5 ("KVM: VMX: Alphabetize the includes in vmx.c")
    355f4fb1405e ("kvm: nVMX: VMCLEAR an active shadow VMCS after last use")
    453eafbe65f7 ("KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines")
    4f2777bc9797 ("kvm: x86: nVMX: maintain internal copy of current VMCS")
    55d2375e58a6 ("KVM: nVMX: Move nested code to dedicated files")
    609363cf81fc ("KVM: nVMX: Move vmcs12 code to dedicated files")
    6308630bd3db ("kvm/x86: split ioapic-handled and EOI exit bitmaps")
    74a497fae754 ("KVM: nVMX: track dirty state of non-shadowed VMCS fields")
    75edce8a4548 ("KVM: VMX: Move eVMCS code to dedicated files")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    945679e301ea ("KVM: nVMX: add enlightened VMCS state")
    a821bab2d1ee ("KVM: VMX: Move VMX specific files to a "vmx" subdirectory")
    a8bc284eb70f ("kvm: nVMX: Refactor handle_vmptrld()")
    b8bbab928fb1 ("KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR")
    bb3541f175a9 ("KVM: x86: Fix typos")
    d62caabb41f3 ("kvm/x86: per-vcpu apicv deactivation support")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 01/24] KVM: nSVM: fix condition for filtering async PF
  2020-05-20 17:21 ` [PATCH 01/24] KVM: nSVM: fix condition for filtering async PF Paolo Bonzini
@ 2020-05-26  0:23   ` Sasha Levin
  0 siblings, 0 replies; 41+ messages in thread
From: Sasha Levin @ 2020-05-26  0:23 UTC (permalink / raw)
  To: Sasha Levin, Paolo Bonzini, linux-kernel, kvm
  Cc: vkuznets, Joerg Roedel, stable, stable

Hi

[This is an automated email]

This commit has been processed because it contains a -stable tag.
The stable tag indicates that it's relevant for the following trees: all

The bot has tested the following trees: v5.6.14, v5.4.42, v4.19.124, v4.14.181, v4.9.224, v4.4.224.

v5.6.14: Failed to apply! Possible dependencies:
    320debe5ef6d ("x86/kvm: Convert to new CPU match macros")
    46a010dd6896 ("kVM SVM: Move SVM related files to own sub-directory")
    64b5bd270426 ("KVM: nSVM: ignore L1 interrupt window while running L2 with V_INTR_MASKING=1")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    b5ec2e020b70 ("KVM: nSVM: do not change host intercepts while nested VM is running")
    ba5bade4cc0d ("x86/devicetable: Move x86 specific macro out of generic code")

v5.4.42: Failed to apply! Possible dependencies:
    320debe5ef6d ("x86/kvm: Convert to new CPU match macros")
    33af3a7ef9e6 ("KVM: SVM: Reduce WBINVD/DF_FLUSH invocations")
    46a010dd6896 ("kVM SVM: Move SVM related files to own sub-directory")
    4be946728f65 ("KVM: x86/vPMU: Declare kvm_pmu->reprogram_pmi field using DECLARE_BITMAP")
    4e19c36f2df8 ("kvm: x86: Introduce APICv inhibit reason bits")
    575b255c1663 ("KVM: x86: allow compiling as non-module with W=1")
    83af5e65a895 ("KVM: SVM: Guard against DEACTIVATE when performing WBINVD/DF_FLUSH")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    98ff80f5b788 ("KVM: x86/vPMU: Rename pmu_ops callbacks from msr_idx to rdpmc_ecx")
    9a0bf0543069 ("svm: Deactivate AVIC when launching guest with nested SVM support")
    a6da0d77e98e ("KVM: x86/vPMU: Reuse perf_event to avoid unnecessary pmc_reprogram_counter")
    b35e5548b411 ("KVM: x86/vPMU: Add lazy mechanism to release perf_event per vPMC")
    ba5bade4cc0d ("x86/devicetable: Move x86 specific macro out of generic code")
    c900c156c518 ("KVM: x86/vPMU: Introduce a new kvm_pmu_ops->msr_idx_to_pmc callback")
    e3b9a9e147db ("KVM: SVM: Serialize access to the SEV ASID bitmap")
    f3515dc3bef8 ("svm: Temporarily deactivate AVIC during ExtINT handling")
    f4fdc0a2edf4 ("kvm: x86: hyperv: Use APICv update request interface")

v4.19.124: Failed to apply! Possible dependencies:
    09abb5e3e5e5 ("KVM: nVMX: call kvm_skip_emulated_instruction in nested_vmx_{fail,succeed}")
    09abe3200266 ("KVM: nVMX: split pieces of prepare_vmcs02() to prepare_vmcs02_early()")
    1438921c6dc1 ("KVM: nVMX: Flush TLB entries tagged by dest EPTP on L1<->L2 transitions")
    199b118ab3d5 ("KVM: VMX: Alphabetize the includes in vmx.c")
    1abf23fb42f5 ("KVM: nVMX: use vm_exit_controls_init() to write exit controls for vmcs02")
    327c072187f7 ("KVM: nVMX: Flush linear and combined mappings on VPID02 related flushes")
    3d5bdae8b164 ("KVM: nVMX: Use correct VPID02 when emulating L1 INVVPID")
    3df5c37e55c8 ("KVM: nVMX: try to set EFER bits correctly when initializing controls")
    453eafbe65f7 ("KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines")
    55d2375e58a6 ("KVM: nVMX: Move nested code to dedicated files")
    5b8ba41dafd7 ("KVM: nVMX: move vmcs12 EPTP consistency check to check_vmentry_prereqs()")
    609363cf81fc ("KVM: nVMX: Move vmcs12 code to dedicated files")
    75edce8a4548 ("KVM: VMX: Move eVMCS code to dedicated files")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    945679e301ea ("KVM: nVMX: add enlightened VMCS state")
    a633e41e7362 ("KVM: nVMX: assimilate nested_vmx_entry_failure() into nested_vmx_enter_non_root_mode()")
    a821bab2d1ee ("KVM: VMX: Move VMX specific files to a "vmx" subdirectory")
    b8bbab928fb1 ("KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR")
    d63907dc7dd1 ("KVM: nVMX: rename enter_vmx_non_root_mode to nested_vmx_enter_non_root_mode")
    efebf0aaec3d ("KVM: nVMX: Do not flush TLB on L1<->L2 transitions if L1 uses VPID and EPT")

v4.14.181: Failed to apply! Possible dependencies:
    0234bf885236 ("KVM: x86: introduce ISA specific SMM entry/exit callbacks")
    199b118ab3d5 ("KVM: VMX: Alphabetize the includes in vmx.c")
    44900ba65e16 ("KVM: VMX: optimize shadow VMCS copying")
    453eafbe65f7 ("KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines")
    55d2375e58a6 ("KVM: nVMX: Move nested code to dedicated files")
    5b15706dbf5b ("kvm: vmx: Introduce VMCS12_MAX_FIELD_INDEX")
    609363cf81fc ("KVM: nVMX: Move vmcs12 code to dedicated files")
    6677f3dad874 ("KVM: nVMX: introduce struct nested_vmx_msrs")
    72d7b374b14d ("KVM: x86: introduce ISA specific smi_allowed callback")
    72e9cbdb4338 ("KVM: nVMX: fix SMI injection in guest mode")
    736fdf72518b ("KVM: VMX: rename RDSEED and RDRAND vmx ctrls to reflect exiting")
    74a497fae754 ("KVM: nVMX: track dirty state of non-shadowed VMCS fields")
    75edce8a4548 ("KVM: VMX: Move eVMCS code to dedicated files")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    945679e301ea ("KVM: nVMX: add enlightened VMCS state")
    a821bab2d1ee ("KVM: VMX: Move VMX specific files to a "vmx" subdirectory")
    b8bbab928fb1 ("KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR")
    c5d167b27e00 ("KVM: vmx: shadow more fields that are read/written on every vmexits")
    c9e9deae76b8 ("KVM: VMX: split list of shadowed VMCS field to a separate file")
    f4160e459c57 ("kvm: nVMX: Add support for "VMWRITE to any supported field"")

v4.9.224: Failed to apply! Possible dependencies:
    1279a6b124e4 ("KVM: nVMX: single function for switching between vmcs")
    199b118ab3d5 ("KVM: VMX: Alphabetize the includes in vmx.c")
    27c42a1bb867 ("KVM: nVMX: Enable VMFUNC for the L1 hypervisor")
    2a499e49c2ec ("KVM: vmx: Enable VMFUNCs")
    3899152ccbf4 ("KVM: nVMX: fix checks on CR{0,4} during virtual VMX operation")
    44900ba65e16 ("KVM: VMX: optimize shadow VMCS copying")
    453eafbe65f7 ("KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines")
    4f350c6dbcb9 ("kvm: nVMX: Handle deferred early VMLAUNCH/VMRESUME failure properly")
    55d2375e58a6 ("KVM: nVMX: Move nested code to dedicated files")
    5a6a9748b4b4 ("KVM: nVMX: load GUEST_EFER after GUEST_CR0 during emulated VM-entry")
    609363cf81fc ("KVM: nVMX: Move vmcs12 code to dedicated files")
    62cc6b9dc61e ("KVM: nVMX: support restore of VMX capability MSRs")
    72e9cbdb4338 ("KVM: nVMX: fix SMI injection in guest mode")
    74a497fae754 ("KVM: nVMX: track dirty state of non-shadowed VMCS fields")
    75edce8a4548 ("KVM: VMX: Move eVMCS code to dedicated files")
    7ca29de21362 ("KVM: nVMX: fix CR3 load if L2 uses PAE paging and EPT")
    858e25c06fb0 ("kvm: nVMX: Refactor nested_vmx_run()")
    85fd514e2423 ("kvm: nVMX: Shadow "high" parts of shadowed 64-bit VMCS fields")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    945679e301ea ("KVM: nVMX: add enlightened VMCS state")
    9ed38ffad473 ("KVM: nVMX: introduce nested_vmx_load_cr3 and call it on vmentry")
    a821bab2d1ee ("KVM: VMX: Move VMX specific files to a "vmx" subdirectory")
    a8bc284eb70f ("kvm: nVMX: Refactor handle_vmptrld()")
    b8bbab928fb1 ("KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR")
    c5d167b27e00 ("KVM: vmx: shadow more fields that are read/written on every vmexits")
    c7c2c709b60e ("kvm: nVMX: Validate CR3 target count on nested VM-entry")
    c9e9deae76b8 ("KVM: VMX: split list of shadowed VMCS field to a separate file")
    ca0bde28f2ed ("kvm: nVMX: Split VMCS checks from nested_vmx_run()")
    cf3215d9394a ("kvm: nVMX: Fetch VM_INSTRUCTION_ERROR from vmcs02 on vmx->fail")
    ee146c1c100d ("KVM: nVMX: propagate errors from prepare_vmcs02")

v4.4.224: Failed to apply! Possible dependencies:
    199b118ab3d5 ("KVM: VMX: Alphabetize the includes in vmx.c")
    355f4fb1405e ("kvm: nVMX: VMCLEAR an active shadow VMCS after last use")
    453eafbe65f7 ("KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines")
    4f2777bc9797 ("kvm: x86: nVMX: maintain internal copy of current VMCS")
    55d2375e58a6 ("KVM: nVMX: Move nested code to dedicated files")
    609363cf81fc ("KVM: nVMX: Move vmcs12 code to dedicated files")
    6308630bd3db ("kvm/x86: split ioapic-handled and EOI exit bitmaps")
    74a497fae754 ("KVM: nVMX: track dirty state of non-shadowed VMCS fields")
    75edce8a4548 ("KVM: VMX: Move eVMCS code to dedicated files")
    883b0a91f41a ("KVM: SVM: Move Nested SVM Implementation to nested.c")
    945679e301ea ("KVM: nVMX: add enlightened VMCS state")
    a821bab2d1ee ("KVM: VMX: Move VMX specific files to a "vmx" subdirectory")
    a8bc284eb70f ("kvm: nVMX: Refactor handle_vmptrld()")
    b8bbab928fb1 ("KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR")
    bb3541f175a9 ("KVM: x86: Fix typos")
    d62caabb41f3 ("kvm/x86: per-vcpu apicv deactivation support")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 41+ messages in thread

end of thread, other threads:[~2020-05-26  0:24 UTC | newest]

Thread overview: 41+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-20 17:21 [PATCH 00/24] KVM: nSVM: event fixes and migration support Paolo Bonzini
2020-05-20 17:21 ` [PATCH 01/24] KVM: nSVM: fix condition for filtering async PF Paolo Bonzini
2020-05-26  0:23   ` Sasha Levin
2020-05-20 17:21 ` [PATCH 02/24] KVM: nSVM: leave ASID aside in copy_vmcb_control_area Paolo Bonzini
2020-05-26  0:23   ` Sasha Levin
2020-05-20 17:21 ` [PATCH 03/24] KVM: nSVM: inject exceptions via svm_check_nested_events Paolo Bonzini
2020-05-20 17:21 ` [PATCH 04/24] KVM: nSVM: remove exit_required Paolo Bonzini
2020-05-20 17:21 ` [PATCH 05/24] KVM: nSVM: correctly inject INIT vmexits Paolo Bonzini
2020-05-20 17:21 ` [PATCH 06/24] KVM: nSVM: move map argument out of enter_svm_guest_mode Paolo Bonzini
2020-05-20 17:21 ` [PATCH 07/24] KVM: nSVM: extract load_nested_vmcb_control Paolo Bonzini
2020-05-20 17:21 ` [PATCH 08/24] KVM: nSVM: extract preparation of VMCB for nested run Paolo Bonzini
2020-05-20 17:21 ` [PATCH 09/24] KVM: nSVM: clean up tsc_offset update Paolo Bonzini
2020-05-20 17:21 ` [PATCH 10/24] KVM: nSVM: pass vmcb_control_area to copy_vmcb_control_area Paolo Bonzini
2020-05-20 17:21 ` [PATCH 11/24] KVM: nSVM: remove trailing padding for struct vmcb_control_area Paolo Bonzini
2020-05-20 17:21 ` [PATCH 12/24] KVM: nSVM: save all control fields in svm->nested Paolo Bonzini
2020-05-20 17:21 ` [PATCH 13/24] KVM: nSVM: do not reload pause filter fields from VMCB Paolo Bonzini
2020-05-20 17:21 ` [PATCH 14/24] KVM: nSVM: remove HF_VINTR_MASK Paolo Bonzini
2020-05-20 17:21 ` [PATCH 15/24] KVM: nSVM: remove HF_HIF_MASK Paolo Bonzini
2020-05-20 17:21 ` [PATCH 16/24] KVM: nSVM: split nested_vmcb_check_controls Paolo Bonzini
2020-05-20 17:21 ` [PATCH 17/24] KVM: nSVM: do all MMU switch work in init/uninit functions Paolo Bonzini
2020-05-20 17:21 ` [PATCH 18/24] KVM: nSVM: leave guest mode when clearing EFER.SVME Paolo Bonzini
2020-05-20 17:21 ` [PATCH 19/24] KVM: nSVM: extract svm_set_gif Paolo Bonzini
2020-05-20 17:21 ` [PATCH 20/24] KVM: MMU: pass arbitrary CR0/CR4/EFER to kvm_init_shadow_mmu Paolo Bonzini
2020-05-20 17:21 ` [PATCH 21/24] KVM: x86: always update CR3 in VMCB Paolo Bonzini
2020-05-20 18:22   ` Sean Christopherson
2020-05-20 20:14     ` Paolo Bonzini
2020-05-22 22:47       ` Sean Christopherson
2020-05-23  7:07         ` Paolo Bonzini
2020-05-20 18:24   ` Sean Christopherson
2020-05-20 17:21 ` [PATCH 22/24] uaccess: add memzero_user Paolo Bonzini
2020-05-20 20:40   ` Christoph Hellwig
2020-05-20 20:50     ` Al Viro
2020-05-20 21:13     ` Paolo Bonzini
2020-05-20 21:28       ` Al Viro
2020-05-20 21:33         ` Paolo Bonzini
2020-05-20 17:21 ` [PATCH 23/24] selftests: kvm: add a SVM version of state-test Paolo Bonzini
2020-05-20 17:21 ` [PATCH 24/24] KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE Paolo Bonzini
2020-05-20 19:24 ` [PATCH 00/24] KVM: nSVM: event fixes and migration support Maxim Levitsky
2020-05-20 20:42   ` Paolo Bonzini
2020-05-20 21:08     ` Maxim Levitsky
2020-05-20 21:15       ` Paolo Bonzini

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).