All of lore.kernel.org
 help / color / mirror / Atom feed
* [kvm PATCH 0/2] *** Enable VMX preemption timer migration ***
@ 2020-04-17 18:34 Makarand Sonare
  2020-04-17 18:34 ` [kvm PATCH 1/2] KVM: nVMX - enable VMX preemption timer migration Makarand Sonare
  2020-04-17 18:34 ` [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran Makarand Sonare
  0 siblings, 2 replies; 11+ messages in thread
From: Makarand Sonare @ 2020-04-17 18:34 UTC (permalink / raw)
  To: kvm, pshier, jmattson; +Cc: Makarand Sonare

*** BLURB HERE ***

Makarand Sonare (1):
  KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran

Peter Shier (1):
  KVM: nVMX - enable VMX preemption timer migration

 Documentation/virt/kvm/api.rst        |  4 ++
 arch/x86/include/uapi/asm/kvm.h       |  2 +
 arch/x86/kvm/vmx/nested.c             | 64 +++++++++++++++++++++++----
 arch/x86/kvm/vmx/vmx.h                |  1 +
 arch/x86/kvm/x86.c                    |  1 +
 include/uapi/linux/kvm.h              |  1 +
 tools/arch/x86/include/uapi/asm/kvm.h |  2 +
 7 files changed, 67 insertions(+), 8 deletions(-)

--
2.26.1.301.g55bc3eb7cb9-goog


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [kvm PATCH 1/2] KVM: nVMX - enable VMX preemption timer migration
  2020-04-17 18:34 [kvm PATCH 0/2] *** Enable VMX preemption timer migration *** Makarand Sonare
@ 2020-04-17 18:34 ` Makarand Sonare
  2020-04-22  2:36   ` Sean Christopherson
  2020-04-17 18:34 ` [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran Makarand Sonare
  1 sibling, 1 reply; 11+ messages in thread
From: Makarand Sonare @ 2020-04-17 18:34 UTC (permalink / raw)
  To: kvm, pshier, jmattson; +Cc: Makarand Sonare

From: Peter Shier <pshier@google.com>

Add new field to hold preemption timer remaining until expiration
appended to struct kvm_vmx_nested_state_data. KVM_SET_NESTED_STATE restarts
timer using migrated state regardless of whether L1 sets
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER.

Fixes: cf8b84f48a593 ("kvm: nVMX: Prepare for checkpointing L2 state")

Signed-off-by: Makarand Sonare <makarandsonare@google.com>
Signed-off-by: Peter Shier <pshier@google.com>
Change-Id: I6446aba5a547afa667f0ef4620b1b76115bf3753
---
 Documentation/virt/kvm/api.rst        |  4 ++
 arch/x86/include/uapi/asm/kvm.h       |  2 +
 arch/x86/kvm/vmx/nested.c             | 62 +++++++++++++++++++++++----
 arch/x86/kvm/vmx/vmx.h                |  1 +
 arch/x86/kvm/x86.c                    |  1 +
 include/uapi/linux/kvm.h              |  1 +
 tools/arch/x86/include/uapi/asm/kvm.h |  2 +
 7 files changed, 65 insertions(+), 8 deletions(-)

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index efbbe570aa9b7..097e422ded063 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -4326,6 +4326,9 @@ Errors:
   #define KVM_STATE_NESTED_RUN_PENDING		0x00000002
   #define KVM_STATE_NESTED_EVMCS		0x00000004
 
+  /* Available with KVM_CAP_NESTED_PREEMPTION_TIMER */
+  #define KVM_STATE_NESTED_PREEMPTION_TIMER	0x00000010
+
   #define KVM_STATE_NESTED_FORMAT_VMX		0
   #define KVM_STATE_NESTED_FORMAT_SVM		1
 
@@ -4346,6 +4349,7 @@ Errors:
   struct kvm_vmx_nested_state_data {
 	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
 	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u32 preemption_timer_remaining;
   };
 
 This ioctl copies the vcpu's nested virtualization state from the kernel to
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 3f3f780c8c650..abaddea6f8ff4 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -391,6 +391,7 @@ struct kvm_sync_regs {
 #define KVM_STATE_NESTED_RUN_PENDING	0x00000002
 #define KVM_STATE_NESTED_EVMCS		0x00000004
 #define KVM_STATE_NESTED_MTF_PENDING	0x00000008
+#define KVM_STATE_NESTED_PREEMPTION_TIMER	0x00000010
 
 #define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON	0x00000002
@@ -400,6 +401,7 @@ struct kvm_sync_regs {
 struct kvm_vmx_nested_state_data {
 	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
 	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u32 preemption_timer_remaining;
 };
 
 struct kvm_vmx_nested_state_hdr {
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index cbc9ea2de28f9..5365d7e5921ea 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2014,15 +2014,16 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 		container_of(timer, struct vcpu_vmx, nested.preemption_timer);
 
 	vmx->nested.preemption_timer_expired = true;
+	vmx->nested.preemption_timer_remaining = 0;
 	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
 	kvm_vcpu_kick(&vmx->vcpu);
 
 	return HRTIMER_NORESTART;
 }
 
-static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
+static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
+					u64 preemption_timeout)
 {
-	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
@@ -3293,8 +3294,15 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	 * the timer.
 	 */
 	vmx->nested.preemption_timer_expired = false;
-	if (nested_cpu_has_preemption_timer(vmcs12))
-		vmx_start_preemption_timer(vcpu);
+	if (nested_cpu_has_preemption_timer(vmcs12)) {
+		u64 preemption_timeout;
+
+		if (from_vmentry)
+			preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
+		else
+			preemption_timeout = vmx->nested.preemption_timer_remaining;
+		vmx_start_preemption_timer(vcpu, preemption_timeout);
+	}
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -3888,10 +3896,13 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	else
 		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
 
-	if (nested_cpu_has_preemption_timer(vmcs12) &&
-	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+	if (nested_cpu_has_preemption_timer(vmcs12)) {
+		vmx->nested.preemption_timer_remaining =
+			vmx_get_preemption_timer_value(vcpu);
+		if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
 			vmcs12->vmx_preemption_timer_value =
-				vmx_get_preemption_timer_value(vcpu);
+				vmx->nested.preemption_timer_remaining;
+	}
 
 	/*
 	 * In some cases (usually, nested EPT), L2 is allowed to change its
@@ -5759,6 +5770,13 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 
 			if (vmx->nested.mtf_pending)
 				kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
+
+			if (nested_cpu_has_preemption_timer(vmcs12)) {
+				kvm_state.flags |=
+					KVM_STATE_NESTED_PREEMPTION_TIMER;
+				kvm_state.size +=
+					sizeof(user_vmx_nested_state->preemption_timer_remaining);
+			}
 		}
 	}
 
@@ -5790,6 +5808,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 
 	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
 	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
+	BUILD_BUG_ON(sizeof(user_vmx_nested_state->preemption_timer_remaining)
+		    != sizeof(vmx->nested.preemption_timer_remaining));
+
 
 	/*
 	 * Copy over the full allocated size of vmcs12 rather than just the size
@@ -5805,6 +5826,13 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 			return -EFAULT;
 	}
 
+	if (kvm_state.flags & KVM_STATE_NESTED_PREEMPTION_TIMER) {
+		if (copy_to_user(&user_vmx_nested_state->preemption_timer_remaining,
+				 &vmx->nested.preemption_timer_remaining,
+				 sizeof(vmx->nested.preemption_timer_remaining)))
+			return -EFAULT;
+	}
+
 out:
 	return kvm_state.size;
 }
@@ -5876,7 +5904,8 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	 */
 	if (is_smm(vcpu) ?
 		(kvm_state->flags &
-		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
+		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING |
+		  KVM_STATE_NESTED_PREEMPTION_TIMER))
 		: kvm_state->hdr.vmx.smm.flags)
 		return -EINVAL;
 
@@ -5966,6 +5995,23 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 			goto error_guest_mode;
 	}
 
+	if (kvm_state->flags & KVM_STATE_NESTED_PREEMPTION_TIMER) {
+
+		if (kvm_state->size <
+		    sizeof(*kvm_state) +
+		    sizeof(user_vmx_nested_state->vmcs12) +
+		    sizeof(user_vmx_nested_state->shadow_vmcs12) +
+		    sizeof(user_vmx_nested_state->preemption_timer_remaining))
+			goto error_guest_mode;
+
+		if (copy_from_user(&vmx->nested.preemption_timer_remaining,
+				   &user_vmx_nested_state->preemption_timer_remaining,
+				   sizeof(user_vmx_nested_state->preemption_timer_remaining))) {
+			ret = -EFAULT;
+			goto error_guest_mode;
+		}
+	}
+
 	if (nested_vmx_check_controls(vcpu, vmcs12) ||
 	    nested_vmx_check_host_state(vcpu, vmcs12) ||
 	    nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index aab9df55336ef..0098c7dc2e254 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -167,6 +167,7 @@ struct nested_vmx {
 	u16 posted_intr_nv;
 
 	struct hrtimer preemption_timer;
+	u32 preemption_timer_remaining;
 	bool preemption_timer_expired;
 
 	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 027dfd278a973..ddad6305a0d23 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3374,6 +3374,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_GET_MSR_FEATURES:
 	case KVM_CAP_MSR_PLATFORM_INFO:
 	case KVM_CAP_EXCEPTION_PAYLOAD:
+	case KVM_CAP_NESTED_PREEMPTION_TIMER:
 		r = 1;
 		break;
 	case KVM_CAP_SYNC_REGS:
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 428c7dde6b4b3..489c3df0b49c8 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1017,6 +1017,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_VCPU_RESETS 179
 #define KVM_CAP_S390_PROTECTED 180
 #define KVM_CAP_PPC_SECURE_GUEST 181
+#define KVM_CAP_NESTED_PREEMPTION_TIMER 182
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 3f3f780c8c650..ac8e5d391356c 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -391,6 +391,8 @@ struct kvm_sync_regs {
 #define KVM_STATE_NESTED_RUN_PENDING	0x00000002
 #define KVM_STATE_NESTED_EVMCS		0x00000004
 #define KVM_STATE_NESTED_MTF_PENDING	0x00000008
+/* Available with KVM_CAP_NESTED_PREEMPTION_TIMER */
+#define KVM_STATE_NESTED_PREEMPTION_TIMER	0x00000010
 
 #define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON	0x00000002
-- 
2.26.1.301.g55bc3eb7cb9-goog


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran
  2020-04-17 18:34 [kvm PATCH 0/2] *** Enable VMX preemption timer migration *** Makarand Sonare
  2020-04-17 18:34 ` [kvm PATCH 1/2] KVM: nVMX - enable VMX preemption timer migration Makarand Sonare
@ 2020-04-17 18:34 ` Makarand Sonare
  2020-04-22  1:57   ` Sean Christopherson
  1 sibling, 1 reply; 11+ messages in thread
From: Makarand Sonare @ 2020-04-17 18:34 UTC (permalink / raw)
  To: kvm, pshier, jmattson; +Cc: Makarand Sonare

Don't clobber the VMX-preemption timer value in the VMCS12 during
migration on the source while handling an L1 VMLAUNCH/VMRESUME but
before L2 ran. In that case the VMCS12 preemption timer value
should not be touched as it will be restarted on the target
from its original value. This emulates migration occurring while L1
awaits completion of its VMLAUNCH/VMRESUME instruction.

Signed-off-by: Makarand Sonare <makarandsonare@google.com>
Signed-off-by: Peter Shier <pshier@google.com>
Change-Id: I376d151585d4f1449319f7512151f11bbf08c5bf
---
 arch/x86/kvm/vmx/nested.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 5365d7e5921ea..66155e9114114 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3897,11 +3897,13 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
 
 	if (nested_cpu_has_preemption_timer(vmcs12)) {
-		vmx->nested.preemption_timer_remaining =
-			vmx_get_preemption_timer_value(vcpu);
-		if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
-			vmcs12->vmx_preemption_timer_value =
-				vmx->nested.preemption_timer_remaining;
+		if (!vmx->nested.nested_run_pending) {
+			vmx->nested.preemption_timer_remaining =
+				vmx_get_preemption_timer_value(vcpu);
+			if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+				vmcs12->vmx_preemption_timer_value =
+					vmx->nested.preemption_timer_remaining;
+			}
 	}
 
 	/*
-- 
2.26.1.301.g55bc3eb7cb9-goog


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran
  2020-04-17 18:34 ` [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran Makarand Sonare
@ 2020-04-22  1:57   ` Sean Christopherson
  2020-04-22  2:02     ` Sean Christopherson
  0 siblings, 1 reply; 11+ messages in thread
From: Sean Christopherson @ 2020-04-22  1:57 UTC (permalink / raw)
  To: Makarand Sonare; +Cc: kvm, pshier, jmattson

On Fri, Apr 17, 2020 at 11:34:52AM -0700, Makarand Sonare wrote:
> Don't clobber the VMX-preemption timer value in the VMCS12 during
> migration on the source while handling an L1 VMLAUNCH/VMRESUME but
> before L2 ran. In that case the VMCS12 preemption timer value
> should not be touched as it will be restarted on the target
> from its original value. This emulates migration occurring while L1
> awaits completion of its VMLAUNCH/VMRESUME instruction.
> 
> Signed-off-by: Makarand Sonare <makarandsonare@google.com>
> Signed-off-by: Peter Shier <pshier@google.com>

The SOB tags are reversed, i.e. Peter's should be first to show that he
wrote the patch and then transferred it to you for upstreaming.

> Change-Id: I376d151585d4f1449319f7512151f11bbf08c5bf
> ---
>  arch/x86/kvm/vmx/nested.c | 12 +++++++-----
>  1 file changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 5365d7e5921ea..66155e9114114 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3897,11 +3897,13 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
>  
>  	if (nested_cpu_has_preemption_timer(vmcs12)) {
> -		vmx->nested.preemption_timer_remaining =
> -			vmx_get_preemption_timer_value(vcpu);
> -		if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
> -			vmcs12->vmx_preemption_timer_value =
> -				vmx->nested.preemption_timer_remaining;
> +		if (!vmx->nested.nested_run_pending) {
> +			vmx->nested.preemption_timer_remaining =
> +				vmx_get_preemption_timer_value(vcpu);
> +			if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
> +				vmcs12->vmx_preemption_timer_value =
> +					vmx->nested.preemption_timer_remaining;
> +			}

This indentation is messed up, the closing brace is for !nested_run_pending,
but it's aligned with (vm_exit_controls & ..._PREEMPTION_TIMER).

Even better than fixing the indentation would be to include !nested_run_pending
in the top-level if statement, which reduces the nesting level and produces
a much cleaner diff, e.g.

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 409a39af121f..7dd6440425ab 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3951,7 +3951,8 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        else
                vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;

-       if (nested_cpu_has_preemption_timer(vmcs12)) {
+       if (nested_cpu_has_preemption_timer(vmcs12) &&
+           !vmx->nested.nested_run_pending) {
                vmx->nested.preemption_timer_remaining =
                        vmx_get_preemption_timer_value(vcpu);
                if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)

>  	}
>  
>  	/*
> -- 
> 2.26.1.301.g55bc3eb7cb9-goog
> 

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran
  2020-04-22  1:57   ` Sean Christopherson
@ 2020-04-22  2:02     ` Sean Christopherson
  2020-04-22 17:05       ` Jim Mattson
  0 siblings, 1 reply; 11+ messages in thread
From: Sean Christopherson @ 2020-04-22  2:02 UTC (permalink / raw)
  To: Makarand Sonare; +Cc: kvm, pshier, jmattson

On Tue, Apr 21, 2020 at 06:57:59PM -0700, Sean Christopherson wrote:
> On Fri, Apr 17, 2020 at 11:34:52AM -0700, Makarand Sonare wrote:
> > Don't clobber the VMX-preemption timer value in the VMCS12 during
> > migration on the source while handling an L1 VMLAUNCH/VMRESUME but
> > before L2 ran. In that case the VMCS12 preemption timer value
> > should not be touched as it will be restarted on the target
> > from its original value. This emulates migration occurring while L1
> > awaits completion of its VMLAUNCH/VMRESUME instruction.
> > 
> > Signed-off-by: Makarand Sonare <makarandsonare@google.com>
> > Signed-off-by: Peter Shier <pshier@google.com>
> 
> The SOB tags are reversed, i.e. Peter's should be first to show that he
> wrote the patch and then transferred it to you for upstreaming.
> 
> > Change-Id: I376d151585d4f1449319f7512151f11bbf08c5bf
> > ---
> >  arch/x86/kvm/vmx/nested.c | 12 +++++++-----
> >  1 file changed, 7 insertions(+), 5 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> > index 5365d7e5921ea..66155e9114114 100644
> > --- a/arch/x86/kvm/vmx/nested.c
> > +++ b/arch/x86/kvm/vmx/nested.c
> > @@ -3897,11 +3897,13 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
> >  		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
> >  
> >  	if (nested_cpu_has_preemption_timer(vmcs12)) {
> > -		vmx->nested.preemption_timer_remaining =
> > -			vmx_get_preemption_timer_value(vcpu);
> > -		if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
> > -			vmcs12->vmx_preemption_timer_value =
> > -				vmx->nested.preemption_timer_remaining;
> > +		if (!vmx->nested.nested_run_pending) {
> > +			vmx->nested.preemption_timer_remaining =
> > +				vmx_get_preemption_timer_value(vcpu);
> > +			if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
> > +				vmcs12->vmx_preemption_timer_value =
> > +					vmx->nested.preemption_timer_remaining;
> > +			}
> 
> This indentation is messed up, the closing brace is for !nested_run_pending,
> but it's aligned with (vm_exit_controls & ..._PREEMPTION_TIMER).
> 
> Even better than fixing the indentation would be to include !nested_run_pending
> in the top-level if statement, which reduces the nesting level and produces
> a much cleaner diff, e.g.
> 
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 409a39af121f..7dd6440425ab 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3951,7 +3951,8 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>         else
>                 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
> 
> -       if (nested_cpu_has_preemption_timer(vmcs12)) {
> +       if (nested_cpu_has_preemption_timer(vmcs12) &&
> +           !vmx->nested.nested_run_pending) {
>                 vmx->nested.preemption_timer_remaining =
>                         vmx_get_preemption_timer_value(vcpu);
>                 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)

Actually, why is this a separate patch?  The code it's fixing was introduced
in patch one of this series.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [kvm PATCH 1/2] KVM: nVMX - enable VMX preemption timer migration
  2020-04-17 18:34 ` [kvm PATCH 1/2] KVM: nVMX - enable VMX preemption timer migration Makarand Sonare
@ 2020-04-22  2:36   ` Sean Christopherson
  0 siblings, 0 replies; 11+ messages in thread
From: Sean Christopherson @ 2020-04-22  2:36 UTC (permalink / raw)
  To: Makarand Sonare; +Cc: kvm, pshier, jmattson

On Fri, Apr 17, 2020 at 11:34:51AM -0700, Makarand Sonare wrote:
> From: Peter Shier <pshier@google.com>
> 
> Add new field to hold preemption timer remaining until expiration
> appended to struct kvm_vmx_nested_state_data. KVM_SET_NESTED_STATE restarts
> timer using migrated state regardless of whether L1 sets
> VM_EXIT_SAVE_VMX_PREEMPTION_TIMER.

The changelog should call out that simply stuffing vmcs12 will cause
incorrect behavior because the second (and later) VM-Enter after migration
will restart the timer with the wrong value.  It took me a bit to piece
together why the extra field was needed.

 
> -static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
> +static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
> +					u64 preemption_timeout)
>  {
> -	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  
>  	/*
> @@ -3293,8 +3294,15 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
>  	 * the timer.
>  	 */
>  	vmx->nested.preemption_timer_expired = false;
> -	if (nested_cpu_has_preemption_timer(vmcs12))
> -		vmx_start_preemption_timer(vcpu);
> +	if (nested_cpu_has_preemption_timer(vmcs12)) {
> +		u64 preemption_timeout;

timer_value would be shorter, and IMO, a better name as well.  I'd even go
so far as to say throw on a follow-up patch to rename the variable in
vmx_start_preemption_timer.

> +		if (from_vmentry)
> +			preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;

vmcs12 is already available.

> +		else
> +			preemption_timeout = vmx->nested.preemption_timer_remaining;
> +		vmx_start_preemption_timer(vcpu, preemption_timeout);
> +	}
>  
>  	/*
>  	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
> @@ -3888,10 +3896,13 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  	else
>  		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
>  
> -	if (nested_cpu_has_preemption_timer(vmcs12) &&
> -	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
> +	if (nested_cpu_has_preemption_timer(vmcs12)) {
> +		vmx->nested.preemption_timer_remaining =
> +			vmx_get_preemption_timer_value(vcpu);
> +		if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
>  			vmcs12->vmx_preemption_timer_value =
> -				vmx_get_preemption_timer_value(vcpu);
> +				vmx->nested.preemption_timer_remaining;
> +	}
>  
>  	/*
>  	 * In some cases (usually, nested EPT), L2 is allowed to change its
> @@ -5759,6 +5770,13 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
>  
>  			if (vmx->nested.mtf_pending)
>  				kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
> +
> +			if (nested_cpu_has_preemption_timer(vmcs12)) {
> +				kvm_state.flags |=
> +					KVM_STATE_NESTED_PREEMPTION_TIMER;
> +				kvm_state.size +=
> +					sizeof(user_vmx_nested_state->preemption_timer_remaining);
> +			}
>  		}
>  	}
>  
> @@ -5790,6 +5808,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
>  
>  	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
>  	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
> +	BUILD_BUG_ON(sizeof(user_vmx_nested_state->preemption_timer_remaining)
> +		    != sizeof(vmx->nested.preemption_timer_remaining));
> +
>  
>  	/*
>  	 * Copy over the full allocated size of vmcs12 rather than just the size
> @@ -5805,6 +5826,13 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
>  			return -EFAULT;
>  	}
>  
> +	if (kvm_state.flags & KVM_STATE_NESTED_PREEMPTION_TIMER) {
> +		if (copy_to_user(&user_vmx_nested_state->preemption_timer_remaining,
> +				 &vmx->nested.preemption_timer_remaining,
> +				 sizeof(vmx->nested.preemption_timer_remaining)))
> +			return -EFAULT;

Isn't this suitable for put_user()?  That would allow dropping the
long-winded BUILD_BUG_ON.

> +	}
> +
>  out:
>  	return kvm_state.size;
>  }
> @@ -5876,7 +5904,8 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
>  	 */
>  	if (is_smm(vcpu) ?
>  		(kvm_state->flags &
> -		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
> +		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING |
> +		  KVM_STATE_NESTED_PREEMPTION_TIMER))
>  		: kvm_state->hdr.vmx.smm.flags)
>  		return -EINVAL;
>  
> @@ -5966,6 +5995,23 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
>  			goto error_guest_mode;
>  	}
>  
> +	if (kvm_state->flags & KVM_STATE_NESTED_PREEMPTION_TIMER) {
> +
> +		if (kvm_state->size <
> +		    sizeof(*kvm_state) +
> +		    sizeof(user_vmx_nested_state->vmcs12) +
> +		    sizeof(user_vmx_nested_state->shadow_vmcs12) +
> +		    sizeof(user_vmx_nested_state->preemption_timer_remaining))

		if (kvm_state->size <
		    offsetof(struct kvm_nested_state, vmx) +
		    offsetofend(struct kvm_nested_state_data,
				preemption_timer_remaining))


> +			goto error_guest_mode;
> +
> +		if (copy_from_user(&vmx->nested.preemption_timer_remaining,
> +				   &user_vmx_nested_state->preemption_timer_remaining,
> +				   sizeof(user_vmx_nested_state->preemption_timer_remaining))) {
> +			ret = -EFAULT;
> +			goto error_guest_mode;
> +		}
> +	}
> +
>  	if (nested_vmx_check_controls(vcpu, vmcs12) ||
>  	    nested_vmx_check_host_state(vcpu, vmcs12) ||
>  	    nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
> index aab9df55336ef..0098c7dc2e254 100644
> --- a/arch/x86/kvm/vmx/vmx.h
> +++ b/arch/x86/kvm/vmx/vmx.h
> @@ -167,6 +167,7 @@ struct nested_vmx {
>  	u16 posted_intr_nv;
>  
>  	struct hrtimer preemption_timer;
> +	u32 preemption_timer_remaining;
>  	bool preemption_timer_expired;
>  
>  	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 027dfd278a973..ddad6305a0d23 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3374,6 +3374,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
>  	case KVM_CAP_GET_MSR_FEATURES:
>  	case KVM_CAP_MSR_PLATFORM_INFO:
>  	case KVM_CAP_EXCEPTION_PAYLOAD:
> +	case KVM_CAP_NESTED_PREEMPTION_TIMER:

Maybe KVM_CAP_NESTED_STATE_PREEMPTION_TIMER?

>  		r = 1;
>  		break;
>  	case KVM_CAP_SYNC_REGS:
> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> index 428c7dde6b4b3..489c3df0b49c8 100644
> --- a/include/uapi/linux/kvm.h
> +++ b/include/uapi/linux/kvm.h
> @@ -1017,6 +1017,7 @@ struct kvm_ppc_resize_hpt {
>  #define KVM_CAP_S390_VCPU_RESETS 179
>  #define KVM_CAP_S390_PROTECTED 180
>  #define KVM_CAP_PPC_SECURE_GUEST 181
> +#define KVM_CAP_NESTED_PREEMPTION_TIMER 182
>  
>  #ifdef KVM_CAP_IRQ_ROUTING
>  
> diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
> index 3f3f780c8c650..ac8e5d391356c 100644
> --- a/tools/arch/x86/include/uapi/asm/kvm.h
> +++ b/tools/arch/x86/include/uapi/asm/kvm.h
> @@ -391,6 +391,8 @@ struct kvm_sync_regs {
>  #define KVM_STATE_NESTED_RUN_PENDING	0x00000002
>  #define KVM_STATE_NESTED_EVMCS		0x00000004
>  #define KVM_STATE_NESTED_MTF_PENDING	0x00000008
> +/* Available with KVM_CAP_NESTED_PREEMPTION_TIMER */
> +#define KVM_STATE_NESTED_PREEMPTION_TIMER	0x00000010
>  
>  #define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
>  #define KVM_STATE_NESTED_SMM_VMXON	0x00000002
> -- 
> 2.26.1.301.g55bc3eb7cb9-goog
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran
  2020-04-22  2:02     ` Sean Christopherson
@ 2020-04-22 17:05       ` Jim Mattson
  2020-04-22 23:08         ` Sean Christopherson
  2020-04-23 14:58         ` Paolo Bonzini
  0 siblings, 2 replies; 11+ messages in thread
From: Jim Mattson @ 2020-04-22 17:05 UTC (permalink / raw)
  To: Sean Christopherson; +Cc: Makarand Sonare, kvm list, Peter Shier

On Tue, Apr 21, 2020 at 7:02 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Tue, Apr 21, 2020 at 06:57:59PM -0700, Sean Christopherson wrote:
> > On Fri, Apr 17, 2020 at 11:34:52AM -0700, Makarand Sonare wrote:
> > > Don't clobber the VMX-preemption timer value in the VMCS12 during
> > > migration on the source while handling an L1 VMLAUNCH/VMRESUME but
> > > before L2 ran. In that case the VMCS12 preemption timer value
> > > should not be touched as it will be restarted on the target
> > > from its original value. This emulates migration occurring while L1
> > > awaits completion of its VMLAUNCH/VMRESUME instruction.
> > >
> > > Signed-off-by: Makarand Sonare <makarandsonare@google.com>
> > > Signed-off-by: Peter Shier <pshier@google.com>
> >
> > The SOB tags are reversed, i.e. Peter's should be first to show that he
> > wrote the patch and then transfered it to you for upstreaming.
> >
> > > Change-Id: I376d151585d4f1449319f7512151f11bbf08c5bf
> > > ---
> > >  arch/x86/kvm/vmx/nested.c | 12 +++++++-----
> > >  1 file changed, 7 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> > > index 5365d7e5921ea..66155e9114114 100644
> > > --- a/arch/x86/kvm/vmx/nested.c
> > > +++ b/arch/x86/kvm/vmx/nested.c
> > > @@ -3897,11 +3897,13 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
> > >             vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
> > >
> > >     if (nested_cpu_has_preemption_timer(vmcs12)) {
> > > -           vmx->nested.preemption_timer_remaining =
> > > -                   vmx_get_preemption_timer_value(vcpu);
> > > -           if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
> > > -                   vmcs12->vmx_preemption_timer_value =
> > > -                           vmx->nested.preemption_timer_remaining;
> > > +           if (!vmx->nested.nested_run_pending) {
> > > +                   vmx->nested.preemption_timer_remaining =
> > > +                           vmx_get_preemption_timer_value(vcpu);
> > > +                   if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
> > > +                           vmcs12->vmx_preemption_timer_value =
> > > +                                   vmx->nested.preemption_timer_remaining;
> > > +                   }
> >
> > This indentation is messed up, the closing brace is for !nested_run_pending,
> > but it's aligned with (vm_exit_controls & ..._PREEMPTION_TIMER).
> >
> > Even better than fixing the indentation would be to include !nested_run_pending
> > in the top-level if statement, which reduces the nesting level and produces
> > a much cleaner diff, e.g.
> >
> > diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> > index 409a39af121f..7dd6440425ab 100644
> > --- a/arch/x86/kvm/vmx/nested.c
> > +++ b/arch/x86/kvm/vmx/nested.c
> > @@ -3951,7 +3951,8 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
> >         else
> >                 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
> >
> > -       if (nested_cpu_has_preemption_timer(vmcs12)) {
> > +       if (nested_cpu_has_preemption_timer(vmcs12) &&
> > +           !vmx->nested.nested_run_pending) {
> >                 vmx->nested.preemption_timer_remaining =
> >                         vmx_get_preemption_timer_value(vcpu);
> >                 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
>
> Actually, why is this a separate patch?  The code it's fixing was introduced
> in patch one of this series.

That's my fault. I questioned the legitimacy of this patch. When
nested_run_pending is set in sync_vmcs02_to_vmcs12, we are in the
middle of executing a VMLAUNCH or VMRESUME and userspace has requested
a dump of the nested state. At this point, we have already called
nested_vmx_enter_non_root_mode, and we have already started the timer.
Even though we are going to repeat the vmcs02 setup when restoring the
nested state, we do not actually rollback and restart the instruction.
Setting up the vmcs02 on the target is just something we have to do to
continue where we left off. Since we're continuing a partially
executed instruction after the restore rather than rolling back, I
think it's perfectly reasonable to go ahead and count the time elapsed
prior to KVM_GET_NESTED_STATE against L2's VMX-preemption timer.

I don't have a strong objection to this patch. It just seems to add
gratuitous complexity. If the consensus is to take it, the two parts
should be squashed together.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran
  2020-04-22 17:05       ` Jim Mattson
@ 2020-04-22 23:08         ` Sean Christopherson
  2020-04-23 14:58         ` Paolo Bonzini
  1 sibling, 0 replies; 11+ messages in thread
From: Sean Christopherson @ 2020-04-22 23:08 UTC (permalink / raw)
  To: Jim Mattson; +Cc: Makarand Sonare, kvm list, Peter Shier

On Wed, Apr 22, 2020 at 10:05:45AM -0700, Jim Mattson wrote:
> On Tue, Apr 21, 2020 at 7:02 PM Sean Christopherson
> <sean.j.christopherson@intel.com> wrote:
> > > diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> > > index 409a39af121f..7dd6440425ab 100644
> > > --- a/arch/x86/kvm/vmx/nested.c
> > > +++ b/arch/x86/kvm/vmx/nested.c
> > > @@ -3951,7 +3951,8 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
> > >         else
> > >                 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
> > >
> > > -       if (nested_cpu_has_preemption_timer(vmcs12)) {
> > > +       if (nested_cpu_has_preemption_timer(vmcs12) &&
> > > +           !vmx->nested.nested_run_pending) {
> > >                 vmx->nested.preemption_timer_remaining =
> > >                         vmx_get_preemption_timer_value(vcpu);
> > >                 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
> >
> > Actually, why is this a separate patch?  The code it's fixing was introduced
> > in patch one of this series.
> 
> That's my fault. I questioned the legitimacy of this patch. When
> nested_run_pending is set in sync_vmcs02_to_vmcs12, we are in the
> middle of executing a VMLAUNCH or VMRESUME and userspace has requested
> a dump of the nested state. At this point, we have already called
> nested_vmx_enter_non_root_mode, and we have already started the timer.
> Even though we are going to repeat the vmcs02 setup when restoring the
> nested state, we do not actually rollback and restart the instruction.
> Setting up the vmcs02 on the target is just something we have to do to
> continue where we left off. Since we're continuing a partially
> executed instruction after the restore rather than rolling back, I
> think it's perfectly reasonable to go ahead and count the time elapsed
> prior to KVM_GET_NESTED_STATE against L2's VMX-preemption timer.

Counting the time elapsed seems reasonable, and is allowed from an
architectural standpoint.  It probably ends up being more accurate for the
KVM (as L1) use case where the preemption timer is being used to emulate
the TSC deadline timer, i.e. is less delayed.

> I don't have a strong objection to this patch. It just seems to add
> gratuitous complexity. If the consensus is to take it, the two parts
> should be squashed together.

I too don't have a strong opinion either way.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran
  2020-04-22 17:05       ` Jim Mattson
  2020-04-22 23:08         ` Sean Christopherson
@ 2020-04-23 14:58         ` Paolo Bonzini
  2020-04-23 21:11           ` Jim Mattson
  1 sibling, 1 reply; 11+ messages in thread
From: Paolo Bonzini @ 2020-04-23 14:58 UTC (permalink / raw)
  To: Jim Mattson, Sean Christopherson; +Cc: Makarand Sonare, kvm list, Peter Shier

On 22/04/20 19:05, Jim Mattson wrote:
> I don't have a strong objection to this patch. It just seems to add
> gratuitous complexity. If the consensus is to take it, the two parts
> should be squashed together.

The complexity is not much if you just count lines of code, but I agree
with you that it's both allowed and more accurate.

Paolo


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran
  2020-04-23 14:58         ` Paolo Bonzini
@ 2020-04-23 21:11           ` Jim Mattson
  2020-04-23 21:18             ` Paolo Bonzini
  0 siblings, 1 reply; 11+ messages in thread
From: Jim Mattson @ 2020-04-23 21:11 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: Sean Christopherson, Makarand Sonare, kvm list, Peter Shier

On Thu, Apr 23, 2020 at 7:58 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 22/04/20 19:05, Jim Mattson wrote:
> > I don't have a strong objection to this patch. It just seems to add
> > gratuitous complexity. If the consensus is to take it, the two parts
> > should be squashed together.
>
> The complexity is not much if you just count lines of code, but I agree
> with you that it's both allowed and more accurate.

It sounds like we're in agreement that this patch can be dropped?

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran
  2020-04-23 21:11           ` Jim Mattson
@ 2020-04-23 21:18             ` Paolo Bonzini
  0 siblings, 0 replies; 11+ messages in thread
From: Paolo Bonzini @ 2020-04-23 21:18 UTC (permalink / raw)
  To: Jim Mattson; +Cc: Sean Christopherson, Makarand Sonare, kvm list, Peter Shier

On 23/04/20 23:11, Jim Mattson wrote:
> On Thu, Apr 23, 2020 at 7:58 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
>> On 22/04/20 19:05, Jim Mattson wrote:
>>> I don't have a strong objection to this patch. It just seems to add
>>> gratuitous complexity. If the consensus is to take it, the two parts
>>> should be squashed together.
>> The complexity is not much if you just count lines of code, but I agree
>> with you that it's both allowed and more accurate.
> It sounds like we're in agreement that this patch can be dropped?
> 

Yes.

Paolo


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2020-04-23 21:18 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-17 18:34 [kvm PATCH 0/2] *** Enable VMX preemption timer migration *** Makarand Sonare
2020-04-17 18:34 ` [kvm PATCH 1/2] KVM: nVMX - enable VMX preemption timer migration Makarand Sonare
2020-04-22  2:36   ` Sean Christopherson
2020-04-17 18:34 ` [kvm PATCH 2/2] KVM: nVMX: Don't clobber preemption timer in the VMCS12 before L2 ran Makarand Sonare
2020-04-22  1:57   ` Sean Christopherson
2020-04-22  2:02     ` Sean Christopherson
2020-04-22 17:05       ` Jim Mattson
2020-04-22 23:08         ` Sean Christopherson
2020-04-23 14:58         ` Paolo Bonzini
2020-04-23 21:11           ` Jim Mattson
2020-04-23 21:18             ` Paolo Bonzini

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.