All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v1 0/1] x86/kvm/hyper-v: Add support to SYNIC exit on EOM
@ 2020-04-09 16:37 Jon Doron
  2020-04-09 16:37 ` [PATCH v1 1/1] " Jon Doron
  0 siblings, 1 reply; 3+ messages in thread
From: Jon Doron @ 2020-04-09 16:37 UTC (permalink / raw)
  To: kvm, linux-hyperv; +Cc: vkuznets, Jon Doron

According to the TLFS:
"A write to the end of message (EOM) register by the guest causes the
hypervisor to scan the internal message buffer queue(s) associated with
the virtual processor.

If a message buffer queue contains a queued message buffer, the hypervisor
attempts to deliver the message.

Message delivery succeeds if the SIM page is enabled and the message slot
corresponding to the SINTx is empty (that is, the message type in the
header is set to HvMessageTypeNone).
If a message is successfully delivered, its corresponding internal message
buffer is dequeued and marked free.
If the corresponding SINTx is not masked, an edge-triggered interrupt is
delivered (that is, the corresponding bit in the IRR is set).

This register can be used by guests to poll for messages. It can also be
used as a way to drain the message queue for a SINTx that has
been disabled (that is, masked)."

So basically this means that we need to exit on EOM so the hypervisor
will have a chance to send all the pending messages regardless of the
SCONTROL mechanism.

Jon Doron (1):
  x86/kvm/hyper-v: Add support to SYNIC exit on EOM

 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/hyperv.c           | 65 +++++++++++++++++++++++++++++----
 arch/x86/kvm/hyperv.h           |  1 +
 arch/x86/kvm/x86.c              |  5 +++
 include/uapi/linux/kvm.h        |  1 +
 5 files changed, 65 insertions(+), 8 deletions(-)

-- 
2.24.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

* [PATCH v1 1/1] x86/kvm/hyper-v: Add support to SYNIC exit on EOM
  2020-04-09 16:37 [PATCH v1 0/1] x86/kvm/hyper-v: Add support to SYNIC exit on EOM Jon Doron
@ 2020-04-09 16:37 ` Jon Doron
  2020-04-16  6:57   ` Vitaly Kuznetsov
  0 siblings, 1 reply; 3+ messages in thread
From: Jon Doron @ 2020-04-09 16:37 UTC (permalink / raw)
  To: kvm, linux-hyperv; +Cc: vkuznets, Jon Doron

According to the TLFS a write to the EOM register by the guest
causes the hypervisor to scan for any pending messages and if there
are any it will try to deliver them.

To do this we must exit so any pending messages can be written.

Signed-off-by: Jon Doron <arilou@gmail.com>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/hyperv.c           | 65 +++++++++++++++++++++++++++++----
 arch/x86/kvm/hyperv.h           |  1 +
 arch/x86/kvm/x86.c              |  5 +++
 include/uapi/linux/kvm.h        |  1 +
 5 files changed, 65 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 42a2d0d3984a..048a1db488e2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -548,6 +548,7 @@ struct kvm_vcpu_hv_synic {
 	DECLARE_BITMAP(vec_bitmap, 256);
 	bool active;
 	bool dont_zero_synic_pages;
+	bool enable_eom_exit;
 };
 
 /* Hyper-V per vcpu emulation context */
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index bcefa9d4e57e..7432f67b2746 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -186,6 +186,49 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 	srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
+static int synic_read_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
+			  struct hv_message_header *msg)
+{
+	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
+	gfn_t msg_page_gfn;
+	int r;
+
+	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
+		return -ENOENT;
+
+	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
+
+	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, msg, msg_off,
+				     sizeof(*msg));
+	if (r < 0)
+		return r;
+
+	return 0;
+}
+
+static bool synic_should_exit_for_eom(struct kvm_vcpu_hv_synic *synic)
+{
+	int i;
+
+	if (!synic->enable_eom_exit)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
+		struct hv_message_header hv_hdr;
+		/* If we failed to read from the msg slot then we treat this
+		 * msg slot as free */
+		if (synic_read_msg(synic, i, &hv_hdr) < 0)
+			continue;
+
+		/* See if this msg slot has a pending message */
+		if (hv_hdr.message_flags.msg_pending == 1)
+			return true;
+	}
+
+	return false;
+}
+
 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
 {
 	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
@@ -254,6 +297,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 
 		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
 			kvm_hv_notify_acked_sint(vcpu, i);
+
+		if (!host && synic_should_exit_for_eom(synic))
+			synic_exit(synic, msr);
 		break;
 	}
 	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
@@ -571,8 +617,9 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
 	struct hv_message_header hv_hdr;
 	int r;
 
-	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
-		return -ENOENT;
+	r = synic_read_msg(synic, sint, &hv_hdr);
+	if (r < 0)
+		return r;
 
 	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
 
@@ -582,12 +629,6 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
 	 * is only called in vcpu context so the entire update is atomic from
 	 * guest POV and thus the exact order here doesn't matter.
 	 */
-	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
-				     msg_off + offsetof(struct hv_message,
-							header.message_type),
-				     sizeof(hv_hdr.message_type));
-	if (r < 0)
-		return r;
 
 	if (hv_hdr.message_type != HVMSG_NONE) {
 		if (no_retry)
@@ -785,6 +826,14 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
 	return 0;
 }
 
+int kvm_hv_synic_enable_eom(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+
+	synic->enable_eom_exit = true;
+	return 0;
+}
+
 static bool kvm_hv_msr_partition_wide(u32 msr)
 {
 	bool r = false;
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 757cb578101c..ff89f0ff103c 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -56,6 +56,7 @@ void kvm_hv_irq_routing_update(struct kvm *kvm);
 int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
+int kvm_hv_synic_enable_eom(struct kvm_vcpu *vcpu);
 
 void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 027dfd278a97..0def4ab31dc1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3350,6 +3350,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_HYPERV_SPIN:
 	case KVM_CAP_HYPERV_SYNIC:
 	case KVM_CAP_HYPERV_SYNIC2:
+	case KVM_CAP_HYPERV_SYNIC_EOM:
 	case KVM_CAP_HYPERV_VP_INDEX:
 	case KVM_CAP_HYPERV_EVENTFD:
 	case KVM_CAP_HYPERV_TLBFLUSH:
@@ -4209,6 +4210,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	switch (cap->cap) {
+	case KVM_CAP_HYPERV_SYNIC_EOM:
+		kvm_hv_synic_enable_eom(vcpu);
+		return 0;
+
 	case KVM_CAP_HYPERV_SYNIC2:
 		if (cap->args[0])
 			return -EINVAL;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 428c7dde6b4b..78172ad156d8 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1017,6 +1017,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_VCPU_RESETS 179
 #define KVM_CAP_S390_PROTECTED 180
 #define KVM_CAP_PPC_SECURE_GUEST 181
+#define KVM_CAP_HYPERV_SYNIC_EOM 182
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH v1 1/1] x86/kvm/hyper-v: Add support to SYNIC exit on EOM
  2020-04-09 16:37 ` [PATCH v1 1/1] " Jon Doron
@ 2020-04-16  6:57   ` Vitaly Kuznetsov
  0 siblings, 0 replies; 3+ messages in thread
From: Vitaly Kuznetsov @ 2020-04-16  6:57 UTC (permalink / raw)
  To: Jon Doron; +Cc: Jon Doron, Roman Kagan, kvm, linux-hyperv

Jon Doron <arilou@gmail.com> writes:

> According to the TLFS a write to the EOM register by the guest
> causes the hypervisor to scan for any pending messages and if there
> are any it will try to deliver them.
>
> To do this we must exit so any pending messages can be written.
>
> Signed-off-by: Jon Doron <arilou@gmail.com>

Roman says he's still with us so let's Cc: him.

> ---
>  arch/x86/include/asm/kvm_host.h |  1 +
>  arch/x86/kvm/hyperv.c           | 65 +++++++++++++++++++++++++++++----
>  arch/x86/kvm/hyperv.h           |  1 +
>  arch/x86/kvm/x86.c              |  5 +++
>  include/uapi/linux/kvm.h        |  1 +
>  5 files changed, 65 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 42a2d0d3984a..048a1db488e2 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -548,6 +548,7 @@ struct kvm_vcpu_hv_synic {
>  	DECLARE_BITMAP(vec_bitmap, 256);
>  	bool active;
>  	bool dont_zero_synic_pages;
> +	bool enable_eom_exit;
>  };
>  
>  /* Hyper-V per vcpu emulation context */
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index bcefa9d4e57e..7432f67b2746 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -186,6 +186,49 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
>  	srcu_read_unlock(&kvm->irq_srcu, idx);
>  }
>  
> +static int synic_read_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
> +			  struct hv_message_header *msg)

I'd suggest to rename this to 'synic_read_msg_hdr()' as we don't
actually read the message here, just the header.

> +{
> +	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
> +	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
> +	gfn_t msg_page_gfn;
> +	int r;
> +
> +	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
> +		return -ENOENT;
> +
> +	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
> +
> +	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, msg, msg_off,
> +				     sizeof(*msg));
> +	if (r < 0)
> +		return r;
> +
> +	return 0;
> +}
> +
> +static bool synic_should_exit_for_eom(struct kvm_vcpu_hv_synic *synic)

'for_eom' or 'on_eom'?

> +{
> +	int i;
> +
> +	if (!synic->enable_eom_exit)
> +		return false;
> +
> +	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
> +		struct hv_message_header hv_hdr;
> +		/* If we failed to read from the msg slot then we treat this
> +		 * msg slot as free */

Coding style: multi-line comments should look like
  /*
   * line1
   * line2
   * ...
   */

> +		if (synic_read_msg(synic, i, &hv_hdr) < 0)
> +			continue;
> +
> +		/* See if this msg slot has a pending message */
> +		if (hv_hdr.message_flags.msg_pending == 1)
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
>  static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
>  {
>  	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
> @@ -254,6 +297,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
>  
>  		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
>  			kvm_hv_notify_acked_sint(vcpu, i);
> +
> +		if (!host && synic_should_exit_for_eom(synic))
> +			synic_exit(synic, msr);

Generally, message communication is not performance critical, however,
we have synthetic timers and in case the new KVM_CAP_HYPERV_SYNIC_EOM
cap is enabled we will be exiting to userspace for every timer
expiration. This will measurably slow down the guest I'm afraid.

Would it be possible to come up with an interface for userspace to tell
KVM that it has a pending message for the guest and only exit to
userspace in this case? Or maybe we need to queue all messages from
userspace in KVM and deliver them when we get a chance and not exit to
userspace at all?

>  		break;
>  	}
>  	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
> @@ -571,8 +617,9 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
>  	struct hv_message_header hv_hdr;
>  	int r;
>  
> -	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
> -		return -ENOENT;
> +	r = synic_read_msg(synic, sint, &hv_hdr);
> +	if (r < 0)
> +		return r;
>  
>  	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
>  
> @@ -582,12 +629,6 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
>  	 * is only called in vcpu context so the entire update is atomic from
>  	 * guest POV and thus the exact order here doesn't matter.
>  	 */
> -	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
> -				     msg_off + offsetof(struct hv_message,
> -							header.message_type),
> -				     sizeof(hv_hdr.message_type));
> -	if (r < 0)
> -		return r;
>  
>  	if (hv_hdr.message_type != HVMSG_NONE) {
>  		if (no_retry)
> @@ -785,6 +826,14 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
>  	return 0;
>  }
>  
> +int kvm_hv_synic_enable_eom(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
> +
> +	synic->enable_eom_exit = true;
> +	return 0;
> +}
> +
>  static bool kvm_hv_msr_partition_wide(u32 msr)
>  {
>  	bool r = false;
> diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
> index 757cb578101c..ff89f0ff103c 100644
> --- a/arch/x86/kvm/hyperv.h
> +++ b/arch/x86/kvm/hyperv.h
> @@ -56,6 +56,7 @@ void kvm_hv_irq_routing_update(struct kvm *kvm);
>  int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
>  void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
>  int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
> +int kvm_hv_synic_enable_eom(struct kvm_vcpu *vcpu);
>  
>  void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
>  void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 027dfd278a97..0def4ab31dc1 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3350,6 +3350,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
>  	case KVM_CAP_HYPERV_SPIN:
>  	case KVM_CAP_HYPERV_SYNIC:
>  	case KVM_CAP_HYPERV_SYNIC2:
> +	case KVM_CAP_HYPERV_SYNIC_EOM:
>  	case KVM_CAP_HYPERV_VP_INDEX:
>  	case KVM_CAP_HYPERV_EVENTFD:
>  	case KVM_CAP_HYPERV_TLBFLUSH:
> @@ -4209,6 +4210,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
>  		return -EINVAL;
>  
>  	switch (cap->cap) {
> +	case KVM_CAP_HYPERV_SYNIC_EOM:
> +		kvm_hv_synic_enable_eom(vcpu);
> +		return 0;
> +
>  	case KVM_CAP_HYPERV_SYNIC2:
>  		if (cap->args[0])
>  			return -EINVAL;
> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> index 428c7dde6b4b..78172ad156d8 100644
> --- a/include/uapi/linux/kvm.h
> +++ b/include/uapi/linux/kvm.h
> @@ -1017,6 +1017,7 @@ struct kvm_ppc_resize_hpt {
>  #define KVM_CAP_S390_VCPU_RESETS 179
>  #define KVM_CAP_S390_PROTECTED 180
>  #define KVM_CAP_PPC_SECURE_GUEST 181
> +#define KVM_CAP_HYPERV_SYNIC_EOM 182
>  
>  #ifdef KVM_CAP_IRQ_ROUTING

-- 
Vitaly


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2020-04-16  6:57 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-09 16:37 [PATCH v1 0/1] x86/kvm/hyper-v: Add support to SYNIC exit on EOM Jon Doron
2020-04-09 16:37 ` [PATCH v1 1/1] " Jon Doron
2020-04-16  6:57   ` Vitaly Kuznetsov

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.