* [Qemu-devel] [PATCH v2 0/1] vhost-user interrupt management fixes
@ 2016-02-16 13:28 Victor Kaplansky
From: Victor Kaplansky @ 2016-02-16 13:28 UTC (permalink / raw)
  To: qemu-devel; +Cc: thibaut.collet, jmg, Didier Pallard, Michael S. Tsirkin

Hi, this patch is a cosmetic rework of two patches originally sent
by Didier Pallard: "[PATCH 2/3] virtio-pci: add an option to
bypass guest_notifier_mask" and "[PATCH 3/3] vhost-net: force
guest_notifier_mask bypass in vhost-user case".

The problem the patch solves is described in the original posting
by Didier:
    http://lists.nongnu.org/archive/html/qemu-devel/2015-12/msg00652.html

I have done only basic testing. Didier, could you take a look and
check whether this still works for you? Also, as the change is very
cosmetic, I've put Didier as the author of the patch. Didier, is it
OK with you?

v2 changes:
 - a new boolean field is added to all virtio devices instead
   of defining a property in some virtio-pci devices.


Didier Pallard (1):
  vhost-user interrupt management fixes

 include/hw/virtio/virtio.h |  1 +
 hw/net/vhost_net.c         | 24 ++++++++++++++++++++++--
 hw/virtio/vhost.c          | 13 +++++++++++++
 hw/virtio/virtio-pci.c     | 15 +++++++++------
 hw/virtio/virtio.c         |  1 +
 5 files changed, 46 insertions(+), 8 deletions(-)

-- 
--Victor


* [Qemu-devel] [PATCH v2 1/1] vhost-user interrupt management fixes
From: Victor Kaplansky @ 2016-02-16 13:28 UTC (permalink / raw)
  To: qemu-devel
  Cc: thibaut.collet, Jason Wang, jmg, Didier Pallard, Michael S. Tsirkin

From: Didier Pallard <didier.pallard@6wind.com>

Since guest_notifier_mask cannot be used in vhost-user mode, due to the
buffering implied by the unix control socket, force use_mask_notifier
off on virtio devices backing vhost-user interfaces, and register the
correct callfd with the backend at vhost start.

Using the guest_notifier_mask function in the vhost-user case may break
the interrupt mask paradigm, because the mask/unmask is not actually
complete when guest_notifier_mask returns: the message is merely posted
to a unix socket and processed later.

Add a boolean flag 'use_mask_notifier' to disable the use of
guest_notifier_mask in virtio-pci.

Signed-off-by: Victor Kaplansky <victork@redhat.com>
---
 include/hw/virtio/virtio.h |  1 +
 hw/net/vhost_net.c         | 24 ++++++++++++++++++++++--
 hw/virtio/vhost.c          | 13 +++++++++++++
 hw/virtio/virtio-pci.c     | 15 +++++++++------
 hw/virtio/virtio.c         |  1 +
 5 files changed, 46 insertions(+), 8 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 108cdb0f..3acbf999 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -90,6 +90,7 @@ struct VirtIODevice
     VMChangeStateEntry *vmstate;
     char *bus_name;
     uint8_t device_endian;
+    bool use_mask_notifier;
     QLIST_HEAD(, VirtQueue) *vector_queues;
 };
 
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 3940a04b..22ba5571 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -17,6 +17,7 @@
 #include "net/net.h"
 #include "net/tap.h"
 #include "net/vhost-user.h"
+#include "hw/virtio/virtio-pci.h"
 
 #include "hw/virtio/virtio-net.h"
 #include "net/vhost_net.h"
@@ -306,13 +307,32 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     }
 
     for (j = 0; j < total_queues; j++) {
+        struct vhost_net *net;
+
         r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
         if (r < 0) {
             goto err_endian;
         }
-        vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
-    }
 
+        net = get_vhost_net(ncs[j].peer);
+        vhost_net_set_vq_index(net, j * 2);
+
+        /* Force use_mask_notifier reset in vhost user case
+         * Must be done before set_guest_notifier call
+         */
+        if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+            BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
+            DeviceState *d = DEVICE(qbus->parent);
+            if (!strcmp(object_get_typename(OBJECT(d)), TYPE_VIRTIO_NET_PCI)) {
+                VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+                VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+                /* Force virtual device not use mask notifier */
+                vdev->use_mask_notifier = false;
+            }
+        }
+     }
+ 
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 7dff7554..80744386 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -855,8 +855,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
     /* Clear and discard previous events if any. */
     event_notifier_test_and_clear(&vq->masked_notifier);
 
+    /* If vhost user, register now the call eventfd, guest_notifier_mask
+     * function is not used anymore
+     */
+    if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
+        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
+        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
+        if (r) {
+            r = -errno;
+            goto fail_call;
+        }
+    }
+
     return 0;
 
+fail_call:
 fail_kick:
 fail_alloc:
     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 5494ff4a..70f64cf7 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, set up irqfd now.
          * Otherwise, delay until unmasked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
             if (ret < 0) {
                 kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -822,7 +822,7 @@ undo:
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, clean up irqfd now.
          * Otherwise, it was cleaned when masked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, irqfd is already setup, unmask it.
      * Otherwise, set it up now.
      */
-    if (k->guest_notifier_mask) {
+    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, false);
         /* Test after unmasking to avoid losing events. */
         if (k->guest_notifier_pending &&
@@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, keep irqfd but mask it.
      * Otherwise, clean it up now.
      */ 
-    if (k->guest_notifier_mask) {
+    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, true);
     } else {
         kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
         event_notifier_cleanup(notifier);
     }
 
-    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+    if (!msix_enabled(&proxy->pci_dev) &&
+        vdev->use_mask_notifier &&
+        vdc->guest_notifier_mask) {
         vdc->guest_notifier_mask(vdev, n, !assign);
     }
 
@@ -1879,6 +1881,7 @@ static Property virtio_pci_properties[] = {
                     VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
     DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
+
     DEFINE_PROP_END_OF_LIST(),
 };
 
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 90f25451..c0238b39 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -792,6 +792,7 @@ void virtio_reset(void *opaque)
     vdev->queue_sel = 0;
     vdev->status = 0;
     vdev->isr = 0;
+    vdev->use_mask_notifier = true;
     vdev->config_vector = VIRTIO_NO_VECTOR;
     virtio_notify_vector(vdev, vdev->config_vector);
 
-- 
Victor


* Re: [Qemu-devel] [PATCH v2 1/1] vhost-user interrupt management fixes
From: Michael S. Tsirkin @ 2016-02-16 13:58 UTC (permalink / raw)
  To: Victor Kaplansky
  Cc: thibaut.collet, Jason Wang, jmg, qemu-devel, Didier Pallard

On Tue, Feb 16, 2016 at 03:28:06PM +0200, Victor Kaplansky wrote:
> From: Didier Pallard <didier.pallard@6wind.com>
> 
> Since guest_notifier_mask cannot be used in vhost-user mode, due to the
> buffering implied by the unix control socket, force use_mask_notifier
> off on virtio devices backing vhost-user interfaces, and register the
> correct callfd with the backend at vhost start.
> 
> Using the guest_notifier_mask function in the vhost-user case may break
> the interrupt mask paradigm, because the mask/unmask is not actually
> complete when guest_notifier_mask returns: the message is merely posted
> to a unix socket and processed later.
> 
> Add a boolean flag 'use_mask_notifier' to disable the use of
> guest_notifier_mask in virtio-pci.
> 
> Signed-off-by: Victor Kaplansky <victork@redhat.com>
> ---
>  include/hw/virtio/virtio.h |  1 +
>  hw/net/vhost_net.c         | 24 ++++++++++++++++++++++--
>  hw/virtio/vhost.c          | 13 +++++++++++++
>  hw/virtio/virtio-pci.c     | 15 +++++++++------
>  hw/virtio/virtio.c         |  1 +
>  5 files changed, 46 insertions(+), 8 deletions(-)
> 
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index 108cdb0f..3acbf999 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -90,6 +90,7 @@ struct VirtIODevice
>      VMChangeStateEntry *vmstate;
>      char *bus_name;
>      uint8_t device_endian;
> +    bool use_mask_notifier;
>      QLIST_HEAD(, VirtQueue) *vector_queues;
>  };
>  
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 3940a04b..22ba5571 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -17,6 +17,7 @@
>  #include "net/net.h"
>  #include "net/tap.h"
>  #include "net/vhost-user.h"
> +#include "hw/virtio/virtio-pci.h"
>  
>  #include "hw/virtio/virtio-net.h"
>  #include "net/vhost_net.h"

We definitely don't want a dependency on virtio-pci.
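
For example, a minimal sketch of one way to avoid it (my own assumption,
not code from this series): vhost_net_start() already receives the
VirtIODevice, so the flag could be cleared on it directly instead of
walking the bus to find a VirtIOPCIProxy:

    if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
        /* 'dev' is the VirtIODevice argument of vhost_net_start() */
        dev->use_mask_notifier = false;
    }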


> @@ -306,13 +307,32 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
>      }
>  
>      for (j = 0; j < total_queues; j++) {
> +        struct vhost_net *net;
> +
>          r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
>          if (r < 0) {
>              goto err_endian;
>          }
> -        vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
> -    }
>  
> +        net = get_vhost_net(ncs[j].peer);
> +        vhost_net_set_vq_index(net, j * 2);
> +
> +        /* Force use_mask_notifier reset in vhost user case
> +         * Must be done before set_guest_notifier call
> +         */
> +        if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {

Please don't do this. Add some query from vhost and do the
right thing per backend.
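
A minimal sketch of such a query (the helper name is an assumption, not
existing vhost API) would let the backend report whether masking is
synchronous:

    /* Hypothetical helper: kernel vhost masks via ioctl immediately,
     * while vhost-user only queues a message on the control socket.
     */
    static bool vhost_backend_can_mask_notifiers(struct vhost_dev *dev)
    {
        return dev->vhost_ops->backend_type != VHOST_BACKEND_TYPE_USER;
    }

vhost_net_start() could then set
dev->use_mask_notifier = vhost_backend_can_mask_notifiers(&net->dev);
without knowing anything about the transport.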


> +            BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
> +            DeviceState *d = DEVICE(qbus->parent);
> +            if (!strcmp(object_get_typename(OBJECT(d)), TYPE_VIRTIO_NET_PCI)) {

Again, PCI dependency.


> +                VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

And again here.

> +                VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
> +
> +                /* Force virtual device not use mask notifier */
> +                vdev->use_mask_notifier = false;
> +            }
> +        }
> +     }
> + 
>      r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
>      if (r < 0) {
>          error_report("Error binding guest notifier: %d", -r);
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 7dff7554..80744386 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -855,8 +855,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
>      /* Clear and discard previous events if any. */
>      event_notifier_test_and_clear(&vq->masked_notifier);
>  
> +    /* If vhost user, register now the call eventfd, guest_notifier_mask
> +     * function is not used anymore
> +     */

Can't this look at use_mask_notifier then?
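
A sketch of that variant (assuming the flag has already been cleared for
vhost-user by the time vhost_virtqueue_start() runs):

    if (!vdev->use_mask_notifier) {
        /* Masking is unusable, so point the backend at the guest
         * notifier eventfd right away instead of the masked notifier.
         */
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            r = -errno;
            goto fail_call;
        }
    }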

> +    if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
> +        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
> +        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
> +        if (r) {
> +            r = -errno;
> +            goto fail_call;
> +        }
> +    }
> +
>      return 0;
>  
> +fail_call:
>  fail_kick:
>  fail_alloc:
>      cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 5494ff4a..70f64cf7 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
>          /* If guest supports masking, set up irqfd now.
>           * Otherwise, delay until unmasked in the frontend.
>           */
> -        if (k->guest_notifier_mask) {
> +        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>              ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
>              if (ret < 0) {
>                  kvm_virtio_pci_vq_vector_release(proxy, vector);

Would be cleaner to move guest_notifier_mask out from class
so we don't have to check two things.
But not a must.
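
Short of moving the hook out of the class, even a small helper
(hypothetical, not in the tree) would fold the two conditions into a
single call-site check:

    static bool virtio_pci_masking_enabled(VirtIODevice *vdev,
                                           VirtioDeviceClass *k)
    {
        return vdev->use_mask_notifier && k->guest_notifier_mask;
    }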

> @@ -822,7 +822,7 @@ undo:
>          if (vector >= msix_nr_vectors_allocated(dev)) {
>              continue;
>          }
> -        if (k->guest_notifier_mask) {
> +        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>              kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
>          }
>          kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
>          /* If guest supports masking, clean up irqfd now.
>           * Otherwise, it was cleaned when masked in the frontend.
>           */
> -        if (k->guest_notifier_mask) {
> +        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>              kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
>          }
>          kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
>      /* If guest supports masking, irqfd is already setup, unmask it.
>       * Otherwise, set it up now.
>       */
> -    if (k->guest_notifier_mask) {
> +    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>          k->guest_notifier_mask(vdev, queue_no, false);
>          /* Test after unmasking to avoid losing events. */
>          if (k->guest_notifier_pending &&
> @@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
>      /* If guest supports masking, keep irqfd but mask it.
>       * Otherwise, clean it up now.
>       */ 
> -    if (k->guest_notifier_mask) {
> +    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>          k->guest_notifier_mask(vdev, queue_no, true);
>      } else {
>          kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> @@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
>          event_notifier_cleanup(notifier);
>      }
>  
> -    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
> +    if (!msix_enabled(&proxy->pci_dev) &&
> +        vdev->use_mask_notifier &&
> +        vdc->guest_notifier_mask) {
>          vdc->guest_notifier_mask(vdev, n, !assign);
>      }
>  
> @@ -1879,6 +1881,7 @@ static Property virtio_pci_properties[] = {
>                      VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
>      DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
>                      VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
> +
>      DEFINE_PROP_END_OF_LIST(),
>  };
>  

drop this one pls.

> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 90f25451..c0238b39 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -792,6 +792,7 @@ void virtio_reset(void *opaque)
>      vdev->queue_sel = 0;
>      vdev->status = 0;
>      vdev->isr = 0;
> +    vdev->use_mask_notifier = true;
>      vdev->config_vector = VIRTIO_NO_VECTOR;
>      virtio_notify_vector(vdev, vdev->config_vector);
>  
> -- 
> Victor

