From: Didier Pallard <didier.pallard@6wind.com>
To: qemu-devel@nongnu.org
Cc: thibaut.collet@6wind.com, jmg@6wind.com,
	Didier Pallard <didier.pallard@6wind.com>
Subject: [Qemu-devel] [PATCH 2/3] virtio-pci: add an option to bypass guest_notifier_mask
Date: Thu,  3 Dec 2015 10:53:18 +0100
Message-ID: <1449136399-4158-3-git-send-email-didier.pallard@6wind.com>
In-Reply-To: <1449136399-4158-1-git-send-email-didier.pallard@6wind.com>

Using the guest_notifier_mask function in the vhost-user case may
break the interrupt mask paradigm, because the mask/unmask is not
actually applied when guest_notifier_mask returns; instead, a
message is posted on a unix socket and processed later.

Add an option bit to disable the use of guest_notifier_mask
in virtio-pci.
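
For example (a sketch only; the netdev name and other device options
are illustrative), the mask bypass could be requested per device with:

    -device virtio-net-pci,netdev=net0,usenotifiermask=off

The property defaults to on, so existing behaviour is unchanged unless
it is explicitly disabled.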

Signed-off-by: Didier Pallard <didier.pallard@6wind.com>
Reviewed-by: Thibaut Collet <thibaut.collet@6wind.com>
---
 hw/virtio/virtio-pci.c | 29 +++++++++++++++++++++++------
 hw/virtio/virtio-pci.h |  6 ++++++
 2 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index dd48562..26bb617 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -806,7 +806,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, set up irqfd now.
          * Otherwise, delay until unmasked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
+            k->guest_notifier_mask) {
             ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
             if (ret < 0) {
                 kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -822,7 +823,8 @@ undo:
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
-        if (k->guest_notifier_mask) {
+        if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
+            k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -849,7 +851,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, clean up irqfd now.
          * Otherwise, it was cleaned when masked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
+            k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -882,7 +885,8 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, irqfd is already setup, unmask it.
      * Otherwise, set it up now.
      */
-    if (k->guest_notifier_mask) {
+    if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
+        k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, false);
         /* Test after unmasking to avoid losing events. */
         if (k->guest_notifier_pending &&
@@ -905,7 +909,8 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, keep irqfd but mask it.
      * Otherwise, clean it up now.
      */ 
-    if (k->guest_notifier_mask) {
+    if ((proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
+        k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, true);
     } else {
         kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@@ -1022,7 +1027,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
         event_notifier_cleanup(notifier);
     }
 
-    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+    if (!msix_enabled(&proxy->pci_dev) &&
+        (proxy->flags & VIRTIO_PCI_FLAG_USE_NOTIFIERMASK) &&
+        vdc->guest_notifier_mask) {
         vdc->guest_notifier_mask(vdev, n, !assign);
     }
 
@@ -1164,6 +1171,8 @@ static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
 static Property virtio_9p_pci_properties[] = {
     DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
     DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
     DEFINE_PROP_END_OF_LIST(),
 };
@@ -1908,6 +1917,8 @@ static Property virtio_blk_pci_properties[] = {
     DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
     DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
     DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
     DEFINE_PROP_END_OF_LIST(),
 };
@@ -1961,6 +1972,8 @@ static const TypeInfo virtio_blk_pci_info = {
 static Property virtio_scsi_pci_properties[] = {
     DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
     DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                        DEV_NVECTORS_UNSPECIFIED),
     DEFINE_PROP_END_OF_LIST(),
@@ -2175,6 +2188,8 @@ static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
 static Property virtio_serial_pci_properties[] = {
     DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
     DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
     DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
     DEFINE_PROP_END_OF_LIST(),
@@ -2215,6 +2230,8 @@ static const TypeInfo virtio_serial_pci_info = {
 static Property virtio_net_properties[] = {
     DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
+    DEFINE_PROP_BIT("usenotifiermask", VirtIOPCIProxy, flags,
+                    VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT, true),
     DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
     DEFINE_PROP_END_OF_LIST(),
 };
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index ffb74bb..aecd4eb 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -86,6 +86,12 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
 #define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
     (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
 
+/* Where a vhost-user implementation exists, using the guest notifier mask
+ * feature can lead to improper interrupt management. Add a flag to allow
+ * disabling the guest notifier mask if desired. */
+#define VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT 6
+#define VIRTIO_PCI_FLAG_USE_NOTIFIERMASK (1 << VIRTIO_PCI_FLAG_USE_NOTIFIERMASK_BIT)
+
 typedef struct {
     MSIMessage msg;
     int virq;
-- 
2.1.4

Thread overview: 33+ messages
2015-12-03  9:53 [Qemu-devel] Linux vhost-user interrupt management fixes Didier Pallard
2015-12-03  9:53 ` [Qemu-devel] [PATCH 1/3] char: fix vhost-user socket full Didier Pallard
2015-12-07 13:31   ` Marc-André Lureau
2015-12-09 15:59     ` Victor Kaplansky
2015-12-09 17:06       ` Didier Pallard
2015-12-10 12:56         ` Victor Kaplansky
2015-12-10 15:09           ` Didier Pallard
2015-12-17 14:41             ` Victor Kaplansky
2016-02-04 13:13   ` Michael S. Tsirkin
2016-02-04 14:10   ` Michael S. Tsirkin
2016-02-08 13:12     ` Didier Pallard
2016-02-09 11:37       ` Michael S. Tsirkin
2016-02-09 11:48     ` Daniel P. Berrange
2016-02-09 12:21       ` Michael S. Tsirkin
2016-02-09 16:17         ` Didier Pallard
2016-02-09 16:50           ` Michael S. Tsirkin
2016-02-09 17:04           ` Daniel P. Berrange
2016-02-10  9:35             ` Didier Pallard
2016-02-10 11:53               ` Michael S. Tsirkin
2016-02-10 12:15                 ` Daniel P. Berrange
2016-02-19  9:09                   ` Didier Pallard
2015-12-03  9:53 ` Didier Pallard [this message]
2015-12-07 13:37   ` [Qemu-devel] [PATCH 2/3] virtio-pci: add an option to bypass guest_notifier_mask Marc-André Lureau
2015-12-07 13:59     ` Marc-André Lureau
2015-12-09 15:06       ` Didier Pallard
2016-02-04 13:08   ` Michael S. Tsirkin
2016-02-08 13:24     ` Didier Pallard
2016-02-15 15:38   ` Victor Kaplansky
2015-12-03  9:53 ` [Qemu-devel] [PATCH 3/3] vhost-net: force guest_notifier_mask bypass in vhost-user case Didier Pallard
2016-02-04 13:06   ` Michael S. Tsirkin
2015-12-04 10:04 ` [Qemu-devel] Linux vhost-user interrupt management fixes Didier Pallard
2016-01-25  9:22 ` Victor Kaplansky
2016-01-26  9:25   ` Didier Pallard
