From: Cindy Lu <lulu@redhat.com>
To: lulu@redhat.com, mst@redhat.com, jasowang@redhat.com,
	qemu-devel@nongnu.org
Subject: [PATCH v6 5/9] vhost: add support for configure interrupt
Date: Tue, 27 Apr 2021 11:39:47 +0800
Message-ID: <20210427033951.29805-6-lulu@redhat.com>
In-Reply-To: <20210427033951.29805-1-lulu@redhat.com>

Add support for the configure interrupt (config-change notifier) in
vhost and the related drivers.  The eventfd handed to the backend via
the vhost_set_config_call backend op is switched between the device's
config_notifier (unmasked) and a new per-device masked_config_notifier
(masked).  When the backend supports VIRTIO_NET_F_STATUS, set up the
configure interrupt in vhost_dev_start() and release the related
resources in vhost_dev_stop().

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
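A note for reviewers (below the --- line, so not part of the commit):
the sketch shows how a transport without irqfd support is expected to
consume the new API.  The function and its name are illustrative only,
not part of this patch; the real virtio-mmio and virtio-pci wiring is
added in patches 6/9 and 7/9.

    /* Illustrative sketch, assuming the transport owns the notifier
     * lifecycle; not part of this patch. */
    static int transport_assign_config_notifier(VirtIODevice *vdev,
                                                bool assign)
    {
        EventNotifier *notifier = virtio_get_config_notifier(vdev);
        int r;

        if (assign) {
            r = event_notifier_init(notifier, 0);
            if (r < 0) {
                return r;
            }
            /* virtio_config_read() fires virtio_notify_config() when
             * the backend signals the eventfd. */
            virtio_set_config_notifier_fd_handler(vdev, true, false);
        } else {
            virtio_set_config_notifier_fd_handler(vdev, false, false);
            event_notifier_cleanup(notifier);
        }
        return 0;
    }
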
 hw/net/vhost_net.c         |  9 +++++
 hw/net/virtio-net.c        |  6 ++++
 hw/virtio/vhost.c          | 70 ++++++++++++++++++++++++++++++++++++--
 hw/virtio/virtio.c         | 22 ++++++++++++
 include/hw/virtio/vhost.h  |  3 ++
 include/hw/virtio/virtio.h |  4 +++
 include/net/vhost_net.h    |  3 ++
 7 files changed, 115 insertions(+), 2 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 24d555e764..12e30dc25e 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -426,6 +426,15 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
     vhost_virtqueue_mask(&net->dev, dev, idx, mask);
 }
 
+bool vhost_net_config_pending(VHostNetState *net, int idx)
+{
+    return vhost_config_pending(&net->dev, idx);
+}
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
+                           bool mask)
+{
+    vhost_config_mask(&net->dev, dev, mask);
+}
 VHostNetState *get_vhost_net(NetClientState *nc)
 {
     VHostNetState *vhost_net = 0;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 78ccaa228c..43b912453a 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3063,6 +3063,9 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
     if (idx != -1) {
         return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
     }
+    if (idx == -1) {
+        return vhost_net_config_pending(get_vhost_net(nc->peer), idx);
+    }
     return false;
 }
 
@@ -3075,6 +3078,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
     if (idx != -1) {
         vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
      }
+    if (idx == -1) {
+        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
+    }
 }
 
 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 614ccc2bcb..162a5dd90c 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -21,6 +21,7 @@
 #include "qemu/error-report.h"
 #include "qemu/memfd.h"
 #include "standard-headers/linux/vhost_types.h"
+#include "standard-headers/linux/virtio_net.h"
 #include "exec/address-spaces.h"
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-access.h"
@@ -1313,6 +1314,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
             goto fail;
         }
     }
+    r = event_notifier_init(&hdev->masked_config_notifier, 0);
+    if (r < 0) {
+        return r;
+    }
 
     if (busyloop_timeout) {
         for (i = 0; i < hdev->nvqs; ++i) {
@@ -1405,6 +1410,7 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
     for (i = 0; i < hdev->nvqs; ++i) {
         vhost_virtqueue_cleanup(hdev->vqs + i);
     }
+    event_notifier_cleanup(&hdev->masked_config_notifier);
     if (hdev->mem) {
         /* those are only safe after successful init */
         memory_listener_unregister(&hdev->memory_listener);
@@ -1498,6 +1504,16 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
     return event_notifier_test_and_clear(&vq->masked_notifier);
 }
 
+bool vhost_config_pending(struct vhost_dev *hdev, int n)
+{
+    assert(hdev->vhost_ops);
+
+    if (!hdev->started ||
+        !hdev->vhost_ops->vhost_set_config_call) {
+        return false;
+    }
+    return event_notifier_test_and_clear(&hdev->masked_config_notifier);
+}
 /* Mask/unmask events from this vq. */
 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
@@ -1522,6 +1538,30 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
     }
 }
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
+                       bool mask)
+{
+    int fd;
+    int r;
+    EventNotifier *masked_config_notifier = &hdev->masked_config_notifier;
+    EventNotifier *config_notifier = &vdev->config_notifier;
+    assert(hdev->vhost_ops);
+
+    if (!hdev->started ||
+        !hdev->vhost_ops->vhost_set_config_call) {
+        return;
+    }
+    if (mask) {
+        assert(vdev->use_guest_notifier_mask);
+        fd = event_notifier_get_fd(masked_config_notifier);
+    } else {
+        fd = event_notifier_get_fd(config_notifier);
+    }
+    r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
+    if (r < 0) {
+        error_report("vhost_set_config_call failed");
+    }
+}
 
 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                             uint64_t features)
@@ -1701,6 +1741,7 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
     int i, r;
+    int fd = 0;
 
     /* should only be called after backend is connected */
     assert(hdev->vhost_ops);
@@ -1732,7 +1773,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
             goto fail_vq;
         }
     }
-
+    event_notifier_test_and_clear(&hdev->masked_config_notifier);
+    if (!vdev->use_guest_notifier_mask) {
+        vhost_config_mask(hdev, vdev, true);
+    }
     if (hdev->log_enabled) {
         uint64_t log_base;
 
@@ -1749,6 +1793,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
             goto fail_log;
         }
     }
+
     if (hdev->vhost_ops->vhost_dev_start) {
         r = hdev->vhost_ops->vhost_dev_start(hdev, true);
         if (r) {
@@ -1766,6 +1811,19 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
         }
     }
+    if (!(hdev->features & (0x1ULL << VIRTIO_NET_F_STATUS))) {
+        return 0;
+    }
+    if (hdev->vhost_ops->vhost_set_config_call) {
+        fd = event_notifier_get_fd(&vdev->config_notifier);
+        r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
+        if (r) {
+            goto fail_log;
+        }
+        /* Kick the notifier once so any config change that happened
+         * while vhost was starting reaches the guest. */
+        event_notifier_set(&vdev->config_notifier);
+    }
     return 0;
 fail_log:
     vhost_log_put(hdev, false);
@@ -1788,10 +1846,18 @@ fail_features:
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
     int i;
+    int fd;
 
     /* should only be called after backend is connected */
     assert(hdev->vhost_ops);
-
+    event_notifier_test_and_clear(&hdev->masked_config_notifier);
+    event_notifier_test_and_clear(&vdev->config_notifier);
+    if ((hdev->features & (0x1ULL << VIRTIO_NET_F_STATUS)) &&
+        hdev->vhost_ops->vhost_set_config_call) {
+        /* Passing an fd of -1 detaches the config call fd. */
+        fd = -1;
+        hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
+    }
     if (hdev->vhost_ops->vhost_dev_start) {
         hdev->vhost_ops->vhost_dev_start(hdev, false);
     }
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index ceb58fda6c..5dff29c981 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3502,6 +3502,14 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
     }
 }
 
+static void virtio_config_read(EventNotifier *n)
+{
+    VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
+
+    if (event_notifier_test_and_clear(n)) {
+        virtio_notify_config(vdev);
+    }
+}
 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                 bool with_irqfd)
 {
@@ -3517,6 +3525,16 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
         virtio_queue_guest_notifier_read(&vq->guest_notifier);
     }
 }
+void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+                                            bool with_irqfd)
+{
+    if (assign && !with_irqfd) {
+        event_notifier_set_handler(&vdev->config_notifier,
+                                   virtio_config_read);
+    } else {
+        event_notifier_set_handler(&vdev->config_notifier, NULL);
+    }
+}
 
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
 {
@@ -3591,6 +3609,10 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
     return &vq->host_notifier;
 }
 
+EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev)
+{
+    return &vdev->config_notifier;
+}
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
 {
     vq->host_notifier_enabled = enabled;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 4a8bc75415..22efa7008e 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -91,6 +91,7 @@ struct vhost_dev {
     QLIST_HEAD(, vhost_iommu) iommu_list;
     IOMMUNotifier n;
     const VhostDevConfigOps *config_ops;
+    EventNotifier masked_config_notifier;
 };
 
 struct vhost_net {
@@ -108,6 +109,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+bool vhost_config_pending(struct vhost_dev *hdev, int n);
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);
 
 /* Test and clear masked event pending status.
  * Should be called after unmask to avoid losing events.
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index b7ece7a6a8..b0b714f6d4 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -108,6 +108,7 @@ struct VirtIODevice
     bool use_guest_notifier_mask;
     AddressSpace *dma_as;
     QLIST_HEAD(, VirtQueue) *vector_queues;
+    EventNotifier config_notifier;
 };
 
 struct VirtioDeviceClass {
@@ -310,11 +311,14 @@ uint16_t virtio_get_queue_index(VirtQueue *vq);
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                 bool with_irqfd);
+void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+                                            bool with_irqfd);
 int virtio_device_start_ioeventfd(VirtIODevice *vdev);
 int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
 void virtio_device_release_ioeventfd(VirtIODevice *vdev);
 bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev);
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
 void virtio_queue_host_notifier_read(EventNotifier *n);
 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 172b0051d8..0d38c97c94 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -36,6 +36,9 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
 bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
 void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                               int idx, bool mask);
+bool vhost_net_config_pending(VHostNetState *net, int n);
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
+                           bool mask);
 int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
 VHostNetState *get_vhost_net(NetClientState *nc);
 
-- 
2.21.3



