From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Parav Pandit <parav@mellanox.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Juan Quintela <quintela@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	virtualization@lists.linux-foundation.org,
	Harpreet Singh Anand <hanand@xilinx.com>,
	Xiao W Wang <xiao.w.wang@intel.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Eli Cohen <eli@mellanox.com>,
	Stefano Garzarella <sgarzare@redhat.com>,
	Michael Lilja <ml@napatech.com>,
	Jim Harford <jim.harford@broadcom.com>,
	Rob Miller <rob.miller@broadcom.com>
Subject: [RFC 09/10] vhost: Route guest->host notification through shadow virtqueue
Date: Fri, 29 Jan 2021 21:54:14 +0100
Message-ID: <20210129205415.876290-10-eperezma@redhat.com>
In-Reply-To: <20210129205415.876290-1-eperezma@redhat.com>

Guest kicks are now caught by the shadow virtqueue handler and forwarded
to the vhost device through the shadow kick notifier. This notification
forwarding is disabled when the vhost_dev stops.
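
For reference, forwarding is toggled at runtime through the
x-vhost-enable-shadow-vq command added earlier in this series. Going by
the C signature handled below (qmp_x_vhost_enable_shadow_vq(const char
*name, bool enable, Error **errp)), a monitor invocation would look
roughly like the following sketch; the authoritative argument names live
in the QAPI schema from patch 08, and the device name shown here is only
an example of what hdev->vdev->name might match:

  { "execute": "x-vhost-enable-shadow-vq",
    "arguments": { "name": "virtio-net", "enable": true } }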

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.h |   5 ++
 include/hw/virtio/vhost.h          |   4 +
 hw/virtio/vhost-shadow-virtqueue.c | 123 +++++++++++++++++++++++++-
 hw/virtio/vhost.c                  | 135 ++++++++++++++++++++++++++++-
 4 files changed, 264 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 6cc18d6acb..466f8ae595 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -17,6 +17,11 @@
 
 typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
 
+bool vhost_shadow_vq_start_rcu(struct vhost_dev *dev,
+                               VhostShadowVirtqueue *svq);
+void vhost_shadow_vq_stop_rcu(struct vhost_dev *dev,
+                              VhostShadowVirtqueue *svq);
+
 VhostShadowVirtqueue *vhost_shadow_vq_new(struct vhost_dev *dev, int idx);
 
 void vhost_shadow_vq_free(VhostShadowVirtqueue *vq);
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 2be782cefd..732a4b2a2b 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -55,6 +55,8 @@ struct vhost_iommu {
     QLIST_ENTRY(vhost_iommu) iommu_next;
 };
 
+typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
+
 typedef struct VhostDevConfigOps {
     /* Vhost device config space changed callback
      */
@@ -83,7 +85,9 @@ struct vhost_dev {
     uint64_t backend_cap;
     bool started;
     bool log_enabled;
+    bool sw_lm_enabled;
     uint64_t log_size;
+    VhostShadowVirtqueue **shadow_vqs;
     Error *migration_blocker;
     const VhostOps *vhost_ops;
     void *opaque;
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index c0c967a7c5..908c36c66d 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -8,15 +8,129 @@
  */
 
 #include "hw/virtio/vhost-shadow-virtqueue.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/virtio-access.h"
+
+#include "standard-headers/linux/vhost_types.h"
+#include "standard-headers/linux/virtio_ring.h"
 
 #include "qemu/error-report.h"
-#include "qemu/event_notifier.h"
+#include "qemu/main-loop.h"
 
 typedef struct VhostShadowVirtqueue {
     EventNotifier kick_notifier;
     EventNotifier call_notifier;
+    const struct vhost_virtqueue *hvq;
+    VirtIODevice *vdev;
+    VirtQueue *vq;
 } VhostShadowVirtqueue;
 
+static uint16_t vhost_shadow_vring_used_flags(VhostShadowVirtqueue *svq)
+{
+    const struct vring_used *used = svq->hvq->used;
+    return virtio_tswap16(svq->vdev, used->flags);
+}
+
+static bool vhost_shadow_vring_should_kick(VhostShadowVirtqueue *vq)
+{
+    return !(vhost_shadow_vring_used_flags(vq) & VRING_USED_F_NO_NOTIFY);
+}
+
+static void vhost_shadow_vring_kick(VhostShadowVirtqueue *vq)
+{
+    if (vhost_shadow_vring_should_kick(vq)) {
+        event_notifier_set(&vq->kick_notifier);
+    }
+}
+
+static void handle_shadow_vq(VirtIODevice *vdev, VirtQueue *vq)
+{
+    struct vhost_dev *hdev = vhost_dev_from_virtio(vdev);
+    uint16_t idx = virtio_get_queue_index(vq);
+
+    VhostShadowVirtqueue *svq = hdev->shadow_vqs[idx];
+
+    vhost_shadow_vring_kick(svq);
+}
+
+/*
+ * Start shadow virtqueue operation.
+ * @dev vhost device
+ * @svq Shadow Virtqueue
+ *
+ * Run in RCU context
+ */
+bool vhost_shadow_vq_start_rcu(struct vhost_dev *dev,
+                               VhostShadowVirtqueue *svq)
+{
+    const VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(dev->vdev);
+    EventNotifier *vq_host_notifier = virtio_queue_get_host_notifier(svq->vq);
+    unsigned idx = virtio_queue_get_idx(svq->vdev, svq->vq);
+    struct vhost_vring_file kick_file = {
+        .index = idx,
+        .fd = event_notifier_get_fd(&svq->kick_notifier),
+    };
+    int r;
+    bool ok;
+
+    /* Check that notifications are still going directly to vhost dev */
+    assert(virtio_queue_host_notifier_status(svq->vq));
+
+    ok = k->set_vq_handler(dev->vdev, idx, handle_shadow_vq);
+    if (!ok) {
+        error_report("Couldn't set the vq handler");
+        goto err_set_kick_handler;
+    }
+
+    r = dev->vhost_ops->vhost_set_vring_kick(dev, &kick_file);
+    if (r != 0) {
+        error_report("Couldn't set kick fd: %s", strerror(errno));
+        goto err_set_vring_kick;
+    }
+
+    event_notifier_set_handler(vq_host_notifier,
+                               virtio_queue_host_notifier_read);
+    virtio_queue_set_host_notifier_enabled(svq->vq, false);
+    virtio_queue_host_notifier_read(vq_host_notifier);
+
+    return true;
+
+err_set_vring_kick:
+    k->set_vq_handler(dev->vdev, idx, NULL);
+
+err_set_kick_handler:
+    return false;
+}
+
+/*
+ * Stop shadow virtqueue operation.
+ * @dev vhost device
+ * @svq Shadow Virtqueue
+ *
+ * Run in RCU context
+ */
+void vhost_shadow_vq_stop_rcu(struct vhost_dev *dev,
+                              VhostShadowVirtqueue *svq)
+{
+    const VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(svq->vdev);
+    unsigned idx = virtio_queue_get_idx(svq->vdev, svq->vq);
+    EventNotifier *vq_host_notifier = virtio_queue_get_host_notifier(svq->vq);
+    struct vhost_vring_file kick_file = {
+        .index = idx,
+        .fd = event_notifier_get_fd(vq_host_notifier),
+    };
+    int r;
+
+    /* Restore vhost kick */
+    r = dev->vhost_ops->vhost_set_vring_kick(dev, &kick_file);
+    /* Not much can be done if restoring the kick fd fails */
+    assert(r == 0);
+
+    event_notifier_set_handler(vq_host_notifier, NULL);
+    virtio_queue_set_host_notifier_enabled(svq->vq, true);
+    k->set_vq_handler(svq->vdev, idx, NULL);
+}
+
 /*
  * Creates the vhost shadow virtqueue and instructs the vhost device to use
  * the shadow methods and file descriptors.
@@ -24,8 +138,13 @@ typedef struct VhostShadowVirtqueue {
 VhostShadowVirtqueue *vhost_shadow_vq_new(struct vhost_dev *dev, int idx)
 {
     g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
+    int vq_idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + idx);
     int r;
 
+    svq->vq = virtio_get_queue(dev->vdev, vq_idx);
+    svq->hvq = &dev->vqs[idx];
+    svq->vdev = dev->vdev;
+
     r = event_notifier_init(&svq->kick_notifier, 0);
     if (r != 0) {
         error_report("Couldn't create kick event notifier: %s",
@@ -40,7 +159,7 @@ VhostShadowVirtqueue *vhost_shadow_vq_new(struct vhost_dev *dev, int idx)
         goto err_init_call_notifier;
     }
 
-    return svq;
+    return g_steal_pointer(&svq);
 
 err_init_call_notifier:
     event_notifier_cleanup(&svq->kick_notifier);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 42836e45f3..bde688f278 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -25,6 +25,7 @@
 #include "exec/address-spaces.h"
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-access.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
 #include "migration/blocker.h"
 #include "migration/qemu-file-types.h"
 #include "sysemu/dma.h"
@@ -945,6 +946,82 @@ static void vhost_log_global_stop(MemoryListener *listener)
     }
 }
 
+static int vhost_sw_live_migration_stop(struct vhost_dev *dev)
+{
+    int idx;
+
+    WITH_RCU_READ_LOCK_GUARD() {
+        dev->sw_lm_enabled = false;
+
+        for (idx = 0; idx < dev->nvqs; ++idx) {
+            vhost_shadow_vq_stop_rcu(dev, dev->shadow_vqs[idx]);
+        }
+    }
+
+    for (idx = 0; idx < dev->nvqs; ++idx) {
+        vhost_shadow_vq_free(dev->shadow_vqs[idx]);
+    }
+
+    g_free(dev->shadow_vqs);
+    dev->shadow_vqs = NULL;
+    return 0;
+}
+
+static int vhost_sw_live_migration_start(struct vhost_dev *dev)
+{
+    int idx;
+
+    dev->shadow_vqs = g_new0(VhostShadowVirtqueue *, dev->nvqs);
+    for (idx = 0; idx < dev->nvqs; ++idx) {
+        dev->shadow_vqs[idx] = vhost_shadow_vq_new(dev, idx);
+        if (unlikely(dev->shadow_vqs[idx] == NULL)) {
+            goto err;
+        }
+    }
+
+    WITH_RCU_READ_LOCK_GUARD() {
+        for (idx = 0; idx < dev->nvqs; ++idx) {
+            int stop_idx = idx;
+            bool ok = vhost_shadow_vq_start_rcu(dev,
+                                                dev->shadow_vqs[idx]);
+
+            if (!ok) {
+                while (--stop_idx >= 0) {
+                    vhost_shadow_vq_stop_rcu(dev, dev->shadow_vqs[stop_idx]);
+                }
+
+                goto err;
+            }
+        }
+    }
+
+    dev->sw_lm_enabled = true;
+    return 0;
+
+err:
+    for (; idx >= 0; --idx) {
+        vhost_shadow_vq_free(dev->shadow_vqs[idx]);
+    }
+    g_free(dev->shadow_vqs);
+
+    return -1;
+}
+
+static int vhost_sw_live_migration_enable(struct vhost_dev *dev,
+                                          bool enable_lm)
+{
+    int r;
+
+    if (enable_lm == dev->sw_lm_enabled) {
+        return 0;
+    }
+
+    r = enable_lm ? vhost_sw_live_migration_start(dev)
+                  : vhost_sw_live_migration_stop(dev);
+
+    return r;
+}
+
 static void vhost_log_start(MemoryListener *listener,
                             MemoryRegionSection *section,
                             int old, int new)
@@ -1389,6 +1466,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->log = NULL;
     hdev->log_size = 0;
     hdev->log_enabled = false;
+    hdev->sw_lm_enabled = false;
     hdev->started = false;
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
@@ -1816,6 +1894,11 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
         hdev->vhost_ops->vhost_dev_start(hdev, false);
     }
     for (i = 0; i < hdev->nvqs; ++i) {
+        if (hdev->sw_lm_enabled) {
+            vhost_shadow_vq_stop_rcu(hdev, hdev->shadow_vqs[i]);
+            vhost_shadow_vq_free(hdev->shadow_vqs[i]);
+        }
+
         vhost_virtqueue_stop(hdev,
                              vdev,
                              hdev->vqs + i,
@@ -1829,6 +1912,8 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
         memory_listener_unregister(&hdev->iommu_listener);
     }
     vhost_log_put(hdev, true);
+    g_free(hdev->shadow_vqs);
+    hdev->sw_lm_enabled = false;
     hdev->started = false;
     hdev->vdev = NULL;
 }
@@ -1845,5 +1930,53 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
 
 void qmp_x_vhost_enable_shadow_vq(const char *name, bool enable, Error **errp)
 {
-    error_setg(errp, "Shadow virtqueue still not implemented.");
+    struct vhost_dev *hdev;
+    const char *err_cause = NULL;
+    const VirtioDeviceClass *k;
+    int r;
+    ErrorClass err_class = ERROR_CLASS_GENERIC_ERROR;
+
+    QLIST_FOREACH(hdev, &vhost_devices, entry) {
+        if (hdev->vdev && 0 == strcmp(hdev->vdev->name, name)) {
+            break;
+        }
+    }
+
+    if (!hdev) {
+        err_class = ERROR_CLASS_DEVICE_NOT_FOUND;
+        err_cause = "Device not found";
+        goto err;
+    }
+
+    if (!hdev->started) {
+        err_cause = "Device is not started";
+        goto err;
+    }
+
+    if (hdev->acked_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+        err_cause = "Use packed vq";
+        goto err;
+    }
+
+    if (vhost_dev_has_iommu(hdev)) {
+        err_cause = "Device use IOMMU";
+        goto err;
+    }
+
+    k = VIRTIO_DEVICE_GET_CLASS(hdev->vdev);
+    if (!k->set_vq_handler) {
+        err_cause = "Virtio device type does not support reset of vq handler";
+        goto err;
+    }
+
+    r = vhost_sw_live_migration_enable(hdev, enable);
+    if (unlikely(r)) {
+        err_cause = "Error enabling (see monitor)";
+    }
+
+err:
+    if (err_cause) {
+        error_set(errp, err_class,
+                  "Can't enable shadow vq on %s: %s", name, err_cause);
+    }
 }
-- 
2.27.0



Thread overview: 31+ messages
2021-01-29 20:54 [RFC 00/10] vDPA shadow virtqueue - notifications forwarding Eugenio Pérez
2021-01-29 20:54 ` [RFC 01/10] virtio: Add virtqueue_set_handler Eugenio Pérez
2021-01-29 20:54 ` [RFC 02/10] virtio: Add set_vq_handler Eugenio Pérez
2021-01-29 20:54 ` [RFC 03/10] virtio: Add virtio_queue_get_idx Eugenio Pérez
2021-02-01  6:10   ` Jason Wang
2021-02-01  7:20     ` Eugenio Perez Martin
2021-01-29 20:54 ` [RFC 04/10] virtio: Add virtio_queue_host_notifier_status Eugenio Pérez
2021-01-29 20:54 ` [RFC 05/10] vhost: Add vhost_dev_from_virtio Eugenio Pérez
2021-02-01  6:12   ` Jason Wang
2021-02-01  8:28     ` Eugenio Perez Martin
2021-02-02  3:31       ` Jason Wang
2021-02-02 10:17         ` Eugenio Perez Martin
2021-02-04  3:14           ` Jason Wang
2021-02-04  9:25             ` Eugenio Perez Martin
2021-02-05  3:51               ` Jason Wang
2021-02-09 15:35                 ` Eugenio Perez Martin
2021-02-10  5:54                   ` Jason Wang
2021-01-29 20:54 ` [RFC 06/10] vhost: Save masked_notifier state Eugenio Pérez
2021-01-29 20:54 ` [RFC 07/10] vhost: Add VhostShadowVirtqueue Eugenio Pérez
2021-01-29 20:54 ` [RFC 08/10] vhost: Add x-vhost-enable-shadow-vq qmp Eugenio Pérez
2021-02-02 15:38   ` Eric Blake
2021-02-04  9:01     ` Eugenio Perez Martin
2021-02-04 12:16       ` Markus Armbruster
2021-02-04 14:03         ` Eugenio Perez Martin
2021-01-29 20:54 ` Eugenio Pérez [this message]
2021-02-01  6:29   ` [RFC 09/10] vhost: Route guest->host notification through shadow virtqueue Jason Wang
2021-02-02 10:08     ` Eugenio Perez Martin
2021-02-04  3:26       ` Jason Wang
2021-02-09 15:02         ` Eugenio Perez Martin
2021-02-10  5:57           ` Jason Wang
2021-01-29 20:54 ` [RFC 10/10] vhost: Route host->guest " Eugenio Pérez
