From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
	Parav Pandit <parav@mellanox.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Juan Quintela <quintela@redhat.com>,
	Richard Henderson <richard.henderson@linaro.org>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Peter Xu <peterx@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	Harpreet Singh Anand <hanand@xilinx.com>,
	Xiao W Wang <xiao.w.wang@intel.com>, Eli Cohen <eli@mellanox.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Stefano Garzarella <sgarzare@redhat.com>,
	Eric Blake <eblake@redhat.com>,
	virtualization@lists.linux-foundation.org,
	Eduardo Habkost <ehabkost@redhat.com>
Subject: [RFC PATCH v5 12/26] vhost: Route guest->host notification through shadow virtqueue
Date: Fri, 29 Oct 2021 20:35:11 +0200
Message-ID: <20211029183525.1776416-13-eperezma@redhat.com>
In-Reply-To: <20211029183525.1776416-1-eperezma@redhat.com>

At this stage no buffer forwarding is performed in SVQ mode: QEMU just
forwards the guest's kicks to the device.
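As a rough illustration of the forwarding idea, a guest kick arriving on
the SVQ's eventfd is drained and re-signaled on the device's kick eventfd.
The sketch below is conceptual only; the structure and function names are
made up for illustration and are not the actual QEMU API:

    #include <stdint.h>
    #include <unistd.h>

    /* Hypothetical, simplified view of a shadow virtqueue's two kick fds */
    struct toy_svq {
        int svq_kick_fd;  /* guest -> SVQ: the guest's kicks land here */
        int hdev_kick_fd; /* SVQ -> device: kicks are relayed here */
    };

    /* Run when svq_kick_fd becomes readable */
    static void toy_svq_handle_guest_kick(struct toy_svq *svq)
    {
        uint64_t n;

        /* Drain the guest notification from the eventfd... */
        if (read(svq->svq_kick_fd, &n, sizeof(n)) != sizeof(n)) {
            return;
        }
        /* ...and relay it to the vhost-vdpa device untouched */
        n = 1;
        (void)write(svq->hdev_kick_fd, &n, sizeof(n));
    }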

Shadow virtqueue notification forwarding is disabled when the vhost_dev
stops, so the code flow follows the usual cleanup path.

Also, host notifiers must be disabled at SVQ start, and they will not
start again if SVQ has been enabled while the device is stopped. This is
trivial to address, but it is left out for simplicity at this moment.

This is an intermediate step before introducing the full SVQ mode, useful
to test whether the device plays well with notification forwarding.
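
For reference, once this toggle is in place it can be exercised from QMP.
A minimal exchange could look like the following (the netdev name "vdpa0"
is just an example):

    -> { "execute": "x-vhost-set-shadow-vq",
         "arguments": { "name": "vdpa0", "set": true } }
    <- { "return": {} }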

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 qapi/net.json                  |   5 +-
 include/hw/virtio/vhost-vdpa.h |   6 ++
 hw/virtio/vhost-vdpa.c         | 183 ++++++++++++++++++++++++++++++++-
 net/vhost-vdpa.c               |  24 ++++-
 4 files changed, 210 insertions(+), 8 deletions(-)

diff --git a/qapi/net.json b/qapi/net.json
index b191b6787b..fca2f6ebca 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -84,12 +84,13 @@
 #
 # Use vhost shadow virtqueue.
 #
+# At this moment SVQ can only forward notifications between the device and the
+# guest. This will be expanded in future changes.
+#
 # @name: the device name of the VirtIO device
 #
 # @set: true to use the alternate shadow VQ notifications
 #
-# Returns: Always error, since SVQ is not implemented at the moment.
-#
 # Since: 6.2
 #
 # Example:
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index c79a21c3c8..6d60092c96 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -12,6 +12,8 @@
 #ifndef HW_VIRTIO_VHOST_VDPA_H
 #define HW_VIRTIO_VHOST_VDPA_H
 
+#include <gmodule.h>
+
 #include "hw/virtio/virtio.h"
 #include "standard-headers/linux/vhost_types.h"
 
@@ -27,9 +29,13 @@ typedef struct vhost_vdpa {
     bool iotlb_batch_begin_sent;
     MemoryListener listener;
     struct vhost_vdpa_iova_range iova_range;
+    bool shadow_vqs_enabled;
+    GPtrArray *shadow_vqs;
     struct vhost_dev *dev;
     int kick_fd[VIRTIO_QUEUE_MAX];
     VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
 } VhostVDPA;
 
+void vhost_vdpa_enable_svq(struct vhost_vdpa *v, bool enable, Error **errp);
+
 #endif
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index e6ee227385..c388705e73 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -17,12 +17,14 @@
 #include "hw/virtio/vhost.h"
 #include "hw/virtio/vhost-backend.h"
 #include "hw/virtio/virtio-net.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
 #include "hw/virtio/vhost-vdpa.h"
 #include "exec/address-spaces.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
 #include "trace.h"
 #include "qemu-common.h"
+#include "qapi/error.h"
 
 /*
  * Return one past the end of the end of section. Be careful with uint64_t
@@ -326,6 +328,16 @@ static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
     return v->index != 0;
 }
 
+/**
+ * Adaptor function to free shadow virtqueue through gpointer
+ *
+ * @svq   The Shadow Virtqueue
+ */
+static void vhost_psvq_free(gpointer svq)
+{
+    vhost_svq_free(svq);
+}
+
 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
 {
     struct vhost_vdpa *v;
@@ -337,6 +349,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
     dev->opaque =  opaque ;
     v->listener = vhost_vdpa_memory_listener;
     v->msg_type = VHOST_IOTLB_MSG_V2;
+    v->shadow_vqs = g_ptr_array_new_full(dev->nvqs, vhost_psvq_free);
 
     vhost_vdpa_get_iova_range(v);
 
@@ -361,7 +374,13 @@ static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
     n = &v->notifier[queue_index];
 
     if (n->addr) {
-        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
+        if (v->shadow_vqs_enabled) {
+            VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs,
+                                                          queue_index);
+            vhost_svq_set_host_mr_notifier(svq, NULL);
+        } else {
+            virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
+        }
         object_unparent(OBJECT(&n->mr));
         munmap(n->addr, page_size);
         n->addr = NULL;
@@ -403,7 +422,12 @@ static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
                                       page_size, addr);
     g_free(name);
 
-    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
+    if (v->shadow_vqs_enabled) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs,
+                                                      queue_index);
+        vhost_svq_set_host_mr_notifier(svq, addr);
+    } else if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr,
+                                                 true)) {
         munmap(addr, page_size);
         goto err;
     }
@@ -432,6 +456,17 @@ err:
     return;
 }
 
+static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    size_t idx;
+
+    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
+        vhost_svq_stop(dev, idx, g_ptr_array_index(v->shadow_vqs, idx));
+    }
+    g_ptr_array_free(v->shadow_vqs, true);
+}
+
 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
 {
     struct vhost_vdpa *v;
@@ -440,6 +475,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
     trace_vhost_vdpa_cleanup(dev, v);
     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
     memory_listener_unregister(&v->listener);
+    vhost_vdpa_svq_cleanup(dev);
 
     dev->opaque = NULL;
     return 0;
@@ -699,16 +735,27 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
     return ret;
 }
 
+static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
+                                         struct vhost_vring_file *file)
+{
+    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
+    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+}
+
 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
 {
     struct vhost_vdpa *v = dev->opaque;
     int vdpa_idx = vhost_vdpa_get_vq_index(dev, file->index);
 
-    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
-
     v->kick_fd[vdpa_idx] = file->fd;
-    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+    if (v->shadow_vqs_enabled) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+        vhost_svq_set_svq_kick_fd(svq, file->fd);
+        return 0;
+    } else {
+        return vhost_vdpa_set_vring_dev_kick(dev, file);
+    }
 }
 
 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
@@ -755,6 +802,132 @@ static bool  vhost_vdpa_force_iommu(struct vhost_dev *dev)
     return true;
 }
 
+/*
+ * Start or stop a shadow virtqueue in a vdpa device
+ *
+ * @dev Vhost device
+ * @idx Vhost device model queue index
+ * @svq_mode Shadow virtqueue mode
+ * @errp Error if any
+ *
+ * The function does not restore the previous device values on failure, so the
+ * caller needs to undo the change by calling this function again with the
+ * negated svq_mode.
+ */
+static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx,
+                                    bool svq_mode, Error **errp)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, idx);
+    VhostVDPAHostNotifier *n = &v->notifier[idx];
+    unsigned vq_index = idx + dev->vq_index;
+    struct vhost_vring_file vhost_kick_file = {
+        .index = vq_index,
+    };
+    int r;
+
+    if (svq_mode) {
+        const EventNotifier *vhost_kick = vhost_svq_get_dev_kick_notifier(svq);
+
+        if (n->addr) {
+            r = virtio_queue_set_host_notifier_mr(dev->vdev, idx, &n->mr,
+                                                  false);
+
+            /*
+             * vhost_vdpa_host_notifier_init already validated as a proper
+             * host notifier memory region
+             */
+            assert(r == 0);
+            vhost_svq_set_host_mr_notifier(svq, n->addr);
+        }
+        vhost_svq_start(dev, idx, svq, v->kick_fd[idx]);
+
+        vhost_kick_file.fd = event_notifier_get_fd(vhost_kick);
+    } else {
+        vhost_svq_stop(dev, idx, svq);
+
+        if (n->addr) {
+            r = virtio_queue_set_host_notifier_mr(dev->vdev, idx, &n->mr,
+                                                  true);
+            /*
+             * vhost_vdpa_host_notifier_init already validated as a proper
+             * host notifier memory region
+             */
+            assert(r == 0);
+        }
+        vhost_kick_file.fd = v->kick_fd[idx];
+    }
+
+    r = vhost_vdpa_set_vring_dev_kick(dev, &vhost_kick_file);
+    if (unlikely(r)) {
+        error_setg_errno(errp, -r, "vhost_vdpa_set_vring_kick failed");
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * Enable or disable shadow virtqueue in a vhost vdpa device.
+ *
+ * This function is idempotent: calling it multiple times with the same value
+ * of enable will simply return success.
+ *
+ * @v       Vhost vdpa device
+ * @enable  True to set SVQ mode
+ * @errp    Error pointer
+ */
+void vhost_vdpa_enable_svq(struct vhost_vdpa *v, bool enable, Error **errp)
+{
+    struct vhost_dev *hdev = v->dev;
+    unsigned n;
+    ERRP_GUARD();
+
+    if (enable == v->shadow_vqs_enabled) {
+        return;
+    }
+
+    if (enable) {
+        /* Allocate resources */
+        assert(v->shadow_vqs->len == 0);
+        for (n = 0; n < hdev->nvqs; ++n) {
+            VhostShadowVirtqueue *svq = vhost_svq_new(hdev, n);
+            bool ok;
+
+            if (unlikely(!svq)) {
+                error_setg(errp, "Cannot create svq");
+                enable = false;
+                goto err_svq_new;
+            }
+            g_ptr_array_add(v->shadow_vqs, svq);
+
+            ok = vhost_vdpa_svq_start_vq(hdev, n, true, errp);
+            if (unlikely(!ok)) {
+                /* Free the not-yet-started SVQs and take the disable path */
+                g_ptr_array_set_size(v->shadow_vqs, n);
+                enable = false;
+                break;
+            }
+        }
+    }
+
+    v->shadow_vqs_enabled = enable;
+
+    if (!enable) {
+        /* Disable all queues or clean up failed start */
+        for (n = 0; n < v->shadow_vqs->len; ++n) {
+            vhost_vdpa_svq_start_vq(hdev, n, false, *errp ? NULL : errp);
+        }
+
+    }
+
+err_svq_new:
+    if (!enable) {
+        /* Resources cleanup */
+        g_ptr_array_set_size(v->shadow_vqs, 0);
+    }
+}
+
 const VhostOps vdpa_ops = {
         .backend_type = VHOST_BACKEND_TYPE_VDPA,
         .vhost_backend_init = vhost_vdpa_init,
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 3b360da27d..325971d8da 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -305,5 +305,27 @@ err:
 
 void qmp_x_vhost_set_shadow_vq(const char *name, bool set, Error **errp)
 {
-    error_setg(errp, "Shadow virtqueue still not implemented");
+    NetClientState *ncs;
+    int queues;
+    ERRP_GUARD();
+
+    queues = qemu_find_net_clients_except(name, &ncs,
+                                          NET_CLIENT_DRIVER_NIC, 1);
+
+    if (!queues) {
+        error_setg(errp, "Device not found");
+    } else if (ncs->info->type != NET_CLIENT_DRIVER_VHOST_VDPA) {
+        error_setg(errp, "Device type is not vdpa");
+    } else if (queues > 1) {
+        error_setg(errp, "Device has control virtqueue");
+    } else {
+        VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, ncs);
+        struct vhost_vdpa *v = &s->vhost_vdpa;
+
+        vhost_vdpa_enable_svq(v, set, errp);
+    }
+
+    if (*errp) {
+        error_prepend(errp, "Can't set shadow vq on %s: ", name);
+    }
 }
-- 
2.27.0


