From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Parav Pandit <parav@mellanox.com>,
	Juan Quintela <quintela@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	virtualization@lists.linux-foundation.org,
	Harpreet Singh Anand <hanand@xilinx.com>,
	Xiao W Wang <xiao.w.wang@intel.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Eli Cohen <eli@mellanox.com>, Eric Blake <eblake@redhat.com>,
	Michael Lilja <ml@napatech.com>,
	Stefano Garzarella <sgarzare@redhat.com>
Subject: [RFC PATCH v4 19/20] vhost: Use a tree to store memory mappings
Date: Fri,  1 Oct 2021 09:06:02 +0200
Message-ID: <20211001070603.307037-20-eperezma@redhat.com>
In-Reply-To: <20211001070603.307037-1-eperezma@redhat.com>

Track the memory translations of devices with an IOMMU (currently, all
vhost-vdpa devices). It does not yet work if the device has restrictions
on its IOVA range.

Updates to the tree are protected by the BQL; each one always runs from
the main event loop context.
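
For illustration, the flow below boils down to asking the tree for a free
IOVA range for a host buffer and then pinning that range with a DMA map
call. A minimal sketch reusing the types and helpers this series
introduces (qemu_vaddr and region_size are placeholders, not part of the
patch):

    VhostDMAMap map = {
        .translated_addr = qemu_vaddr,  /* QEMU memory, not the guest's */
        .size = region_size - 1,        /* .size is inclusive */
        .perm = VHOST_ACCESS_RW,
    };
    /* vhost_iova_tree_alloc() fills map.iova with a free device address */
    if (vhost_iova_tree_alloc(v->iova_map, &map) == VHOST_DMA_MAP_OK) {
        vhost_vdpa_dma_map(v, map.iova, map.size, map.translated_addr,
                           false);
    }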

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 include/hw/virtio/vhost-vdpa.h |  3 ++
 hw/virtio/vhost-vdpa.c         | 59 ++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)

diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 9044ae694b..7353e36884 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -15,6 +15,7 @@
 #include <gmodule.h>
 
 #include "qemu/queue.h"
+#include "hw/virtio/vhost-iova-tree.h"
 #include "hw/virtio/virtio.h"
 
 typedef struct VhostVDPAHostNotifier {
@@ -29,6 +30,8 @@ typedef struct vhost_vdpa {
     uint64_t host_features;
     uint64_t guest_features;
     bool shadow_vqs_enabled;
+    /* IOVA tree with the mappings used by the shadow virtqueues */
+    VhostIOVATree *iova_map;
     GPtrArray *shadow_vqs;
     struct vhost_dev *dev;
     QLIST_ENTRY(vhost_vdpa) entry;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index bb7010ddb5..a9c680b487 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -395,6 +395,7 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
         vhost_svq_stop(dev, idx, g_ptr_array_index(v->shadow_vqs, idx));
     }
     g_ptr_array_free(v->shadow_vqs, true);
+    g_clear_pointer(&v->iova_map, vhost_iova_tree_unref);
 }
 
 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
@@ -753,6 +754,22 @@ static bool  vhost_vdpa_force_iommu(struct vhost_dev *dev)
     return true;
 }
 
+/**
+ * Maps QEMU vaddr memory to the device for use by the shadow virtqueue:
+ * - It always references QEMU memory addresses, not the guest's memory.
+ * - TODO: It must always be within the device's IOVA range.
+ *
+ * The allocated IOVA is returned in map->iova.
+ */
+static int vhost_vdpa_svq_map(struct vhost_vdpa *v, VhostDMAMap *map)
+{
+    int r = vhost_iova_tree_alloc(v->iova_map, map);
+    assert(r == VHOST_DMA_MAP_OK);
+
+    return vhost_vdpa_dma_map(v, map->iova, map->size, map->translated_addr,
+                              false);
+}
+
 static int vhost_vdpa_vring_pause(struct vhost_dev *dev)
 {
     int r;
@@ -771,6 +788,7 @@ static int vhost_vdpa_vring_pause(struct vhost_dev *dev)
  */
 static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx)
 {
+    VhostDMAMap device_region, driver_region;
     struct vhost_vdpa *v = dev->opaque;
     VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, idx);
     EventNotifier *vhost_call_notifier = vhost_svq_get_svq_call_notifier(svq);
@@ -789,6 +807,33 @@ static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx)
     bool b;
 
     vhost_svq_get_vring_addr(svq, &addr);
+    driver_region = (VhostDMAMap) {
+        .translated_addr = (void *)addr.desc_user_addr,
+
+        /*
+         * VhostDMAMap.size includes the last byte of the range, while the
+         * area size helpers count one past it. Subtract one to make them
+         * match.
+         */
+        .size = vhost_svq_driver_area_size(svq) - 1,
+        .perm = VHOST_ACCESS_RO,
+    };
+    device_region = (VhostDMAMap) {
+        .translated_addr = (void *)addr.used_user_addr,
+        .size = vhost_svq_device_area_size(svq) - 1,
+        .perm = VHOST_ACCESS_RW,
+    };
+
+    r = vhost_vdpa_svq_map(v, &driver_region);
+    assert(r == 0);
+    r = vhost_vdpa_svq_map(v, &device_region);
+    assert(r == 0);
+
+    /* Expose IOVA addresses; avail keeps its offset in the driver area */
+    addr.avail_user_addr = driver_region.iova + addr.avail_user_addr
+                           - addr.desc_user_addr;
+    addr.desc_user_addr = driver_region.iova;
+    addr.used_user_addr = device_region.iova;
+
     r = vhost_vdpa_set_vring_addr(dev, &addr);
     if (unlikely(r)) {
         error_report("vhost_set_vring_addr for shadow vq failed");
@@ -822,6 +867,17 @@ static bool vhost_vdpa_svq_start_vq(struct vhost_dev *dev, unsigned idx)
     return true;
 }
 
+/**
+ * Enable or disable the shadow virtqueues of a vhost-vdpa device.
+ *
+ * This function is idempotent: calling it many times with the same value of
+ * enable simply returns success.
+ *
+ * @v       The vhost-vdpa device
+ * @enable  Whether the shadow virtqueues should be enabled
+ *
+ * Returns the number of queues changed.
+ */
 static unsigned vhost_vdpa_enable_svq(struct vhost_vdpa *v, bool enable)
 {
     struct vhost_dev *hdev = v->dev;
@@ -833,6 +889,8 @@ static unsigned vhost_vdpa_enable_svq(struct vhost_vdpa *v, bool enable)
     }
 
     if (enable) {
+        v->iova_map = vhost_iova_tree_new();
+
         /* Allocate resources */
         assert(v->shadow_vqs->len == 0);
         for (n = 0; n < hdev->nvqs; ++n) {
@@ -907,6 +965,7 @@ static unsigned vhost_vdpa_enable_svq(struct vhost_vdpa *v, bool enable)
     if (!enable) {
         /* Resources cleanup */
         g_ptr_array_set_size(v->shadow_vqs, 0);
+        g_clear_pointer(&v->iova_map, vhost_iova_tree_unref);
     }
 
     return n;
-- 
2.27.0


