From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Gautam Dawar <gdawar@xilinx.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Markus Armbruster <armbru@redhat.com>,
	"Gonglei (Arei)" <arei.gonglei@huawei.com>,
	Harpreet Singh Anand <hanand@xilinx.com>,
	Cornelia Huck <cohuck@redhat.com>,
	Zhu Lingshan <lingshan.zhu@intel.com>,
	Laurent Vivier <lvivier@redhat.com>, Eli Cohen <eli@mellanox.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Liuxiangdong <liuxiangdong5@huawei.com>,
	Eric Blake <eblake@redhat.com>, Cindy Lu <lulu@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Parav Pandit <parav@mellanox.com>
Subject: [RFC PATCH v8 08/21] vhost: Add SVQElement
Date: Thu, 19 May 2022 21:12:53 +0200
Message-ID: <20220519191306.821774-9-eperezma@redhat.com>
In-Reply-To: <20220519191306.821774-1-eperezma@redhat.com>

Introduce SVQElement, a thin wrapper around VirtQueueElement, so that SVQ can attach its own per-element metadata to the guest's queue elements.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.h |  8 ++++--
 hw/virtio/vhost-shadow-virtqueue.c | 46 ++++++++++++++++--------------
 2 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 50f45153c0..e06ac52158 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -15,6 +15,10 @@
 #include "standard-headers/linux/vhost_types.h"
 #include "hw/virtio/vhost-iova-tree.h"
 
+typedef struct SVQElement {
+    VirtQueueElement elem;
+} SVQElement;
+
 typedef void (*VirtQueueElementCallback)(VirtIODevice *vdev,
                                          const VirtQueueElement *elem);
 
@@ -64,10 +68,10 @@ typedef struct VhostShadowVirtqueue {
     VhostIOVATree *iova_tree;
 
     /* Map for use the guest's descriptors */
-    VirtQueueElement **ring_id_maps;
+    SVQElement **ring_id_maps;
 
     /* Next VirtQueue element that guest made available */
-    VirtQueueElement *next_guest_avail_elem;
+    SVQElement *next_guest_avail_elem;
 
     /*
      * Backup next field for each descriptor so we can recover securely, not
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 2d5d27d29c..044005ba89 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -171,9 +171,10 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
     return true;
 }
 
-static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
-                                VirtQueueElement *elem, unsigned *head)
+static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, SVQElement *svq_elem,
+                                unsigned *head)
 {
+    const VirtQueueElement *elem = &svq_elem->elem;
     unsigned avail_idx;
     vring_avail_t *avail = svq->vring.avail;
     bool ok;
@@ -222,7 +223,7 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
  * takes ownership of the element: In case of failure, it is free and the SVQ
  * is considered broken.
  */
-static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
+static bool vhost_svq_add(VhostShadowVirtqueue *svq, SVQElement *elem)
 {
     unsigned qemu_head;
     bool ok = vhost_svq_add_split(svq, elem, &qemu_head);
@@ -272,19 +273,21 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
         virtio_queue_set_notification(svq->vq, false);
 
         while (true) {
+            SVQElement *svq_elem;
             VirtQueueElement *elem;
             bool ok;
 
             if (svq->next_guest_avail_elem) {
-                elem = g_steal_pointer(&svq->next_guest_avail_elem);
+                svq_elem = g_steal_pointer(&svq->next_guest_avail_elem);
             } else {
-                elem = virtqueue_pop(svq->vq, sizeof(*elem));
+                svq_elem = virtqueue_pop(svq->vq, sizeof(*svq_elem));
             }
 
-            if (!elem) {
+            if (!svq_elem) {
                 break;
             }
 
+            elem = &svq_elem->elem;
             if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) {
                 /*
                  * This condition is possible since a contiguous buffer in GPA
@@ -297,11 +300,11 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
                  * queue the current guest descriptor and ignore further kicks
                  * until some elements are used.
                  */
-                svq->next_guest_avail_elem = elem;
+                svq->next_guest_avail_elem = svq_elem;
                 return;
             }
 
-            ok = vhost_svq_add(svq, elem);
+            ok = vhost_svq_add(svq, svq_elem);
             if (unlikely(!ok)) {
                 /* VQ is broken, just return and ignore any other kicks */
                 return;
@@ -368,8 +371,7 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
     return i;
 }
 
-static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
-                                           uint32_t *len)
+static SVQElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, uint32_t *len)
 {
     const vring_used_t *used = svq->vring.used;
     vring_used_elem_t used_elem;
@@ -399,8 +401,8 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
         return NULL;
     }
 
-    num = svq->ring_id_maps[used_elem.id]->in_num +
-          svq->ring_id_maps[used_elem.id]->out_num;
+    num = svq->ring_id_maps[used_elem.id]->elem.in_num +
+          svq->ring_id_maps[used_elem.id]->elem.out_num;
     last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
     svq->desc_next[last_used_chain] = svq->free_head;
     svq->free_head = used_elem.id;
@@ -421,11 +423,13 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
         vhost_svq_disable_notification(svq);
         while (true) {
             uint32_t len;
-            g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
-            if (!elem) {
+            g_autofree SVQElement *svq_elem = vhost_svq_get_buf(svq, &len);
+            VirtQueueElement *elem;
+            if (!svq_elem) {
                 break;
             }
 
+            elem = &svq_elem->elem;
             if (svq->ops && svq->ops->used_elem_handler) {
                 svq->ops->used_elem_handler(svq->vdev, elem);
             }
@@ -580,7 +584,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     memset(svq->vring.desc, 0, driver_size);
     svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
     memset(svq->vring.used, 0, device_size);
-    svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+    svq->ring_id_maps = g_new0(SVQElement *, svq->vring.num);
     svq->desc_next = g_new0(uint16_t, svq->vring.num);
     for (unsigned i = 0; i < svq->vring.num - 1; i++) {
         svq->desc_next[i] = cpu_to_le16(i + 1);
@@ -594,7 +598,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
 void vhost_svq_stop(VhostShadowVirtqueue *svq)
 {
     event_notifier_set_handler(&svq->svq_kick, NULL);
-    g_autofree VirtQueueElement *next_avail_elem = NULL;
+    g_autofree SVQElement *next_avail_elem = NULL;
 
     if (!svq->vq) {
         return;
@@ -604,16 +608,16 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
     vhost_svq_flush(svq, false);
 
     for (unsigned i = 0; i < svq->vring.num; ++i) {
-        g_autofree VirtQueueElement *elem = NULL;
-        elem = g_steal_pointer(&svq->ring_id_maps[i]);
-        if (elem) {
-            virtqueue_detach_element(svq->vq, elem, 0);
+        g_autofree SVQElement *svq_elem = NULL;
+        svq_elem = g_steal_pointer(&svq->ring_id_maps[i]);
+        if (svq_elem) {
+            virtqueue_detach_element(svq->vq, &svq_elem->elem, 0);
         }
     }
 
     next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
     if (next_avail_elem) {
-        virtqueue_detach_element(svq->vq, next_avail_elem, 0);
+        virtqueue_detach_element(svq->vq, &next_avail_elem->elem, 0);
     }
     svq->vq = NULL;
     g_free(svq->desc_next);
-- 
2.27.0


