From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Liuxiangdong <liuxiangdong5@huawei.com>,
	Gautam Dawar <gdawar@xilinx.com>,
	Jason Wang <jasowang@redhat.com>,
	alvaro.karsz@solid-run.com,
	"Gonglei (Arei)" <arei.gonglei@huawei.com>,
	Lei Yang <leiyang@redhat.com>,
	si-wei.liu@oracle.com, Eli Cohen <eli@mellanox.com>,
	Shannon Nelson <snelson@pensando.io>,
	Laurent Vivier <lvivier@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Stefano Garzarella <sgarzare@redhat.com>,
	Parav Pandit <parav@mellanox.com>,
	Zhu Lingshan <lingshan.zhu@intel.com>, Cindy Lu <lulu@redhat.com>,
	longpeng2@huawei.com, Harpreet Singh Anand <hanand@xilinx.com>
Subject: [PATCH for 8.1 v2 6/6] vdpa: Cache cvq group in VhostVDPAState
Date: Thu, 23 Mar 2023 20:54:04 +0100
Message-ID: <20230323195404.1247326-7-eperezma@redhat.com>
In-Reply-To: <20230323195404.1247326-1-eperezma@redhat.com>

Continue the move of code that interacts with the device from control
virtqueue start to control virtqueue init.

As with previous patches, this reduces the number of ioctls issued
during migration, and with it the chances of failure.
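
For reference, the pattern this series converges on is summarised in the
minimal sketch below. It is plain C, not QEMU code: struct state,
get_vring_group(), probe_init() and cvq_start() are simplified stand-ins
for VhostVDPAState, the VHOST_VDPA_GET_VRING_GROUP query and the net
client init/start callbacks, and the isolation probing itself is elided.
The point is only that the group is queried once at init and merely read
back at start, so the start path (exercised during migration) issues no
extra device calls.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the device query (the real code issues an ioctl). */
    static int64_t get_vring_group(unsigned vq_index)
    {
        return 2; /* pretend the device reports CVQ in group 2 */
    }

    /* Simplified stand-in for VhostVDPAState. */
    struct state {
        bool cvq_isolated;
        uint32_t cvq_group;  /* cached here so cvq_start() needs no query */
    };

    /* Runs once, at net client init: query the device and cache the result. */
    static int probe_init(struct state *s, unsigned cvq_index)
    {
        int64_t group = get_vring_group(cvq_index);
        if (group < 0) {
            return (int)group;
        }
        s->cvq_isolated = true;            /* isolation check elided here */
        s->cvq_group = (uint32_t)group;
        return 0;
    }

    /* Runs on every device start (e.g. at migration): only reads the cache. */
    static int cvq_start(const struct state *s)
    {
        if (!s->cvq_isolated) {
            return 0;
        }
        printf("reusing cached CVQ group %u\n", (unsigned)s->cvq_group);
        return 0;
    }

    int main(void)
    {
        struct state s = { 0 };
        if (probe_init(&s, 2) == 0) {
            cvq_start(&s);
        }
        return 0;
    }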

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 net/vhost-vdpa.c | 40 +++++++++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 13 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index db2c9afcb3..6a60e8cc2b 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -41,6 +41,12 @@ typedef struct VhostVDPAState {
     void *cvq_cmd_out_buffer;
     virtio_net_ctrl_ack *status;
 
+    /* CVQ group if cvq_isolated_mq */
+    uint32_t cvq_group_mq;
+
+    /* CVQ group if cvq_isolated */
+    uint32_t cvq_group;
+
     /* The device always have SVQ enabled */
     bool always_svq;
 
@@ -480,7 +486,6 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
     struct vhost_vdpa *v;
     int64_t cvq_group;
     int r;
-    Error *err = NULL;
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
@@ -509,18 +514,14 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
         if (!s->cvq_isolated_mq) {
             return 0;
         }
+
+        cvq_group = s->cvq_group_mq;
     } else {
         if (!s->cvq_isolated) {
             return 0;
         }
-    }
 
-    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
-                                           v->dev->vq_index_end - 1,
-                                           &err);
-    if (unlikely(cvq_group < 0)) {
-        error_report_err(err);
-        return cvq_group;
+        cvq_group = s->cvq_group;
     }
 
     r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
@@ -790,11 +791,13 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
  * @device_fd vhost-vdpa file descriptor
  * @features features to negotiate
  * @cvq_index Control vq index
+ * @pcvq_group: Returns CVQ group if cvq is isolated.
  *
  * Returns -1 in case of error, 0 if false and 1 if true
  */
 static int vhost_vdpa_cvq_is_isolated(int device_fd, uint64_t features,
-                                      unsigned cvq_index, Error **errp)
+                                      unsigned cvq_index, uint32_t *pcvq_group,
+                                      Error **errp)
 {
     int64_t cvq_group;
     int r;
@@ -810,6 +813,7 @@ static int vhost_vdpa_cvq_is_isolated(int device_fd, uint64_t features,
         return cvq_group;
     }
 
+    *pcvq_group = (uint32_t)cvq_group;
     for (int i = 0; i < cvq_index; ++i) {
         int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
 
@@ -836,12 +840,15 @@ static int vhost_vdpa_cvq_is_isolated(int device_fd, uint64_t features,
  *                    negotiated.
  * @cvq_isolated_mq   It'll be set to true if cvq is isolated if mq is
  *                    negotiated.
+ * @cvq_group         CVQ group if MQ is not negotiated.
+ * @cvq_group_mq      CVQ group if MQ is negotiated.
  *
  * Returns -1 in case of failure
  */
 static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                           int cvq_index, bool *cvq_isolated,
-                                          bool *cvq_isolated_mq, Error **errp)
+                                          bool *cvq_isolated_mq, uint32_t *cvq_group,
+                                          uint32_t *cvq_group_mq, Error **errp)
 {
     uint64_t backend_features;
     int r;
@@ -850,6 +857,8 @@ static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
 
     *cvq_isolated = false;
     *cvq_isolated_mq = false;
+    *cvq_group = 0;
+    *cvq_group_mq = 0;
     r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
     if (unlikely(r < 0)) {
         error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
@@ -862,7 +871,7 @@ static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
 
     r = vhost_vdpa_cvq_is_isolated(device_fd,
                                    features & ~BIT_ULL(VIRTIO_NET_F_MQ), 2,
-                                   errp);
+                                   cvq_group, errp);
     if (unlikely(r < 0)) {
         if (r == -ENOTSUP) {
             /*
@@ -884,7 +893,8 @@ static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
         return 0;
     }
 
-    r = vhost_vdpa_cvq_is_isolated(device_fd, features, cvq_index * 2, errp);
+    r = vhost_vdpa_cvq_is_isolated(device_fd, features, cvq_index * 2,
+                                   cvq_group_mq, errp);
     if (unlikely(r < 0)) {
         return r;
     }
@@ -911,6 +921,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     int ret = 0;
     assert(name);
     bool cvq_isolated, cvq_isolated_mq;
+    uint32_t cvq_group, cvq_group_mq;
 
     if (is_datapath) {
         nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
@@ -918,7 +929,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     } else {
         ret = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                              queue_pair_index, &cvq_isolated,
-                                             &cvq_isolated_mq, errp);
+                                             &cvq_isolated_mq, &cvq_group,
+                                             &cvq_group_mq, errp);
         if (unlikely(ret)) {
             return NULL;
         }
@@ -951,6 +963,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
         s->vhost_vdpa.shadow_vq_ops_opaque = s;
         s->cvq_isolated = cvq_isolated;
         s->cvq_isolated_mq = cvq_isolated_mq;
+        s->cvq_group = cvq_group;
+        s->cvq_group_mq = cvq_group_mq;
 
         /*
          * TODO: We cannot migrate devices with CVQ as there is no way to set
-- 
2.31.1


