From: Hawkins Jiawei <yin31149@gmail.com>
To: jasowang@redhat.com, mst@redhat.com, eperezma@redhat.com
Cc: qemu-devel@nongnu.org, yin31149@gmail.com, leiyang@redhat.com,
	18801353760@163.com
Subject: [PATCH v4 6/8] vdpa: Move vhost_svq_poll() to the caller of vhost_vdpa_net_cvq_add()
Date: Tue, 29 Aug 2023 13:54:48 +0800	[thread overview]
Message-ID: <496c542c22ae1b4222175d5576c949621c7c2fc0.1693287885.git.yin31149@gmail.com> (raw)
In-Reply-To: <cover.1693287885.git.yin31149@gmail.com>

This patch moves vhost_svq_poll() to the caller of
vhost_vdpa_net_cvq_add() and introduces a helper function.

With this change, the next patches in this series can
refactor vhost_vdpa_net_load_x() to delay the polling
and checking process until either the SVQ is full or
the shadow buffers for control commands are full.
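
For reference, after this patch a caller is expected to follow
the pattern sketched below. This is only a minimal illustration
using the names from the diff; the example_load_cmd() wrapper
itself is hypothetical and error handling is simplified:

    static ssize_t example_load_cmd(VhostVDPAState *s,
                                    const struct iovec *out, size_t out_num,
                                    const struct iovec *in, size_t in_num)
    {
        /* Queue the control command on the shadow CVQ */
        ssize_t r = vhost_vdpa_net_cvq_add(s, out, out_num, in, in_num);
        if (unlikely(r < 0)) {
            return r;
        }

        /*
         * Poll for exactly one completion while still holding the BQL;
         * later patches in this series can defer this call and poll
         * once for several in-flight commands.
         */
        return vhost_vdpa_net_svq_poll(s, 1);
    }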

Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
v4:
  - always check the return value of vhost_vdpa_net_svq_poll(),
    as suggested by Eugenio

v3: https://lore.kernel.org/all/152177c4e7082236fba9d31d535e40f8c2984349.1689748694.git.yin31149@gmail.com/

 net/vhost-vdpa.c | 53 +++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 43 insertions(+), 10 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 116a06cc45..d9b8b3cf6c 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -609,15 +609,21 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
             qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                           __func__);
         }
-        return r;
     }
 
-    /*
-     * We can poll here since we've had BQL from the time we sent the
-     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
-     * when BQL is released
-     */
-    return vhost_svq_poll(svq, 1);
+    return r;
+}
+
+/*
+ * Convenience wrapper to poll SVQ for multiple control commands.
+ *
+ * Caller should hold the BQL when invoking this function, and should take
+ * the answer before SVQ pulls by itself when BQL is released.
+ */
+static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
+{
+    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
+    return vhost_svq_poll(svq, cmds_in_flight);
 }
 
 /* Convenience wrapper to get number of available SVQ descriptors */
@@ -645,6 +651,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
         .iov_base = s->status,
         .iov_len = sizeof(*s->status),
     };
+    ssize_t r;
 
     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
     /* Each CVQ command has one out descriptor and one in descriptor */
@@ -657,7 +664,16 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
     iov_to_buf(data_sg, data_num, 0,
                s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
 
-    return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
+    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
+    if (unlikely(r < 0)) {
+        return r;
+    }
+
+    /*
+     * We can poll here since we've had BQL from the time
+     * we sent the descriptor.
+     */
+    return vhost_vdpa_net_svq_poll(s, 1);
 }
 
 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
@@ -1150,6 +1166,15 @@ static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
     if (unlikely(r < 0)) {
         return r;
     }
+
+    /*
+     * We can poll here since we've had BQL from the time
+     * we sent the descriptor.
+     */
+    r = vhost_vdpa_net_svq_poll(s, 1);
+    if (unlikely(r < sizeof(*s->status))) {
+        return r;
+    }
     if (*s->status != VIRTIO_NET_OK) {
         return sizeof(*s->status);
     }
@@ -1269,10 +1294,18 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
             goto out;
         }
     } else {
-        dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
-        if (unlikely(dev_written < 0)) {
+        ssize_t r;
+        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
+        if (unlikely(r < 0)) {
+            dev_written = r;
             goto out;
         }
+
+        /*
+         * We can poll here since we've had BQL from the time
+         * we sent the descriptor.
+         */
+        dev_written = vhost_vdpa_net_svq_poll(s, 1);
     }
 
     if (unlikely(dev_written < sizeof(status))) {
-- 
2.25.1


