From: Hawkins Jiawei <yin31149@gmail.com>
To: jasowang@redhat.com, mst@redhat.com, eperezma@redhat.com
Cc: qemu-devel@nongnu.org, yin31149@gmail.com, leiyang@redhat.com,
18801353760@163.com
Subject: [PATCH v4 8/8] vdpa: Send cvq state load commands in parallel
Date: Tue, 29 Aug 2023 13:54:50 +0800
Message-ID: <f25fea0b0aed78bad2dd5744a4cc5538243672e6.1693287885.git.yin31149@gmail.com>
In-Reply-To: <cover.1693287885.git.yin31149@gmail.com>

This patch enables sending CVQ state load commands
in parallel at device startup, through the following
steps:
* Refactor vhost_vdpa_net_load_cmd() to iterate through
the control commands shadow buffers. This allows different
CVQ state load commands to use their own unique buffers.
* Delay the polling and checking of buffers until either
the SVQ is full or the control commands shadow buffers
are full (this batching pattern is sketched below).
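
For intuition, here is a minimal standalone C model of that batching
pattern. This is only a sketch, not QEMU code: RING_SLOTS, ack_buf,
dev_poll(), flush_acks() and queue_cmd() are made-up stand-ins for the
SVQ capacity, the status shadow buffer, vhost_vdpa_net_svq_poll(),
vhost_vdpa_net_svq_flush() and vhost_vdpa_net_load_cmd() respectively.

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Illustrative stand-ins, not QEMU definitions. */
#define RING_SLOTS 4          /* assumed SVQ capacity, in commands */
#define ACK_OK     0          /* stands in for VIRTIO_NET_OK */

static unsigned char ack_buf[RING_SLOTS]; /* "status" shadow buffer */
static size_t queued;                     /* commands in flight */

/* Stub device: ack every in-flight command with one status byte. */
static size_t dev_poll(size_t n)
{
    memset(ack_buf, ACK_OK, n);
    return n;
}

/* The "flush": poll once for all in-flight acks, then check each. */
static int flush_acks(void)
{
    if (dev_poll(queued) != queued) {
        return -EIO;
    }
    for (size_t i = 0; i < queued; i++) {
        if (ack_buf[i] != ACK_OK) {
            return -EIO;
        }
    }
    queued = 0;
    return 0;
}

/* Queue one command; flush first only when the ring has no room. */
static int queue_cmd(void)
{
    if (queued == RING_SLOTS && flush_acks() < 0) {
        return -EIO;
    }
    queued++;                 /* submit without polling the device */
    return 0;
}

int main(void)
{
    for (int i = 0; i < 10; i++) {
        if (queue_cmd() < 0) {
            return 1;
        }
    }
    return flush_acks() ? 1 : 0; /* final flush at the end of load */
}

The point of the model is the control flow: nothing is polled per
command, and any bad ack in a batch fails the whole flush, which is
the same error behavior the real callers below rely on.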
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1578
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
v4:
- refactor argument `cmds_in_flight` to `len` for
vhost_vdpa_net_svq_flush()
- check the return value of vhost_vdpa_net_svq_poll()
in vhost_vdpa_net_svq_flush(), as suggested by Eugenio
- use iov_size(), vhost_vdpa_net_load_cursor_reset()
and iov_discard_front() to update the cursors instead of
accessing them directly, as suggested by Eugenio (a
standalone model of the cursor update follows below)
v3: https://lore.kernel.org/all/3a002790e6c880af928c6470ecbf03e7c65a68bb.1689748694.git.yin31149@gmail.com/
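
As a reference for the cursor handling: below is a tiny standalone
model of advancing a one-entry iovec cursor. discard_front() is a
simplified stand-in for QEMU's iov_discard_front(), covering only the
single-entry case this series relies on, and the 4096-byte page size
is an assumption made for the example.

#include <stdio.h>
#include <sys/uio.h>

/*
 * Simplified stand-in for QEMU's iov_discard_front(), limited to a
 * one-entry iovec: drop `bytes` from the front by advancing iov_base
 * and shrinking iov_len.
 */
static size_t discard_front(struct iovec *iov, size_t bytes)
{
    if (bytes > iov->iov_len) {
        bytes = iov->iov_len;
    }
    iov->iov_base = (char *)iov->iov_base + bytes;
    iov->iov_len -= bytes;
    return bytes;
}

int main(void)
{
    char page[4096]; /* assumed size of the out shadow buffer page */
    struct iovec out_cursor = {
        .iov_base = page,
        .iov_len  = sizeof(page),
    };

    /* After writing a 12-byte command, step the cursor past it. */
    discard_front(&out_cursor, 12);
    printf("room left for further commands: %zu bytes\n",
           out_cursor.iov_len);
    return 0;
}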
net/vhost-vdpa.c | 155 +++++++++++++++++++++++++++++------------------
1 file changed, 97 insertions(+), 58 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index a71e8c9090..818464b702 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -646,6 +646,31 @@ static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}
+/*
+ * Poll SVQ for multiple pending control commands and check the device's acks.
+ *
+ * Caller should hold the BQL when invoking this function.
+ *
+ * @s: The VhostVDPAState
+ * @len: The length of the pending status shadow buffer
+ */
+static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
+{
+ /* Device uses a one-byte length ack for each control command */
+ ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
+ if (unlikely(dev_written != len)) {
+ return -EIO;
+ }
+
+ /* check the device's ack */
+ for (int i = 0; i < len; ++i) {
+ if (s->status[i] != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
struct iovec *out_cursor,
struct iovec *in_cursor, uint8_t class,
@@ -660,10 +685,30 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
cmd_size = sizeof(ctrl) + data_size;
struct iovec out, in;
ssize_t r;
+ unsigned dummy_cursor_iov_cnt;
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
+ if (vhost_vdpa_net_svq_available_slots(s) < 2 ||
+ iov_size(out_cursor, 1) < cmd_size) {
+ /*
+ * It is time to flush all pending control commands if SVQ is full
+ * or control commands shadow buffers are full.
+ *
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
+ (void *)s->status);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
+ vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
+ }
+
/* Each CVQ command has one out descriptor and one in descriptor */
assert(vhost_vdpa_net_svq_available_slots(s) >= 2);
+ assert(iov_size(out_cursor, 1) >= cmd_size);
/* Prepare the buffer for out descriptor for the device */
iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
@@ -681,11 +726,13 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
return r;
}
- /*
- * We can poll here since we've had BQL from the time
- * we sent the descriptor.
- */
- return vhost_vdpa_net_svq_poll(s, 1);
+ /* iterate the cursors */
+ dummy_cursor_iov_cnt = 1;
+ iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
+ dummy_cursor_iov_cnt = 1;
+ iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
+
+ return 0;
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
@@ -697,15 +744,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
.iov_base = (void *)n->mac,
.iov_len = sizeof(n->mac),
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_ADDR_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
@@ -750,15 +794,12 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
.iov_len = mul_macs_size,
},
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
data, ARRAY_SIZE(data));
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -770,7 +811,7 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
struct iovec *in_cursor)
{
struct virtio_net_ctrl_mq mq;
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
return 0;
@@ -781,15 +822,12 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
.iov_base = &mq,
.iov_len = sizeof(mq),
};
- dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_MQ,
- VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MQ,
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -801,7 +839,7 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
struct iovec *in_cursor)
{
uint64_t offloads;
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
@@ -829,15 +867,12 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
.iov_base = &offloads,
.iov_len = sizeof(offloads),
};
- dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_GUEST_OFFLOADS,
- VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -853,16 +888,12 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
.iov_base = &on,
.iov_len = sizeof(on),
};
- ssize_t dev_written;
+ ssize_t r;
- dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_RX,
- cmd, &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX, cmd, &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -1019,15 +1050,12 @@ static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
.iov_base = &vid,
.iov_len = sizeof(vid),
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
- VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_ADD,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (unlikely(*s->status != VIRTIO_NET_OK)) {
- return -EIO;
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_VLAN,
+ VIRTIO_NET_CTRL_VLAN_ADD,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
@@ -1096,6 +1124,17 @@ static int vhost_vdpa_net_load(NetClientState *nc)
return r;
}
+ /*
+ * We need to poll and check all of the device's pending used buffers.
+ *
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
+ if (unlikely(r)) {
+ return r;
+ }
+
return 0;
}
--
2.25.1