From: Jason Wang <jasowang@redhat.com>
To: qemu-devel@nongnu.org, mst@redhat.com, jasowang@redhat.com
Cc: eperezma@redhat.com, elic@nvidia.com, lingshan.zhu@intel.com,
	lulu@redhat.com
Subject: [PATCH V2 15/18] vhost-net: control virtqueue support
Date: Tue,  6 Jul 2021 16:27:14 +0800
Message-ID: <20210706082717.37730-16-jasowang@redhat.com>
In-Reply-To: <20210706082717.37730-1-jasowang@redhat.com>

In the past we assumed there was no cvq, but this is no longer true
when we need control virtqueue support for vhost-user backends. This
patch therefore implements control virtqueue support for vhost-net.
As with the datapath, the control virtqueue must also be coupled with
a NetClientState. vhost_net_start/stop() are tweaked to accept the
number of datapath queue pairs plus the number of control virtqueues
to start and stop for the vhost device. Since each data queue pair
contributes an rx and a tx virtqueue while the control virtqueue
contributes a single one, the guest notifier count becomes
data_qps * 2 + cvq.
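
For illustration, a minimal caller-side sketch of the new API. This is
not part of the patch: the two-queue-pair setup and the error handling
are illustrative only; vdev, n and error_report() follow the existing
code in hw/net/virtio-net.c, and the peer layout is the one this patch
assumes:

    /*
     * Start vhost for two data queue pairs plus one control virtqueue.
     * Per this patch, ncs must hold the data peers at indexes
     * [0, data_qps) and the control peer at index n->max_queues;
     * data_qps * 2 + cvq guest notifiers will be bound.
     */
    int r = vhost_net_start(vdev, n->nic->ncs, 2 /* data_qps */, 1 /* cvq */);
    if (r < 0) {
        error_report("unable to start vhost net: %d", -r);
    }

    /* ... later, tear down with the same counts ... */
    vhost_net_stop(vdev, n->nic->ncs, 2, 1);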

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/net/vhost_net.c      | 43 ++++++++++++++++++++++++++++++-----------
 hw/net/virtio-net.c     |  4 ++--
 include/net/vhost_net.h |  6 ++++--
 3 files changed, 38 insertions(+), 15 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index ef1370bd92..4294fb9fc9 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -311,11 +311,14 @@ static void vhost_net_stop_one(struct vhost_net *net,
 }
 
 int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_qps, int cvq)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    int total_notifiers = data_qps * 2 + cvq;
+    VirtIONet *n = VIRTIO_NET(dev);
+    int nvhosts = data_qps + cvq;
     struct vhost_net *net;
     int r, e, i;
     NetClientState *peer;
@@ -325,9 +328,14 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         return -ENOSYS;
     }
 
-    for (i = 0; i < total_queues; i++) {
+    for (i = 0; i < nvhosts; i++) {
+
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else { /* Control Virtqueue */
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
 
-        peer = qemu_get_peer(ncs, i);
         net = get_vhost_net(peer);
         vhost_net_set_vq_index(net, i * 2);
 
@@ -340,14 +348,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         }
      }
 
-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
         goto err;
     }
 
-    for (i = 0; i < total_queues; i++) {
-        peer = qemu_get_peer(ncs, i);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
         r = vhost_net_start_one(get_vhost_net(peer), dev);
 
         if (r < 0) {
@@ -371,7 +383,7 @@ err_start:
         peer = qemu_get_peer(ncs , i);
         vhost_net_stop_one(get_vhost_net(peer), dev);
     }
-    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
     if (e < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
         fflush(stderr);
@@ -381,18 +393,27 @@ err:
 }
 
 void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_qps, int cvq)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    VirtIONet *n = VIRTIO_NET(dev);
+    NetClientState *peer;
+    int total_notifiers = data_qps * 2 + cvq;
+    int nvhosts = data_qps + cvq;
     int i, r;
 
-    for (i = 0; i < total_queues; i++) {
-        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_qps) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
+        vhost_net_stop_one(get_vhost_net(peer), dev);
     }
 
-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
         fflush(stderr);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index bd7958b9f0..614660274c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
         }
 
         n->vhost_started = 1;
-        r = vhost_net_start(vdev, n->nic->ncs, queues);
+        r = vhost_net_start(vdev, n->nic->ncs, queues, 0);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
             n->vhost_started = 0;
         }
     } else {
-        vhost_net_stop(vdev, n->nic->ncs, queues);
+        vhost_net_stop(vdev, n->nic->ncs, queues, 0);
         n->vhost_started = 0;
     }
 }
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index fba40cf695..e656e38af9 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -21,8 +21,10 @@ typedef struct VhostNetOptions {
 uint64_t vhost_net_get_max_queues(VHostNetState *net);
 struct vhost_net *vhost_net_init(VhostNetOptions *options);
 
-int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
-void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+                    int data_qps, int cvq);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+                    int data_qps, int cvq);
 
 void vhost_net_cleanup(VHostNetState *net);
 
-- 
2.25.1


