From: Eli Cohen <elic@nvidia.com>
To: Jason Wang <jasowang@redhat.com>
Cc: eperezma@redhat.com, lingshan.zhu@intel.com,
	qemu-devel@nongnu.org, lulu@redhat.com, mst@redhat.com
Subject: Re: [PATCH 15/18] vhost-net: control virtqueue support
Date: Thu, 24 Jun 2021 10:42:17 +0300
Message-ID: <20210624074217.GB42206@mtl-vdi-166.wap.labs.mlnx>
In-Reply-To: <20210621041650.5826-16-jasowang@redhat.com>

On Mon, Jun 21, 2021 at 12:16:47PM +0800, Jason Wang wrote:
> We used to assume there is no cvq, but this is not true when we need
> control virtqueue support for vhost-user backends. So this patch
> implements control virtqueue support for vhost-net. As with the
> datapath, the control virtqueue also needs to be coupled with a
> NetClientState. vhost_net_start/stop() are tweaked to accept the
> number of datapath queue pairs plus the number of control virtqueues,
> so we can start and stop the vhost device accordingly.
> 
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  hw/net/vhost_net.c      | 43 ++++++++++++++++++++++++++++++-----------
>  hw/net/virtio-net.c     |  4 ++--
>  include/net/vhost_net.h |  6 ++++--
>  3 files changed, 38 insertions(+), 15 deletions(-)
> 
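For concreteness, a worked example of the new accounting (the values
below are illustrative, not taken from the patch): each data queue
pair has an RX and a TX virtqueue, hence two guest notifiers, while
the control virtqueue adds one more notifier and one more vhost
device to start:

    int data_qps = 2, cvq = 1;                 /* illustrative only */
    int total_notifiers = data_qps * 2 + cvq;  /* 5 guest notifiers */
    int nvhosts = data_qps + cvq;              /* 3 vhost devices   */
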
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index ef1370bd92..fe2fd7e3d5 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -311,11 +311,14 @@ static void vhost_net_stop_one(struct vhost_net *net,
>  }
>  
>  int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
> -                    int total_queues)
> +                    int data_qps, int cvq)
>  {
>      BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
>      VirtioBusState *vbus = VIRTIO_BUS(qbus);
>      VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
> +    int total_notifiers = data_qps * 2 + cvq;
> +    VirtIONet *n = VIRTIO_NET(dev);
> +    int nvhosts = data_qps + cvq;
>      struct vhost_net *net;
>      int r, e, i;
>      NetClientState *peer;
> @@ -325,9 +328,14 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
>          return -ENOSYS;
>      }
>  
> -    for (i = 0; i < total_queues; i++) {
> +    for (i = 0; i < nvhosts; i++) {
> +
> +        if (i < data_qps) {
> +            peer = qemu_get_peer(ncs, i);
> +        } else { /* Control Virtqueue */
> +            peer = qemu_get_peer(ncs, n->max_qps);
> +        }
>  
> -        peer = qemu_get_peer(ncs, i);
>          net = get_vhost_net(peer);
>          vhost_net_set_vq_index(net, i * 2);
>  
> @@ -340,14 +348,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
>          }
>       }
>  
> -    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
> +    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
>      if (r < 0) {
>          error_report("Error binding guest notifier: %d", -r);
>          goto err;
>      }
>  
> -    for (i = 0; i < total_queues; i++) {
> -        peer = qemu_get_peer(ncs, i);
> +    for (i = 0; i < nvhosts; i++) {
> +        if (i < data_qps) {
> +            peer = qemu_get_peer(ncs, i);
> +        } else {
> +            peer = qemu_get_peer(ncs, n->max_qps);
> +        }
>          r = vhost_net_start_one(get_vhost_net(peer), dev);
>  
>          if (r < 0) {
> @@ -371,7 +383,7 @@ err_start:
>          peer = qemu_get_peer(ncs , i);
>          vhost_net_stop_one(get_vhost_net(peer), dev);
>      }
> -    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
> +    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
>      if (e < 0) {
>          fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
>          fflush(stderr);
> @@ -381,18 +393,27 @@ err:
>  }
>  
>  void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
> -                    int total_queues)
> +                    int data_qps, int cvq)
>  {
>      BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
>      VirtioBusState *vbus = VIRTIO_BUS(qbus);
>      VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
> +    VirtIONet *n = VIRTIO_NET(dev);
> +    NetClientState *peer;
> +    int total_notifiers = data_qps * 2 + cvq;
> +    int nvhosts = data_qps + cvq;
>      int i, r;
>  
> -    for (i = 0; i < total_queues; i++) {
> -        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
> +    for (i = 0; i < nvhosts; i++) {
> +        if (i < data_qps) {
> +            peer = qemu_get_peer(ncs, i);
> +        } else {
> +            peer = qemu_get_peer(ncs, n->max_qps);
> +        }
> +        vhost_net_stop_one(get_vhost_net(peer), dev);
>      }
>  
> -    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
> +    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
>      if (r < 0) {
>          fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
>          fflush(stderr);
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index bd7958b9f0..614660274c 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
>          }
>  
>          n->vhost_started = 1;
> -        r = vhost_net_start(vdev, n->nic->ncs, queues);
> +        r = vhost_net_start(vdev, n->nic->ncs, queues, 0);
>          if (r < 0) {
>              error_report("unable to start vhost net: %d: "
>                           "falling back on userspace virtio", -r);
>              n->vhost_started = 0;
>          }
>      } else {
> -        vhost_net_stop(vdev, n->nic->ncs, queues);
> +        vhost_net_stop(vdev, n->nic->ncs, queues, 0);
>          n->vhost_started = 0;
>      }
>  }
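
Note that both call sites here still pass cvq = 0; a hypothetical
caller, once the control virtqueue is wired up (as [PATCH 17/18]
later in this series does for virtio-net), might look like:

    r = vhost_net_start(vdev, n->nic->ncs, queues, 1);
    ...
    vhost_net_stop(vdev, n->nic->ncs, queues, 1);
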
> diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> index fba40cf695..e656e38af9 100644
> --- a/include/net/vhost_net.h
> +++ b/include/net/vhost_net.h
> @@ -21,8 +21,10 @@ typedef struct VhostNetOptions {
>  uint64_t vhost_net_get_max_queues(VHostNetState *net);
>  struct vhost_net *vhost_net_init(VhostNetOptions *options);
>  
> -int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
> -void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
This breaks compilation of hw/net/vhost_net-stub.c
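
A minimal sketch of the matching update to the stubs in
hw/net/vhost_net-stub.c, assuming their bodies stay as they are
today (parameter names follow this patch):

    int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                        int data_qps, int cvq)
    {
        return -ENOSYS;
    }

    void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
                        int data_qps, int cvq)
    {
    }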

> +int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
> +                    int data_qps, int cvq);
> +void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
> +                    int data_qps, int cvq);
>  
>  void vhost_net_cleanup(VHostNetState *net);
>  
> -- 
> 2.25.1
> 


Thread overview: 51+ messages
2021-06-21  4:16 [PATCH 00/18] vhost-vDPA multiqueue Jason Wang
2021-06-21  4:16 ` [PATCH 01/18] vhost_net: remove the meaningless assignment in vhost_net_start_one() Jason Wang
2021-06-21 11:45   ` Eli Cohen
2021-06-24  7:42     ` Jason Wang
2021-06-21  4:16 ` [PATCH 02/18] vhost: use unsigned int for nvqs Jason Wang
2021-06-21 11:46   ` Eli Cohen
2021-06-21  4:16 ` [PATCH 03/18] vhost_net: do not assume nvqs is always 2 Jason Wang
2021-06-23 14:49   ` Stefano Garzarella
2021-06-24  6:22   ` Eli Cohen
2021-06-24  7:42     ` Jason Wang
2021-06-21  4:16 ` [PATCH 04/18] vhost-vdpa: remove the unnecessary check in vhost_vdpa_add() Jason Wang
2021-06-23 14:53   ` Stefano Garzarella
2021-06-24  6:38     ` Eli Cohen
2021-06-24  7:46     ` Jason Wang
2021-06-21  4:16 ` [PATCH 05/18] vhost-vdpa: don't cleanup twice " Jason Wang
2021-06-23 14:56   ` Stefano Garzarella
2021-06-21  4:16 ` [PATCH 06/18] vhost-vdpa: fix leaking of vhost_net " Jason Wang
2021-06-23 15:00   ` Stefano Garzarella
2021-06-24  7:06     ` Eli Cohen
2021-06-24  7:10       ` Jason Wang
2021-06-24  7:32         ` Eli Cohen
2021-06-24  7:14     ` Eli Cohen
2021-06-24  7:41       ` Jason Wang
2021-06-21  4:16 ` [PATCH 07/18] vhost-vdpa: tweak the error label " Jason Wang
2021-06-23 15:03   ` Stefano Garzarella
2021-07-06  8:03     ` Jason Wang
2021-07-06  8:10       ` Jason Wang
2021-07-06  8:27         ` Stefano Garzarella
2021-07-06  8:28           ` Jason Wang
2021-06-21  4:16 ` [PATCH 08/18] vhost-vdpa: fix the wrong assertion in vhost_vdpa_init() Jason Wang
2021-06-23 15:04   ` Stefano Garzarella
2021-06-21  4:16 ` [PATCH 09/18] vhost-vdpa: remove the unncessary queue_index assignment Jason Wang
2021-06-23 15:05   ` Stefano Garzarella
2021-06-21  4:16 ` [PATCH 10/18] vhost-vdpa: open device fd in net_init_vhost_vdpa() Jason Wang
2021-06-23 15:07   ` Stefano Garzarella
2021-06-21  4:16 ` [PATCH 11/18] vhost-vdpa: classify one time request Jason Wang
2021-06-21  4:16 ` [PATCH 12/18] vhost-vdpa: prepare for the multiqueue support Jason Wang
2021-06-21  4:16 ` [PATCH 13/18] vhost-vdpa: let net_vhost_vdpa_init() returns NetClientState * Jason Wang
2021-06-21  4:16 ` [PATCH 14/18] net: introduce control client Jason Wang
2021-06-21  4:16 ` [PATCH 15/18] vhost-net: control virtqueue support Jason Wang
2021-06-24  7:42   ` Eli Cohen [this message]
2021-06-24  7:44     ` Jason Wang
2021-06-30 17:33   ` Eugenio Perez Martin
2021-07-01  3:03     ` Jason Wang
2021-06-21  4:16 ` [PATCH 16/18] virito-net: use "qps" instead of "queues" when possible Jason Wang
2021-06-21  4:16 ` [PATCH 17/18] virtio-net: vhost control virtqueue support Jason Wang
2021-06-21  4:16 ` [PATCH 18/18] vhost-vdpa: multiqueue support Jason Wang
2021-07-01  6:51   ` Eugenio Perez Martin
2021-07-01  8:15     ` Jason Wang
2021-07-06  7:46     ` Jason Wang
2021-06-21  4:33 ` [PATCH 00/18] vhost-vDPA multiqueue no-reply
