All of lore.kernel.org
 help / color / mirror / Atom feed
From: Eugenio Perez Martin <eperezma@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: qemu-devel <qemu-devel@nongnu.org>,
	Gautam Dawar <gdawar@xilinx.com>,
	 Paolo Bonzini <pbonzini@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	 Stefano Garzarella <sgarzare@redhat.com>,
	Eric Blake <eblake@redhat.com>,
	 Zhu Lingshan <lingshan.zhu@intel.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	 Markus Armbruster <armbru@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	Parav Pandit <parav@mellanox.com>,
	 Laurent Vivier <lvivier@redhat.com>,
	Liuxiangdong <liuxiangdong5@huawei.com>,
	 Eli Cohen <eli@mellanox.com>, Cindy Lu <lulu@redhat.com>,
	 Harpreet Singh Anand <hanand@xilinx.com>,
	"Gonglei (Arei)" <arei.gonglei@huawei.com>
Subject: Re: [PATCH v2 15/19] vdpa: manual forward CVQ buffers
Date: Fri, 15 Jul 2022 11:01:50 +0200	[thread overview]
Message-ID: <CAJaqyWeFS9wUxxZh3oQWaMJgdtf-2Z5XHidMBWGCWSco5B77LQ@mail.gmail.com> (raw)
In-Reply-To: <CACGkMEuETovdsG1J=bbYdvevCxErbobt8WA9P4wAp=s8sfjing@mail.gmail.com>

On Fri, Jul 15, 2022 at 10:44 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Fri, Jul 15, 2022 at 1:34 PM Eugenio Perez Martin
> <eperezma@redhat.com> wrote:
> >
> > On Fri, Jul 15, 2022 at 6:08 AM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Fri, Jul 15, 2022 at 12:32 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > > >
> > > > Do a simple forwarding of CVQ buffers, the same work SVQ could do but
> > > > through callbacks. No functional change intended.
> > > >
> > > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > > ---
> > > >  include/hw/virtio/vhost-vdpa.h |  3 ++
> > > >  hw/virtio/vhost-vdpa.c         |  3 +-
> > > >  net/vhost-vdpa.c               | 58 ++++++++++++++++++++++++++++++++++
> > > >  3 files changed, 63 insertions(+), 1 deletion(-)
> > > >
> > > > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > > > index 7214eb47dc..1111d85643 100644
> > > > --- a/include/hw/virtio/vhost-vdpa.h
> > > > +++ b/include/hw/virtio/vhost-vdpa.h
> > > > @@ -15,6 +15,7 @@
> > > >  #include <gmodule.h>
> > > >
> > > >  #include "hw/virtio/vhost-iova-tree.h"
> > > > +#include "hw/virtio/vhost-shadow-virtqueue.h"
> > > >  #include "hw/virtio/virtio.h"
> > > >  #include "standard-headers/linux/vhost_types.h"
> > > >
> > > > @@ -35,6 +36,8 @@ typedef struct vhost_vdpa {
> > > >      /* IOVA mapping used by the Shadow Virtqueue */
> > > >      VhostIOVATree *iova_tree;
> > > >      GPtrArray *shadow_vqs;
> > > > +    const VhostShadowVirtqueueOps *shadow_vq_ops;
> > > > +    void *shadow_vq_ops_opaque;
> > > >      struct vhost_dev *dev;
> > > >      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> > > >  } VhostVDPA;
> > > > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > > > index 96997210be..beaaa7049a 100644
> > > > --- a/hw/virtio/vhost-vdpa.c
> > > > +++ b/hw/virtio/vhost-vdpa.c
> > > > @@ -419,7 +419,8 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> > > >      for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > > >          g_autoptr(VhostShadowVirtqueue) svq;
> > > >
> > > > -        svq = vhost_svq_new(v->iova_tree, NULL, NULL);
> > > > +        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > > > +                            v->shadow_vq_ops_opaque);
> > > >          if (unlikely(!svq)) {
> > > >              error_setg(errp, "Cannot create svq %u", n);
> > > >              return -1;
> > > > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > > > index df1e69ee72..805c9dd6b6 100644
> > > > --- a/net/vhost-vdpa.c
> > > > +++ b/net/vhost-vdpa.c
> > > > @@ -11,11 +11,14 @@
> > > >
> > > >  #include "qemu/osdep.h"
> > > >  #include "clients.h"
> > > > +#include "hw/virtio/virtio-net.h"
> > > >  #include "net/vhost_net.h"
> > > >  #include "net/vhost-vdpa.h"
> > > >  #include "hw/virtio/vhost-vdpa.h"
> > > >  #include "qemu/config-file.h"
> > > >  #include "qemu/error-report.h"
> > > > +#include "qemu/log.h"
> > > > +#include "qemu/memalign.h"
> > > >  #include "qemu/option.h"
> > > >  #include "qapi/error.h"
> > > >  #include <linux/vhost.h>
> > > > @@ -187,6 +190,57 @@ static NetClientInfo net_vhost_vdpa_info = {
> > > >          .check_peer_type = vhost_vdpa_check_peer_type,
> > > >  };
> > > >
> > > > +/**
> > > > + * Forward buffer for the moment.
> > > > + */
> > > > +static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
> > > > +                                            SVQElement *svq_elem, void *opaque)
> > > > +{
> > > > +    VirtQueueElement *elem = &svq_elem->elem;
> > > > +    unsigned int n = elem->out_num + elem->in_num;
> > > > +    g_autofree struct iovec *dev_buffers = g_new(struct iovec, n);
> > > > +    size_t in_len, dev_written;
> > > > +    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
> > > > +    int r;
> > > > +
> > > > +    memcpy(dev_buffers, elem->out_sg, elem->out_num);
> > > > +    memcpy(dev_buffers + elem->out_num, elem->in_sg, elem->in_num);
> > > > +
> > > > +    r = vhost_svq_add(svq, &dev_buffers[0], elem->out_num, &dev_buffers[1],
> > > > +                      elem->in_num, svq_elem);
> > > > +    if (unlikely(r != 0)) {
> > > > +        if (unlikely(r == -ENOSPC)) {
> > > > +            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
> > > > +                          __func__);
> > > > +        }
> > > > +        goto out;
> > > > +    }
> > > > +
> > > > +    /*
> > > > +     * We can poll here since we've had BQL from the time we sent the
> > > > +     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
> > > > +     * when BQL is released
> > > > +     */
> > > > +    dev_written = vhost_svq_poll(svq);
> > > > +    if (unlikely(dev_written < sizeof(status))) {
> > > > +        error_report("Insufficient written data (%zu)", dev_written);
> > > > +    }
> > > > +
> > > > +out:
> > > > +    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
> > > > +                          sizeof(status));
> > > > +    if (unlikely(in_len < sizeof(status))) {
> > > > +        error_report("Bad device CVQ written length");
> > > > +    }
> > > > +    vhost_svq_push_elem(svq, svq_elem, MIN(in_len, sizeof(status)));
> > > > +    g_free(svq_elem);
> > > > +    return r;
> > > > +}
> > > > +
> > > > +static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
> > > > +    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
> > > > +};
> > >
> > > I wonder if it's possible to even remove this handler. Can we let the
> > > kick to be handled by virtio_net_handler_ctrl() in virtio-net.c?
> > >
> >
> > I kind of drafted that here:
> > https://lists.nongnu.org/archive/html/qemu-devel/2022-07/msg02652.html
> >
> > But I'm not sure about the part of not enabling the guest event
> > notifiers. I'd say we would need to move SVQ to the VirtIODevice
> > itself, which seems rather complicated at this moment.
> >
> > Without applying that patch, all of the events of vDPA devices go
> > either to device itself or to the SVQ registered handlers. If we go
> > that route, we add the vq event handler, and that would depend on the
> > svq state + specific vq.
>
> So what's in my mind is to let it work more like existing vhost
> backends. In that case the shadow virtqueue is only used as one way to
> communicate with the vhost-backend:
>
> 1) vhost-net using TAP ioctl
> 2) vhost-user using UNIX domain socket
> 3) vhost-vDPA is using shadow cvq
>

Yes, but how do you get SVQ event handlers called?

Data vqs currently register themselves via
vhost_svq_set_svq_kick_fd / vhost_svq_new. With this proposal, data
vqs would keep working that way, but the cvq would be registered by
other means, making SVQ hard to reason about.

With this proposal, the right thing to do is to move everything to the
virtqueues' handle_output callbacks. I like the proposal a lot: that
way we slim down SVQ by a lot, and SVQ will benefit from every
improvement we make to VirtQueue kick handling (coroutines, iothreads,
...). However, moving everything comes with problems: SVQ code can be
called outside of the event loop.

Both that and not calling vhost_dev_enable_notifiers will be severely
untested at this point; I'm not sure whether other vhost devices do
that, but I think not.

Thanks!



  reply	other threads:[~2022-07-15  9:07 UTC|newest]

Thread overview: 42+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-07-14 16:31 [PATCH v2 00/19] vdpa net devices Rx filter change notification with Shadow VQ Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 01/19] vhost: move descriptor translation to vhost_svq_vring_write_descs Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 02/19] virtio-net: Expose MAC_TABLE_ENTRIES Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 03/19] virtio-net: Expose ctrl virtqueue logic Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 04/19] vhost: Reorder vhost_svq_kick Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 05/19] vhost: Move vhost_svq_kick call to vhost_svq_add Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 06/19] vhost: Check for queue full at vhost_svq_add Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 07/19] vhost: Decouple vhost_svq_add from VirtQueueElement Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 08/19] vhost: Add SVQElement Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 09/19] vhost: Track number of descs in SVQElement Eugenio Pérez
2022-07-15  4:10   ` Jason Wang
2022-07-15  5:41     ` Eugenio Perez Martin
2022-07-14 16:31 ` [PATCH v2 10/19] vhost: add vhost_svq_push_elem Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 11/19] vhost: Expose vhost_svq_add Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 12/19] vhost: add vhost_svq_poll Eugenio Pérez
2022-07-15  3:58   ` Jason Wang
2022-07-15  5:38     ` Eugenio Perez Martin
2022-07-15  8:47       ` Jason Wang
2022-07-15 17:05         ` Eugenio Perez Martin
2022-07-19  7:38           ` Jason Wang
2022-07-19  8:42             ` Eugenio Perez Martin
2022-07-19  8:48               ` Jason Wang
2022-07-19  9:09                 ` Eugenio Perez Martin
2022-07-14 16:31 ` [PATCH v2 13/19] vhost: Add svq avail_handler callback Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 14/19] vdpa: Export vhost_vdpa_dma_map and unmap calls Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 15/19] vdpa: manual forward CVQ buffers Eugenio Pérez
2022-07-15  4:08   ` Jason Wang
2022-07-15  5:33     ` Eugenio Perez Martin
2022-07-15  8:44       ` Jason Wang
2022-07-15  9:01         ` Eugenio Perez Martin [this message]
2022-07-14 16:31 ` [PATCH v2 16/19] vdpa: Buffer CVQ support on shadow virtqueue Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 17/19] vdpa: Extract get features part from vhost_vdpa_get_max_queue_pairs Eugenio Pérez
2022-07-14 16:31 ` [PATCH v2 18/19] vdpa: Add device migration blocker Eugenio Pérez
2022-07-15  4:03   ` Jason Wang
2022-07-15  5:39     ` Eugenio Perez Martin
2022-07-15  8:50       ` Jason Wang
2022-07-15  9:05         ` Eugenio Perez Martin
2022-07-15 16:13           ` Eugenio Perez Martin
2022-07-22 13:29         ` Eugenio Perez Martin
2022-07-14 16:31 ` [PATCH v2 19/19] vdpa: Add x-svq to NetdevVhostVDPAOptions Eugenio Pérez
2022-07-15  4:13   ` Jason Wang
2022-07-15  6:09     ` Eugenio Perez Martin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CAJaqyWeFS9wUxxZh3oQWaMJgdtf-2Z5XHidMBWGCWSco5B77LQ@mail.gmail.com \
    --to=eperezma@redhat.com \
    --cc=arei.gonglei@huawei.com \
    --cc=armbru@redhat.com \
    --cc=cohuck@redhat.com \
    --cc=eblake@redhat.com \
    --cc=eli@mellanox.com \
    --cc=gdawar@xilinx.com \
    --cc=hanand@xilinx.com \
    --cc=jasowang@redhat.com \
    --cc=lingshan.zhu@intel.com \
    --cc=liuxiangdong5@huawei.com \
    --cc=lulu@redhat.com \
    --cc=lvivier@redhat.com \
    --cc=mst@redhat.com \
    --cc=parav@mellanox.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=sgarzare@redhat.com \
    --cc=stefanha@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.