From: Cindy Lu <lulu@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: QEMU Developers <qemu-devel@nongnu.org>,
Michael Tsirkin <mst@redhat.com>
Subject: Re: [PATCH v6 1/9] hw: Add check for queue number
Date: Thu, 29 Apr 2021 11:08:10 +0800 [thread overview]
Message-ID: <CACLfguU1Vg0YOYK9hzcbUp0pvJ+E7dhZBSbxKDUzERYLjevdDA@mail.gmail.com> (raw)
In-Reply-To: <1d1c7244-ac00-94c0-8f53-90b1b93c41a0@redhat.com>
On Tue, Apr 27, 2021 at 1:39 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
> 在 2021/4/27 上午11:39, Cindy Lu 写道:
> > In order to support the configure interrupt, we will use queue number -1
> > for the configure interrupt.
> > Since none of these devices support the configure interrupt,
> > we will add a check here: if the idx is -1, the function
> > will return.
>
>
> The title is confusing since the change is specific for the guest notifiers.
>
> A better one would be "virtio: guest notifier support for config interrupt"
>
Sure, will fix this.
>
> >
> > Signed-off-by: Cindy Lu <lulu@redhat.com>
> > ---
> > hw/display/vhost-user-gpu.c | 8 ++++++--
> > hw/net/virtio-net.c | 10 +++++++---
> > hw/virtio/vhost-user-fs.c | 11 +++++++----
> > hw/virtio/vhost-vsock-common.c | 8 ++++++--
> > hw/virtio/virtio-crypto.c | 8 ++++++--
> > 5 files changed, 32 insertions(+), 13 deletions(-)
> >
> > diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
> > index 51f1747c4a..d8e26cedf1 100644
> > --- a/hw/display/vhost-user-gpu.c
> > +++ b/hw/display/vhost-user-gpu.c
> > @@ -490,7 +490,9 @@ static bool
> > vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
> > {
> > VhostUserGPU *g = VHOST_USER_GPU(vdev);
> > -
> > + if (idx == -1) {
>
>
> Let's introduce a macro for this instead of the magic number.
>
> Thanks
>
>
Sure, will fix this.
> > + return false;
> > + }
> > return vhost_virtqueue_pending(&g->vhost->dev, idx);
> > }
> >
> > @@ -498,7 +500,9 @@ static void
> > vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
> > {
> > VhostUserGPU *g = VHOST_USER_GPU(vdev);
> > -
> > + if (idx == -1) {
> > + return;
> > + }
> > vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
> > }
> >
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index 9179013ac4..78ccaa228c 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -3060,7 +3060,10 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
> > VirtIONet *n = VIRTIO_NET(vdev);
> > NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
> > assert(n->vhost_started);
> > - return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> > + if (idx != -1) {
> > + return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
> > + }
> > + return false;
> > }
> >
> > static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > @@ -3069,8 +3072,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > VirtIONet *n = VIRTIO_NET(vdev);
> > NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
> > assert(n->vhost_started);
> > - vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
> > - vdev, idx, mask);
> > + if (idx != -1) {
> > + vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
> > + }
> > }
> >
> > static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
> > diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
> > index 1bc5d03a00..37424c2193 100644
> > --- a/hw/virtio/vhost-user-fs.c
> > +++ b/hw/virtio/vhost-user-fs.c
> > @@ -142,18 +142,21 @@ static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> > */
> > }
> >
> > -static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > - bool mask)
> > +static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
> > {
> > VHostUserFS *fs = VHOST_USER_FS(vdev);
> > -
> > + if (idx == -1) {
> > + return;
> > + }
> > vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
> > }
> >
> > static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
> > {
> > VHostUserFS *fs = VHOST_USER_FS(vdev);
> > -
> > + if (idx == -1) {
> > + return false;
> > + }
> > return vhost_virtqueue_pending(&fs->vhost_dev, idx);
> > }
> >
> > diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> > index 5b2ebf3496..0adf823d37 100644
> > --- a/hw/virtio/vhost-vsock-common.c
> > +++ b/hw/virtio/vhost-vsock-common.c
> > @@ -100,7 +100,9 @@ static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > bool mask)
> > {
> > VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> > -
> > + if (idx == -1) {
> > + return;
> > + }
> > vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask);
> > }
> >
> > @@ -108,7 +110,9 @@ static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev,
> > int idx)
> > {
> > VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> > -
> > + if (idx == -1) {
> > + return false;
> > + }
> > return vhost_virtqueue_pending(&vvc->vhost_dev, idx);
> > }
> >
> > diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> > index 54f9bbb789..c47f4ffb24 100644
> > --- a/hw/virtio/virtio-crypto.c
> > +++ b/hw/virtio/virtio-crypto.c
> > @@ -947,7 +947,9 @@ static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
> > int queue = virtio_crypto_vq2q(idx);
> >
> > assert(vcrypto->vhost_started);
> > -
> > + if (idx == -1) {
> > + return;
> > + }
> > cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
> > }
> >
> > @@ -957,7 +959,9 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
> > int queue = virtio_crypto_vq2q(idx);
> >
> > assert(vcrypto->vhost_started);
> > -
> > + if (idx == -1) {
> > + return false;
> > + }
> > return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
> > }
> >
>
next prev parent reply other threads:[~2021-04-29 3:09 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-04-27 3:39 [PATCH v6 0/9] vhost-vdpa: add support for configure interrupt Cindy Lu
2021-04-27 3:39 ` [PATCH v6 1/9] hw: Add check for queue number Cindy Lu
2021-04-27 5:39 ` Jason Wang
2021-04-29 3:08 ` Cindy Lu [this message]
2021-04-27 3:39 ` [PATCH v6 2/9] virtio-pci:decouple virtqueue from interrupt setting process Cindy Lu
2021-04-27 6:40 ` Jason Wang
2021-04-27 7:17 ` Jason Wang
2021-04-27 3:39 ` [PATCH v6 3/9] vhost: add new call back function for config interrupt Cindy Lu
2021-04-27 3:39 ` [PATCH v6 4/9] vhost-vdpa: add support for config interrupt call back Cindy Lu
2021-04-27 3:39 ` [PATCH v6 5/9] vhost:add support for configure interrupt Cindy Lu
2021-04-27 7:04 ` Jason Wang
2021-04-27 3:39 ` [PATCH v6 6/9] virtio-mmio: add " Cindy Lu
2021-04-27 3:39 ` [PATCH v6 7/9] virtio-pci: " Cindy Lu
2021-04-27 7:12 ` Jason Wang
2021-04-29 3:07 ` Cindy Lu
2021-04-27 3:39 ` [PATCH v6 8/9] virtio: decouple virtqueue from set notifier fd handler Cindy Lu
2021-04-27 7:14 ` Jason Wang
2021-04-27 3:39 ` [PATCH v6 9/9] virtio-net: add peer_deleted check in virtio_net_handle_rx Cindy Lu
2021-04-27 7:15 ` Jason Wang
2021-04-27 3:57 ` [PATCH v6 0/9] vhost-vdpa: add support for configure interrupt no-reply
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=CACLfguU1Vg0YOYK9hzcbUp0pvJ+E7dhZBSbxKDUzERYLjevdDA@mail.gmail.com \
--to=lulu@redhat.com \
--cc=jasowang@redhat.com \
--cc=mst@redhat.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).