From: "Michael S. Tsirkin" <mst@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: "Zhu, Lingshan" <lingshan.zhu@intel.com>,
alex.williamson@redhat.com, pbonzini@redhat.com,
sean.j.christopherson@intel.com, wanpengli@tencent.com,
virtualization@lists.linux-foundation.org,
netdev@vger.kernel.org, kvm@vger.kernel.org, eli@mellanox.com,
shahafs@mellanox.com, parav@mellanox.com
Subject: Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
Date: Tue, 4 Aug 2020 05:21:21 -0400 [thread overview]
Message-ID: <20200804052050-mutt-send-email-mst@kernel.org> (raw)
In-Reply-To: <156b8d71-6870-c163-fdfa-35bf4701987d@redhat.com>
On Tue, Aug 04, 2020 at 04:53:39PM +0800, Jason Wang wrote:
>
> On 2020/8/4 下午4:42, Zhu, Lingshan wrote:
> >
> >
> > On 8/4/2020 4:38 PM, Jason Wang wrote:
> > >
> > > On 2020/7/31 下午2:55, Zhu Lingshan wrote:
> > > > This commit introduces struct vhost_vring_call which replaced
> > > > raw struct eventfd_ctx *call_ctx in struct vhost_virtqueue.
> > > > Besides eventfd_ctx, it contains a spin lock and an
> > > > irq_bypass_producer in its structure.
> > > >
> > > > Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> > > > Suggested-by: Jason Wang <jasowang@redhat.com>
> > > > ---
> > > > drivers/vhost/vdpa.c | 4 ++--
> > > > drivers/vhost/vhost.c | 22 ++++++++++++++++------
> > > > drivers/vhost/vhost.h | 9 ++++++++-
> > > > 3 files changed, 26 insertions(+), 9 deletions(-)
> > > >
> > > > diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> > > > index a54b60d6623f..df3cf386b0cd 100644
> > > > --- a/drivers/vhost/vdpa.c
> > > > +++ b/drivers/vhost/vdpa.c
> > > > @@ -96,7 +96,7 @@ static void handle_vq_kick(struct vhost_work *work)
> > > > static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
> > > > {
> > > > struct vhost_virtqueue *vq = private;
> > > > - struct eventfd_ctx *call_ctx = vq->call_ctx;
> > > > + struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
> > > > if (call_ctx)
> > > > eventfd_signal(call_ctx, 1);
> > > > @@ -382,7 +382,7 @@ static long vhost_vdpa_vring_ioctl(struct
> > > > vhost_vdpa *v, unsigned int cmd,
> > > > break;
> > > > case VHOST_SET_VRING_CALL:
> > > > - if (vq->call_ctx) {
> > > > + if (vq->call_ctx.ctx) {
> > > > cb.callback = vhost_vdpa_virtqueue_cb;
> > > > cb.private = vq;
> > > > } else {
> > > > diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> > > > index d7b8df3edffc..9f1a845a9302 100644
> > > > --- a/drivers/vhost/vhost.c
> > > > +++ b/drivers/vhost/vhost.c
> > > > @@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct
> > > > vhost_dev *d)
> > > > __vhost_vq_meta_reset(d->vqs[i]);
> > > > }
> > > > +static void vhost_vring_call_reset(struct vhost_vring_call
> > > > *call_ctx)
> > > > +{
> > > > + call_ctx->ctx = NULL;
> > > > + memset(&call_ctx->producer, 0x0, sizeof(struct
> > > > irq_bypass_producer));
> > > > + spin_lock_init(&call_ctx->ctx_lock);
> > > > +}
> > > > +
> > > > static void vhost_vq_reset(struct vhost_dev *dev,
> > > > struct vhost_virtqueue *vq)
> > > > {
> > > > @@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
> > > > vq->log_base = NULL;
> > > > vq->error_ctx = NULL;
> > > > vq->kick = NULL;
> > > > - vq->call_ctx = NULL;
> > > > vq->log_ctx = NULL;
> > > > vhost_reset_is_le(vq);
> > > > vhost_disable_cross_endian(vq);
> > > > vq->busyloop_timeout = 0;
> > > > vq->umem = NULL;
> > > > vq->iotlb = NULL;
> > > > + vhost_vring_call_reset(&vq->call_ctx);
> > > > __vhost_vq_meta_reset(vq);
> > > > }
> > > > @@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
> > > > eventfd_ctx_put(dev->vqs[i]->error_ctx);
> > > > if (dev->vqs[i]->kick)
> > > > fput(dev->vqs[i]->kick);
> > > > - if (dev->vqs[i]->call_ctx)
> > > > - eventfd_ctx_put(dev->vqs[i]->call_ctx);
> > > > + if (dev->vqs[i]->call_ctx.ctx)
> > > > + eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
> > > > vhost_vq_reset(dev, dev->vqs[i]);
> > > > }
> > > > vhost_dev_free_iovecs(dev);
> > > > @@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev
> > > > *d, unsigned int ioctl, void __user *arg
> > > > r = PTR_ERR(ctx);
> > > > break;
> > > > }
> > > > - swap(ctx, vq->call_ctx);
> > > > +
> > > > + spin_lock(&vq->call_ctx.ctx_lock);
> > > > + swap(ctx, vq->call_ctx.ctx);
> > > > + spin_unlock(&vq->call_ctx.ctx_lock);
> > > > break;
> > > > case VHOST_SET_VRING_ERR:
> > > > if (copy_from_user(&f, argp, sizeof f)) {
> > > > @@ -2440,8 +2450,8 @@ static bool vhost_notify(struct vhost_dev
> > > > *dev, struct vhost_virtqueue *vq)
> > > > void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
> > > > {
> > > > /* Signal the Guest tell them we used something up. */
> > > > - if (vq->call_ctx && vhost_notify(dev, vq))
> > > > - eventfd_signal(vq->call_ctx, 1);
> > > > + if (vq->call_ctx.ctx && vhost_notify(dev, vq))
> > > > + eventfd_signal(vq->call_ctx.ctx, 1);
> > > > }
> > > > EXPORT_SYMBOL_GPL(vhost_signal);
> > > > diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> > > > index c8e96a095d3b..38eb1aa3b68d 100644
> > > > --- a/drivers/vhost/vhost.h
> > > > +++ b/drivers/vhost/vhost.h
> > > > @@ -13,6 +13,7 @@
> > > > #include <linux/virtio_ring.h>
> > > > #include <linux/atomic.h>
> > > > #include <linux/vhost_iotlb.h>
> > > > +#include <linux/irqbypass.h>
> > > > struct vhost_work;
> > > > typedef void (*vhost_work_fn_t)(struct vhost_work *work);
> > > > @@ -60,6 +61,12 @@ enum vhost_uaddr_type {
> > > > VHOST_NUM_ADDRS = 3,
> > > > };
> > > > +struct vhost_vring_call {
> > > > + struct eventfd_ctx *ctx;
> > > > + struct irq_bypass_producer producer;
> > > > + spinlock_t ctx_lock;
> > >
> > >
> > > It's not clear to me why we need ctx_lock here.
> > >
> > > Thanks
> > Hi Jason,
> >
> > we use this lock to protect the eventfd_ctx and irq from race conditions,
>
>
> We don't support irq notification from vDPA device driver in this version,
> do we still have race condition?
>
> Thanks
Jason, I'm not sure what you are trying to say here.
>
> > are you suggesting a better name?
> >
> > Thanks
> > >
> > >
> > > > +};
> > > > +
> > > > /* The virtqueue structure describes a queue attached to a device. */
> > > > struct vhost_virtqueue {
> > > > struct vhost_dev *dev;
> > > > @@ -72,7 +79,7 @@ struct vhost_virtqueue {
> > > > vring_used_t __user *used;
> > > > const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
> > > > struct file *kick;
> > > > - struct eventfd_ctx *call_ctx;
> > > > + struct vhost_vring_call call_ctx;
> > > > struct eventfd_ctx *error_ctx;
> > > > struct eventfd_ctx *log_ctx;
> > >
next prev parent reply other threads:[~2020-08-04 9:21 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-31 6:55 [PATCH V5 0/6] IRQ offloading for vDPA Zhu Lingshan
2020-07-31 6:55 ` [PATCH V5 1/6] vhost: introduce vhost_vring_call Zhu Lingshan
2020-08-04 8:38 ` Jason Wang
[not found] ` <d51dd4e3-7513-c771-104c-b61f9ee70f30@intel.com>
2020-08-04 8:53 ` Jason Wang
2020-08-04 9:21 ` Michael S. Tsirkin [this message]
2020-08-05 2:16 ` Jason Wang
[not found] ` <bf10cde9-db86-a1ac-e2a8-363735e49afb@intel.com>
2020-08-05 5:53 ` Jason Wang
2020-08-10 13:37 ` Michael S. Tsirkin
2020-08-11 2:53 ` Jason Wang
[not found] ` <4605de34-c426-33d4-714b-e03716d0374c@intel.com>
2020-08-05 2:20 ` Jason Wang
2020-07-31 6:55 ` [PATCH V5 2/6] kvm: detect assigned device via irqbypass manager Zhu Lingshan
2020-07-31 6:55 ` [PATCH V5 3/6] vDPA: add get_vq_irq() in vdpa_config_ops Zhu Lingshan
2020-07-31 6:55 ` [PATCH V5 4/6] vhost_vdpa: implement IRQ offloading in vhost_vdpa Zhu Lingshan
2020-08-04 8:51 ` Jason Wang
[not found] ` <ae5385dc-6637-c5a3-b00a-02f66bb9a85f@intel.com>
2020-08-04 9:36 ` Michael S. Tsirkin
2020-08-05 2:36 ` Jason Wang
[not found] ` <cb01a490-e9e6-42f5-b6c3-caefa2d91b9f@intel.com>
2020-08-05 5:51 ` Jason Wang
2020-08-05 8:06 ` Jason Wang
2020-08-05 8:12 ` Zhu, Lingshan
2020-07-31 6:55 ` [PATCH V5 5/6] ifcvf: implement vdpa_config_ops.get_vq_irq() Zhu Lingshan
2020-07-31 6:55 ` [PATCH V5 6/6] irqbypass: do not start cons/prod when failed connect Zhu Lingshan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200804052050-mutt-send-email-mst@kernel.org \
--to=mst@redhat.com \
--cc=alex.williamson@redhat.com \
--cc=eli@mellanox.com \
--cc=jasowang@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=lingshan.zhu@intel.com \
--cc=netdev@vger.kernel.org \
--cc=parav@mellanox.com \
--cc=pbonzini@redhat.com \
--cc=sean.j.christopherson@intel.com \
--cc=shahafs@mellanox.com \
--cc=virtualization@lists.linux-foundation.org \
--cc=wanpengli@tencent.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).