* Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
[not found] ` <20200731065533.4144-2-lingshan.zhu@intel.com>
@ 2020-08-04 8:38 ` Jason Wang
[not found] ` <d51dd4e3-7513-c771-104c-b61f9ee70f30@intel.com>
0 siblings, 1 reply; 13+ messages in thread
From: Jason Wang @ 2020-08-04 8:38 UTC (permalink / raw)
To: Zhu Lingshan, alex.williamson, mst, pbonzini,
sean.j.christopherson, wanpengli
Cc: shahafs, parav, kvm, netdev, virtualization, eli
On 2020/7/31 下午2:55, Zhu Lingshan wrote:
> This commit introduces struct vhost_vring_call which replaced
> raw struct eventfd_ctx *call_ctx in struct vhost_virtqueue.
> Besides eventfd_ctx, it contains a spin lock and an
> irq_bypass_producer in its structure.
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> Suggested-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/vhost/vdpa.c | 4 ++--
> drivers/vhost/vhost.c | 22 ++++++++++++++++------
> drivers/vhost/vhost.h | 9 ++++++++-
> 3 files changed, 26 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index a54b60d6623f..df3cf386b0cd 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -96,7 +96,7 @@ static void handle_vq_kick(struct vhost_work *work)
> static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
> {
> struct vhost_virtqueue *vq = private;
> - struct eventfd_ctx *call_ctx = vq->call_ctx;
> + struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
>
> if (call_ctx)
> eventfd_signal(call_ctx, 1);
> @@ -382,7 +382,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
> break;
>
> case VHOST_SET_VRING_CALL:
> - if (vq->call_ctx) {
> + if (vq->call_ctx.ctx) {
> cb.callback = vhost_vdpa_virtqueue_cb;
> cb.private = vq;
> } else {
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index d7b8df3edffc..9f1a845a9302 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
> __vhost_vq_meta_reset(d->vqs[i]);
> }
>
> +static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
> +{
> + call_ctx->ctx = NULL;
> + memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
> + spin_lock_init(&call_ctx->ctx_lock);
> +}
> +
> static void vhost_vq_reset(struct vhost_dev *dev,
> struct vhost_virtqueue *vq)
> {
> @@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
> vq->log_base = NULL;
> vq->error_ctx = NULL;
> vq->kick = NULL;
> - vq->call_ctx = NULL;
> vq->log_ctx = NULL;
> vhost_reset_is_le(vq);
> vhost_disable_cross_endian(vq);
> vq->busyloop_timeout = 0;
> vq->umem = NULL;
> vq->iotlb = NULL;
> + vhost_vring_call_reset(&vq->call_ctx);
> __vhost_vq_meta_reset(vq);
> }
>
> @@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
> eventfd_ctx_put(dev->vqs[i]->error_ctx);
> if (dev->vqs[i]->kick)
> fput(dev->vqs[i]->kick);
> - if (dev->vqs[i]->call_ctx)
> - eventfd_ctx_put(dev->vqs[i]->call_ctx);
> + if (dev->vqs[i]->call_ctx.ctx)
> + eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
> vhost_vq_reset(dev, dev->vqs[i]);
> }
> vhost_dev_free_iovecs(dev);
> @@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
> r = PTR_ERR(ctx);
> break;
> }
> - swap(ctx, vq->call_ctx);
> +
> + spin_lock(&vq->call_ctx.ctx_lock);
> + swap(ctx, vq->call_ctx.ctx);
> + spin_unlock(&vq->call_ctx.ctx_lock);
> break;
> case VHOST_SET_VRING_ERR:
> if (copy_from_user(&f, argp, sizeof f)) {
> @@ -2440,8 +2450,8 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
> void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
> {
> /* Signal the Guest tell them we used something up. */
> - if (vq->call_ctx && vhost_notify(dev, vq))
> - eventfd_signal(vq->call_ctx, 1);
> + if (vq->call_ctx.ctx && vhost_notify(dev, vq))
> + eventfd_signal(vq->call_ctx.ctx, 1);
> }
> EXPORT_SYMBOL_GPL(vhost_signal);
>
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index c8e96a095d3b..38eb1aa3b68d 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -13,6 +13,7 @@
> #include <linux/virtio_ring.h>
> #include <linux/atomic.h>
> #include <linux/vhost_iotlb.h>
> +#include <linux/irqbypass.h>
>
> struct vhost_work;
> typedef void (*vhost_work_fn_t)(struct vhost_work *work);
> @@ -60,6 +61,12 @@ enum vhost_uaddr_type {
> VHOST_NUM_ADDRS = 3,
> };
>
> +struct vhost_vring_call {
> + struct eventfd_ctx *ctx;
> + struct irq_bypass_producer producer;
> + spinlock_t ctx_lock;
It's not clear to me why we need ctx_lock here.
Thanks
> +};
> +
> /* The virtqueue structure describes a queue attached to a device. */
> struct vhost_virtqueue {
> struct vhost_dev *dev;
> @@ -72,7 +79,7 @@ struct vhost_virtqueue {
> vring_used_t __user *used;
> const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
> struct file *kick;
> - struct eventfd_ctx *call_ctx;
> + struct vhost_vring_call call_ctx;
> struct eventfd_ctx *error_ctx;
> struct eventfd_ctx *log_ctx;
>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 4/6] vhost_vdpa: implement IRQ offloading in vhost_vdpa
[not found] ` <20200731065533.4144-5-lingshan.zhu@intel.com>
@ 2020-08-04 8:51 ` Jason Wang
[not found] ` <ae5385dc-6637-c5a3-b00a-02f66bb9a85f@intel.com>
2020-08-05 8:06 ` Jason Wang
1 sibling, 1 reply; 13+ messages in thread
From: Jason Wang @ 2020-08-04 8:51 UTC (permalink / raw)
To: Zhu Lingshan, alex.williamson, mst, pbonzini,
sean.j.christopherson, wanpengli
Cc: shahafs, parav, kvm, netdev, virtualization, eli
On 2020/7/31 下午2:55, Zhu Lingshan wrote:
> This patch introduce a set of functions for setup/unsetup
> and update irq offloading respectively by register/unregister
> and re-register the irq_bypass_producer.
>
> With these functions, this commit can setup/unsetup
> irq offloading through setting DRIVER_OK/!DRIVER_OK, and
> update irq offloading through SET_VRING_CALL.
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> Suggested-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/vhost/Kconfig | 1 +
> drivers/vhost/vdpa.c | 79 ++++++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 79 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
> index d3688c6afb87..587fbae06182 100644
> --- a/drivers/vhost/Kconfig
> +++ b/drivers/vhost/Kconfig
> @@ -65,6 +65,7 @@ config VHOST_VDPA
> tristate "Vhost driver for vDPA-based backend"
> depends on EVENTFD
> select VHOST
> + select IRQ_BYPASS_MANAGER
> depends on VDPA
> help
> This kernel module can be loaded in host kernel to accelerate
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index df3cf386b0cd..278ea2f00172 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -115,6 +115,55 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
> return IRQ_HANDLED;
> }
>
> +static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
> +{
> + struct vhost_virtqueue *vq = &v->vqs[qid];
> + const struct vdpa_config_ops *ops = v->vdpa->config;
> + struct vdpa_device *vdpa = v->vdpa;
> + int ret, irq;
> +
> + spin_lock(&vq->call_ctx.ctx_lock);
> + irq = ops->get_vq_irq(vdpa, qid);
> + if (!vq->call_ctx.ctx || irq < 0) {
> + spin_unlock(&vq->call_ctx.ctx_lock);
> + return;
> + }
> +
> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
> + vq->call_ctx.producer.irq = irq;
> + ret = irq_bypass_register_producer(&vq->call_ctx.producer);
> + spin_unlock(&vq->call_ctx.ctx_lock);
> +}
> +
> +static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
> +{
> + struct vhost_virtqueue *vq = &v->vqs[qid];
> +
> + spin_lock(&vq->call_ctx.ctx_lock);
> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
Any reason for not checking vq->call_ctx.producer.irq as below here?
> + spin_unlock(&vq->call_ctx.ctx_lock);
> +}
> +
> +static void vhost_vdpa_update_vq_irq(struct vhost_virtqueue *vq)
> +{
> + spin_lock(&vq->call_ctx.ctx_lock);
> + /*
> + * if it has a non-zero irq, means there is a
> + * previsouly registered irq_bypass_producer,
> + * we should update it when ctx (its token)
> + * changes.
> + */
> + if (!vq->call_ctx.producer.irq) {
> + spin_unlock(&vq->call_ctx.ctx_lock);
> + return;
> + }
> +
> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
> + irq_bypass_register_producer(&vq->call_ctx.producer);
> + spin_unlock(&vq->call_ctx.ctx_lock);
> +}
I think setup_irq() and update_irq() could be unified with the following
logic:
irq_bypass_unregister_producer(&vq->call_ctx.producer);
irq = ops->get_vq_irq(vdpa, qid);
if (!vq->call_ctx.ctx || irq < 0) {
spin_unlock(&vq->call_ctx.ctx_lock);
return;
}
vq->call_ctx.producer.token = vq->call_ctx.ctx;
vq->call_ctx.producer.irq = irq;
ret = irq_bypass_register_producer(&vq->call_ctx.producer);
> +
> static void vhost_vdpa_reset(struct vhost_vdpa *v)
> {
> struct vdpa_device *vdpa = v->vdpa;
> @@ -155,11 +204,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
> {
> struct vdpa_device *vdpa = v->vdpa;
> const struct vdpa_config_ops *ops = vdpa->config;
> - u8 status;
> + u8 status, status_old;
> + int nvqs = v->nvqs;
> + u16 i;
>
> if (copy_from_user(&status, statusp, sizeof(status)))
> return -EFAULT;
>
> + status_old = ops->get_status(vdpa);
> +
> /*
> * Userspace shouldn't remove status bits unless reset the
> * status to 0.
> @@ -169,6 +222,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
>
> ops->set_status(vdpa, status);
>
> + /* vq irq is not expected to be changed once DRIVER_OK is set */
Let's move this comment to the get_vq_irq bus operation.
> + if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
> + for (i = 0; i < nvqs; i++)
> + vhost_vdpa_setup_vq_irq(v, i);
> +
> + if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
> + for (i = 0; i < nvqs; i++)
> + vhost_vdpa_unsetup_vq_irq(v, i);
> +
> return 0;
> }
>
> @@ -332,6 +394,7 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
>
> return 0;
> }
> +
> static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
> void __user *argp)
> {
> @@ -390,6 +453,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
> cb.private = NULL;
> }
> ops->set_vq_cb(vdpa, idx, &cb);
> + vhost_vdpa_update_vq_irq(vq);
> break;
>
> case VHOST_SET_VRING_NUM:
> @@ -765,6 +829,18 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
> return r;
> }
>
> +static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
> +{
> + struct vhost_virtqueue *vq;
> + int i;
> +
> + for (i = 0; i < v->nvqs; i++) {
> + vq = &v->vqs[i];
> + if (vq->call_ctx.producer.irq)
> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
> + }
> +}
Why not using vhost_vdpa_unsetup_vq_irq()?
Thanks
> +
> static int vhost_vdpa_release(struct inode *inode, struct file *filep)
> {
> struct vhost_vdpa *v = filep->private_data;
> @@ -777,6 +853,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
> vhost_vdpa_iotlb_free(v);
> vhost_vdpa_free_domain(v);
> vhost_vdpa_config_put(v);
> + vhost_vdpa_clean_irq(v);
> vhost_dev_cleanup(&v->vdev);
> kfree(v->vdev.vqs);
> mutex_unlock(&d->mutex);
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
[not found] ` <d51dd4e3-7513-c771-104c-b61f9ee70f30@intel.com>
@ 2020-08-04 8:53 ` Jason Wang
2020-08-04 9:21 ` Michael S. Tsirkin
[not found] ` <4605de34-c426-33d4-714b-e03716d0374c@intel.com>
0 siblings, 2 replies; 13+ messages in thread
From: Jason Wang @ 2020-08-04 8:53 UTC (permalink / raw)
To: Zhu, Lingshan, alex.williamson, mst, pbonzini,
sean.j.christopherson, wanpengli
Cc: shahafs, parav, kvm, netdev, virtualization, eli
On 2020/8/4 下午4:42, Zhu, Lingshan wrote:
>
>
> On 8/4/2020 4:38 PM, Jason Wang wrote:
>>
>> On 2020/7/31 下午2:55, Zhu Lingshan wrote:
>>> This commit introduces struct vhost_vring_call which replaced
>>> raw struct eventfd_ctx *call_ctx in struct vhost_virtqueue.
>>> Besides eventfd_ctx, it contains a spin lock and an
>>> irq_bypass_producer in its structure.
>>>
>>> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
>>> Suggested-by: Jason Wang <jasowang@redhat.com>
>>> ---
>>> drivers/vhost/vdpa.c | 4 ++--
>>> drivers/vhost/vhost.c | 22 ++++++++++++++++------
>>> drivers/vhost/vhost.h | 9 ++++++++-
>>> 3 files changed, 26 insertions(+), 9 deletions(-)
>>>
>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>> index a54b60d6623f..df3cf386b0cd 100644
>>> --- a/drivers/vhost/vdpa.c
>>> +++ b/drivers/vhost/vdpa.c
>>> @@ -96,7 +96,7 @@ static void handle_vq_kick(struct vhost_work *work)
>>> static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
>>> {
>>> struct vhost_virtqueue *vq = private;
>>> - struct eventfd_ctx *call_ctx = vq->call_ctx;
>>> + struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
>>> if (call_ctx)
>>> eventfd_signal(call_ctx, 1);
>>> @@ -382,7 +382,7 @@ static long vhost_vdpa_vring_ioctl(struct
>>> vhost_vdpa *v, unsigned int cmd,
>>> break;
>>> case VHOST_SET_VRING_CALL:
>>> - if (vq->call_ctx) {
>>> + if (vq->call_ctx.ctx) {
>>> cb.callback = vhost_vdpa_virtqueue_cb;
>>> cb.private = vq;
>>> } else {
>>> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
>>> index d7b8df3edffc..9f1a845a9302 100644
>>> --- a/drivers/vhost/vhost.c
>>> +++ b/drivers/vhost/vhost.c
>>> @@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct
>>> vhost_dev *d)
>>> __vhost_vq_meta_reset(d->vqs[i]);
>>> }
>>> +static void vhost_vring_call_reset(struct vhost_vring_call
>>> *call_ctx)
>>> +{
>>> + call_ctx->ctx = NULL;
>>> + memset(&call_ctx->producer, 0x0, sizeof(struct
>>> irq_bypass_producer));
>>> + spin_lock_init(&call_ctx->ctx_lock);
>>> +}
>>> +
>>> static void vhost_vq_reset(struct vhost_dev *dev,
>>> struct vhost_virtqueue *vq)
>>> {
>>> @@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
>>> vq->log_base = NULL;
>>> vq->error_ctx = NULL;
>>> vq->kick = NULL;
>>> - vq->call_ctx = NULL;
>>> vq->log_ctx = NULL;
>>> vhost_reset_is_le(vq);
>>> vhost_disable_cross_endian(vq);
>>> vq->busyloop_timeout = 0;
>>> vq->umem = NULL;
>>> vq->iotlb = NULL;
>>> + vhost_vring_call_reset(&vq->call_ctx);
>>> __vhost_vq_meta_reset(vq);
>>> }
>>> @@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
>>> eventfd_ctx_put(dev->vqs[i]->error_ctx);
>>> if (dev->vqs[i]->kick)
>>> fput(dev->vqs[i]->kick);
>>> - if (dev->vqs[i]->call_ctx)
>>> - eventfd_ctx_put(dev->vqs[i]->call_ctx);
>>> + if (dev->vqs[i]->call_ctx.ctx)
>>> + eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
>>> vhost_vq_reset(dev, dev->vqs[i]);
>>> }
>>> vhost_dev_free_iovecs(dev);
>>> @@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev *d,
>>> unsigned int ioctl, void __user *arg
>>> r = PTR_ERR(ctx);
>>> break;
>>> }
>>> - swap(ctx, vq->call_ctx);
>>> +
>>> + spin_lock(&vq->call_ctx.ctx_lock);
>>> + swap(ctx, vq->call_ctx.ctx);
>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>> break;
>>> case VHOST_SET_VRING_ERR:
>>> if (copy_from_user(&f, argp, sizeof f)) {
>>> @@ -2440,8 +2450,8 @@ static bool vhost_notify(struct vhost_dev
>>> *dev, struct vhost_virtqueue *vq)
>>> void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>>> {
>>> /* Signal the Guest tell them we used something up. */
>>> - if (vq->call_ctx && vhost_notify(dev, vq))
>>> - eventfd_signal(vq->call_ctx, 1);
>>> + if (vq->call_ctx.ctx && vhost_notify(dev, vq))
>>> + eventfd_signal(vq->call_ctx.ctx, 1);
>>> }
>>> EXPORT_SYMBOL_GPL(vhost_signal);
>>> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
>>> index c8e96a095d3b..38eb1aa3b68d 100644
>>> --- a/drivers/vhost/vhost.h
>>> +++ b/drivers/vhost/vhost.h
>>> @@ -13,6 +13,7 @@
>>> #include <linux/virtio_ring.h>
>>> #include <linux/atomic.h>
>>> #include <linux/vhost_iotlb.h>
>>> +#include <linux/irqbypass.h>
>>> struct vhost_work;
>>> typedef void (*vhost_work_fn_t)(struct vhost_work *work);
>>> @@ -60,6 +61,12 @@ enum vhost_uaddr_type {
>>> VHOST_NUM_ADDRS = 3,
>>> };
>>> +struct vhost_vring_call {
>>> + struct eventfd_ctx *ctx;
>>> + struct irq_bypass_producer producer;
>>> + spinlock_t ctx_lock;
>>
>>
>> It's not clear to me why we need ctx_lock here.
>>
>> Thanks
> Hi Jason,
>
> we use this lock to protect the eventfd_ctx and irq from race conditions,
We don't support irq notification from vDPA device driver in this
version, do we still have race condition?
Thanks
> are you suggesting a better name?
>
> Thanks
>>
>>
>>> +};
>>> +
>>> /* The virtqueue structure describes a queue attached to a device. */
>>> struct vhost_virtqueue {
>>> struct vhost_dev *dev;
>>> @@ -72,7 +79,7 @@ struct vhost_virtqueue {
>>> vring_used_t __user *used;
>>> const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
>>> struct file *kick;
>>> - struct eventfd_ctx *call_ctx;
>>> + struct vhost_vring_call call_ctx;
>>> struct eventfd_ctx *error_ctx;
>>> struct eventfd_ctx *log_ctx;
>>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
2020-08-04 8:53 ` Jason Wang
@ 2020-08-04 9:21 ` Michael S. Tsirkin
2020-08-05 2:16 ` Jason Wang
[not found] ` <4605de34-c426-33d4-714b-e03716d0374c@intel.com>
1 sibling, 1 reply; 13+ messages in thread
From: Michael S. Tsirkin @ 2020-08-04 9:21 UTC (permalink / raw)
To: Jason Wang
Cc: shahafs, wanpengli, parav, kvm, netdev, sean.j.christopherson,
virtualization, eli, pbonzini, Zhu, Lingshan
On Tue, Aug 04, 2020 at 04:53:39PM +0800, Jason Wang wrote:
>
> On 2020/8/4 下午4:42, Zhu, Lingshan wrote:
> >
> >
> > On 8/4/2020 4:38 PM, Jason Wang wrote:
> > >
> > > On 2020/7/31 下午2:55, Zhu Lingshan wrote:
> > > > This commit introduces struct vhost_vring_call which replaced
> > > > raw struct eventfd_ctx *call_ctx in struct vhost_virtqueue.
> > > > Besides eventfd_ctx, it contains a spin lock and an
> > > > irq_bypass_producer in its structure.
> > > >
> > > > Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> > > > Suggested-by: Jason Wang <jasowang@redhat.com>
> > > > ---
> > > > drivers/vhost/vdpa.c | 4 ++--
> > > > drivers/vhost/vhost.c | 22 ++++++++++++++++------
> > > > drivers/vhost/vhost.h | 9 ++++++++-
> > > > 3 files changed, 26 insertions(+), 9 deletions(-)
> > > >
> > > > diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> > > > index a54b60d6623f..df3cf386b0cd 100644
> > > > --- a/drivers/vhost/vdpa.c
> > > > +++ b/drivers/vhost/vdpa.c
> > > > @@ -96,7 +96,7 @@ static void handle_vq_kick(struct vhost_work *work)
> > > > static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
> > > > {
> > > > struct vhost_virtqueue *vq = private;
> > > > - struct eventfd_ctx *call_ctx = vq->call_ctx;
> > > > + struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
> > > > if (call_ctx)
> > > > eventfd_signal(call_ctx, 1);
> > > > @@ -382,7 +382,7 @@ static long vhost_vdpa_vring_ioctl(struct
> > > > vhost_vdpa *v, unsigned int cmd,
> > > > break;
> > > > case VHOST_SET_VRING_CALL:
> > > > - if (vq->call_ctx) {
> > > > + if (vq->call_ctx.ctx) {
> > > > cb.callback = vhost_vdpa_virtqueue_cb;
> > > > cb.private = vq;
> > > > } else {
> > > > diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> > > > index d7b8df3edffc..9f1a845a9302 100644
> > > > --- a/drivers/vhost/vhost.c
> > > > +++ b/drivers/vhost/vhost.c
> > > > @@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct
> > > > vhost_dev *d)
> > > > __vhost_vq_meta_reset(d->vqs[i]);
> > > > }
> > > > +static void vhost_vring_call_reset(struct vhost_vring_call
> > > > *call_ctx)
> > > > +{
> > > > + call_ctx->ctx = NULL;
> > > > + memset(&call_ctx->producer, 0x0, sizeof(struct
> > > > irq_bypass_producer));
> > > > + spin_lock_init(&call_ctx->ctx_lock);
> > > > +}
> > > > +
> > > > static void vhost_vq_reset(struct vhost_dev *dev,
> > > > struct vhost_virtqueue *vq)
> > > > {
> > > > @@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
> > > > vq->log_base = NULL;
> > > > vq->error_ctx = NULL;
> > > > vq->kick = NULL;
> > > > - vq->call_ctx = NULL;
> > > > vq->log_ctx = NULL;
> > > > vhost_reset_is_le(vq);
> > > > vhost_disable_cross_endian(vq);
> > > > vq->busyloop_timeout = 0;
> > > > vq->umem = NULL;
> > > > vq->iotlb = NULL;
> > > > + vhost_vring_call_reset(&vq->call_ctx);
> > > > __vhost_vq_meta_reset(vq);
> > > > }
> > > > @@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
> > > > eventfd_ctx_put(dev->vqs[i]->error_ctx);
> > > > if (dev->vqs[i]->kick)
> > > > fput(dev->vqs[i]->kick);
> > > > - if (dev->vqs[i]->call_ctx)
> > > > - eventfd_ctx_put(dev->vqs[i]->call_ctx);
> > > > + if (dev->vqs[i]->call_ctx.ctx)
> > > > + eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
> > > > vhost_vq_reset(dev, dev->vqs[i]);
> > > > }
> > > > vhost_dev_free_iovecs(dev);
> > > > @@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev
> > > > *d, unsigned int ioctl, void __user *arg
> > > > r = PTR_ERR(ctx);
> > > > break;
> > > > }
> > > > - swap(ctx, vq->call_ctx);
> > > > +
> > > > + spin_lock(&vq->call_ctx.ctx_lock);
> > > > + swap(ctx, vq->call_ctx.ctx);
> > > > + spin_unlock(&vq->call_ctx.ctx_lock);
> > > > break;
> > > > case VHOST_SET_VRING_ERR:
> > > > if (copy_from_user(&f, argp, sizeof f)) {
> > > > @@ -2440,8 +2450,8 @@ static bool vhost_notify(struct vhost_dev
> > > > *dev, struct vhost_virtqueue *vq)
> > > > void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
> > > > {
> > > > /* Signal the Guest tell them we used something up. */
> > > > - if (vq->call_ctx && vhost_notify(dev, vq))
> > > > - eventfd_signal(vq->call_ctx, 1);
> > > > + if (vq->call_ctx.ctx && vhost_notify(dev, vq))
> > > > + eventfd_signal(vq->call_ctx.ctx, 1);
> > > > }
> > > > EXPORT_SYMBOL_GPL(vhost_signal);
> > > > diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> > > > index c8e96a095d3b..38eb1aa3b68d 100644
> > > > --- a/drivers/vhost/vhost.h
> > > > +++ b/drivers/vhost/vhost.h
> > > > @@ -13,6 +13,7 @@
> > > > #include <linux/virtio_ring.h>
> > > > #include <linux/atomic.h>
> > > > #include <linux/vhost_iotlb.h>
> > > > +#include <linux/irqbypass.h>
> > > > struct vhost_work;
> > > > typedef void (*vhost_work_fn_t)(struct vhost_work *work);
> > > > @@ -60,6 +61,12 @@ enum vhost_uaddr_type {
> > > > VHOST_NUM_ADDRS = 3,
> > > > };
> > > > +struct vhost_vring_call {
> > > > + struct eventfd_ctx *ctx;
> > > > + struct irq_bypass_producer producer;
> > > > + spinlock_t ctx_lock;
> > >
> > >
> > > It's not clear to me why we need ctx_lock here.
> > >
> > > Thanks
> > Hi Jason,
> >
> > we use this lock to protect the eventfd_ctx and irq from race conditions,
>
>
> We don't support irq notification from vDPA device driver in this version,
> do we still have race condition?
>
> Thanks
Jason I'm not sure what you are trying to say here.
>
> > are you suggesting a better name?
> >
> > Thanks
> > >
> > >
> > > > +};
> > > > +
> > > > /* The virtqueue structure describes a queue attached to a device. */
> > > > struct vhost_virtqueue {
> > > > struct vhost_dev *dev;
> > > > @@ -72,7 +79,7 @@ struct vhost_virtqueue {
> > > > vring_used_t __user *used;
> > > > const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
> > > > struct file *kick;
> > > > - struct eventfd_ctx *call_ctx;
> > > > + struct vhost_vring_call call_ctx;
> > > > struct eventfd_ctx *error_ctx;
> > > > struct eventfd_ctx *log_ctx;
> > >
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 4/6] vhost_vdpa: implement IRQ offloading in vhost_vdpa
[not found] ` <ae5385dc-6637-c5a3-b00a-02f66bb9a85f@intel.com>
@ 2020-08-04 9:36 ` Michael S. Tsirkin
2020-08-05 2:36 ` Jason Wang
1 sibling, 0 replies; 13+ messages in thread
From: Michael S. Tsirkin @ 2020-08-04 9:36 UTC (permalink / raw)
To: Zhu, Lingshan
Cc: shahafs, wanpengli, parav, kvm, netdev, sean.j.christopherson,
virtualization, eli, pbonzini
On Tue, Aug 04, 2020 at 05:31:38PM +0800, Zhu, Lingshan wrote:
>
> On 8/4/2020 4:51 PM, Jason Wang wrote:
>
>
> On 2020/7/31 下午2:55, Zhu Lingshan wrote:
>
> This patch introduce a set of functions for setup/unsetup
> and update irq offloading respectively by register/unregister
> and re-register the irq_bypass_producer.
>
> With these functions, this commit can setup/unsetup
> irq offloading through setting DRIVER_OK/!DRIVER_OK, and
> update irq offloading through SET_VRING_CALL.
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> Suggested-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/vhost/Kconfig | 1 +
> drivers/vhost/vdpa.c | 79
> ++++++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 79 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
> index d3688c6afb87..587fbae06182 100644
> --- a/drivers/vhost/Kconfig
> +++ b/drivers/vhost/Kconfig
> @@ -65,6 +65,7 @@ config VHOST_VDPA
> tristate "Vhost driver for vDPA-based backend"
> depends on EVENTFD
> select VHOST
> + select IRQ_BYPASS_MANAGER
> depends on VDPA
> help
> This kernel module can be loaded in host kernel to accelerate
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index df3cf386b0cd..278ea2f00172 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -115,6 +115,55 @@ static irqreturn_t vhost_vdpa_config_cb(void
> *private)
> return IRQ_HANDLED;
> }
> +static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
> +{
> + struct vhost_virtqueue *vq = &v->vqs[qid];
> + const struct vdpa_config_ops *ops = v->vdpa->config;
> + struct vdpa_device *vdpa = v->vdpa;
> + int ret, irq;
> +
> + spin_lock(&vq->call_ctx.ctx_lock);
> + irq = ops->get_vq_irq(vdpa, qid);
> + if (!vq->call_ctx.ctx || irq < 0) {
> + spin_unlock(&vq->call_ctx.ctx_lock);
> + return;
> + }
> +
> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
> + vq->call_ctx.producer.irq = irq;
> + ret = irq_bypass_register_producer(&vq->call_ctx.producer);
> + spin_unlock(&vq->call_ctx.ctx_lock);
> +}
> +
> +static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
> +{
> + struct vhost_virtqueue *vq = &v->vqs[qid];
> +
> + spin_lock(&vq->call_ctx.ctx_lock);
> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
>
>
>
> Any reason for not checking vq->call_ctx.producer.irq as below here?
>
> we only need ctx as a token to unregister vq from irq bypass manager, if vq->call_ctx.producer.irq is 0, it means it is an unused or disabled vq, no harm if we
> perform an unregister on it.
>
>
>
> + spin_unlock(&vq->call_ctx.ctx_lock);
> +}
> +
> +static void vhost_vdpa_update_vq_irq(struct vhost_virtqueue *vq)
> +{
> + spin_lock(&vq->call_ctx.ctx_lock);
> + /*
> + * if it has a non-zero irq, means there is a
> + * previsouly registered irq_bypass_producer,
> + * we should update it when ctx (its token)
> + * changes.
> + */
> + if (!vq->call_ctx.producer.irq) {
> + spin_unlock(&vq->call_ctx.ctx_lock);
> + return;
> + }
> +
> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
> + irq_bypass_register_producer(&vq->call_ctx.producer);
> + spin_unlock(&vq->call_ctx.ctx_lock);
> +}
>
>
>
> I think setup_irq() and update_irq() could be unified with the following
> logic:
>
> irq_bypass_unregister_producer(&vq->call_ctx.producer);
> irq = ops->get_vq_irq(vdpa, qid);
> if (!vq->call_ctx.ctx || irq < 0) {
> spin_unlock(&vq->call_ctx.ctx_lock);
> return;
> }
>
> vq->call_ctx.producer.token = vq->call_ctx.ctx;
> vq->call_ctx.producer.irq = irq;
> ret = irq_bypass_register_producer(&vq->call_ctx.producer);
>
> Yes, this code piece can do both register and update. Though it's rare to call update_irq(), however
> setup_irq() is very likely to be called for every vq, so this may cause several rounds of useless irq_bypass_unregister_producer().
> is it worth it to simplify the code?
>
>
> +
> static void vhost_vdpa_reset(struct vhost_vdpa *v)
> {
> struct vdpa_device *vdpa = v->vdpa;
> @@ -155,11 +204,15 @@ static long vhost_vdpa_set_status(struct
> vhost_vdpa *v, u8 __user *statusp)
> {
> struct vdpa_device *vdpa = v->vdpa;
> const struct vdpa_config_ops *ops = vdpa->config;
> - u8 status;
> + u8 status, status_old;
> + int nvqs = v->nvqs;
> + u16 i;
> if (copy_from_user(&status, statusp, sizeof(status)))
> return -EFAULT;
> + status_old = ops->get_status(vdpa);
> +
> /*
> * Userspace shouldn't remove status bits unless reset the
> * status to 0.
> @@ -169,6 +222,15 @@ static long vhost_vdpa_set_status(struct
> vhost_vdpa *v, u8 __user *statusp)
> ops->set_status(vdpa, status);
> + /* vq irq is not expected to be changed once DRIVER_OK is set */
>
>
>
> Let's move this comment to the get_vq_irq bus operation.
>
> OK, can do!
>
>
Patch on top pls, these are in my tree now.
>
> + if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old &
> VIRTIO_CONFIG_S_DRIVER_OK))
> + for (i = 0; i < nvqs; i++)
> + vhost_vdpa_setup_vq_irq(v, i);
> +
> + if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status &
> VIRTIO_CONFIG_S_DRIVER_OK))
> + for (i = 0; i < nvqs; i++)
> + vhost_vdpa_unsetup_vq_irq(v, i);
> +
> return 0;
> }
> @@ -332,6 +394,7 @@ static long vhost_vdpa_set_config_call(struct
> vhost_vdpa *v, u32 __user *argp)
> return 0;
> }
> +
> static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int
> cmd,
> void __user *argp)
> {
> @@ -390,6 +453,7 @@ static long vhost_vdpa_vring_ioctl(struct
> vhost_vdpa *v, unsigned int cmd,
> cb.private = NULL;
> }
> ops->set_vq_cb(vdpa, idx, &cb);
> + vhost_vdpa_update_vq_irq(vq);
> break;
> case VHOST_SET_VRING_NUM:
> @@ -765,6 +829,18 @@ static int vhost_vdpa_open(struct inode *inode,
> struct file *filep)
> return r;
> }
> +static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
> +{
> + struct vhost_virtqueue *vq;
> + int i;
> +
> + for (i = 0; i < v->nvqs; i++) {
> + vq = &v->vqs[i];
> + if (vq->call_ctx.producer.irq)
> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
> + }
> +}
>
>
>
> Why not using vhost_vdpa_unsetup_vq_irq()?
>
> IMHO, in this cleanup phase, the device is almost dead, user space won't change ctx anymore, so I think we don't need to check ctx or irq, can just unregister it.
>
> Thanks!
>
>
> Thanks
>
>
>
> +
> static int vhost_vdpa_release(struct inode *inode, struct file
> *filep)
> {
> struct vhost_vdpa *v = filep->private_data;
> @@ -777,6 +853,7 @@ static int vhost_vdpa_release(struct inode *inode,
> struct file *filep)
> vhost_vdpa_iotlb_free(v);
> vhost_vdpa_free_domain(v);
> vhost_vdpa_config_put(v);
> + vhost_vdpa_clean_irq(v);
> vhost_dev_cleanup(&v->vdev);
> kfree(v->vdev.vqs);
> mutex_unlock(&d->mutex);
>
>
>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
2020-08-04 9:21 ` Michael S. Tsirkin
@ 2020-08-05 2:16 ` Jason Wang
[not found] ` <bf10cde9-db86-a1ac-e2a8-363735e49afb@intel.com>
2020-08-10 13:37 ` Michael S. Tsirkin
0 siblings, 2 replies; 13+ messages in thread
From: Jason Wang @ 2020-08-05 2:16 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: shahafs, wanpengli, parav, kvm, netdev, sean.j.christopherson,
virtualization, eli, pbonzini, Zhu, Lingshan
On 2020/8/4 下午5:21, Michael S. Tsirkin wrote:
>>>>> +struct vhost_vring_call {
>>>>> + struct eventfd_ctx *ctx;
>>>>> + struct irq_bypass_producer producer;
>>>>> + spinlock_t ctx_lock;
>>>> It's not clear to me why we need ctx_lock here.
>>>>
>>>> Thanks
>>> Hi Jason,
>>>
>>> we use this lock to protect the eventfd_ctx and irq from race conditions,
>> We don't support irq notification from vDPA device driver in this version,
>> do we still have race condition?
>>
>> Thanks
> Jason I'm not sure what you are trying to say here.
I meant we change the API from V4 so driver won't notify us if irq is
changed.
Then it looks to me there's no need for the ctx_lock, everything could be
synchronized with vq mutex.
Thanks
>
>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
[not found] ` <4605de34-c426-33d4-714b-e03716d0374c@intel.com>
@ 2020-08-05 2:20 ` Jason Wang
0 siblings, 0 replies; 13+ messages in thread
From: Jason Wang @ 2020-08-05 2:20 UTC (permalink / raw)
To: Zhu, Lingshan, alex.williamson, mst, pbonzini,
sean.j.christopherson, wanpengli
Cc: shahafs, parav, kvm, netdev, virtualization, eli
On 2020/8/4 下午5:21, Zhu, Lingshan wrote:
>>> Hi Jason,
>>>
>>> we use this lock to protect the eventfd_ctx and irq from race
>>> conditions,
>>
>>
>> We don't support irq notification from vDPA device driver in this
>> version, do we still have race condition?
> as we discussed before:
> (1)if vendor change IRQ after DRIVER_OK, through they should not do this, but they can.
> (2)if user space changes ctx.
>
> Thanks
Yes, but then everything happens in context of syscall (ioctl), so vq
mutex is sufficient I guess?
Thanks
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 4/6] vhost_vdpa: implement IRQ offloading in vhost_vdpa
[not found] ` <ae5385dc-6637-c5a3-b00a-02f66bb9a85f@intel.com>
2020-08-04 9:36 ` Michael S. Tsirkin
@ 2020-08-05 2:36 ` Jason Wang
[not found] ` <cb01a490-e9e6-42f5-b6c3-caefa2d91b9f@intel.com>
1 sibling, 1 reply; 13+ messages in thread
From: Jason Wang @ 2020-08-05 2:36 UTC (permalink / raw)
To: Zhu, Lingshan, alex.williamson, mst, pbonzini,
sean.j.christopherson, wanpengli
Cc: shahafs, parav, kvm, netdev, virtualization, eli
On 2020/8/4 下午5:31, Zhu, Lingshan wrote:
>
>
> On 8/4/2020 4:51 PM, Jason Wang wrote:
>>
>> On 2020/7/31 下午2:55, Zhu Lingshan wrote:
>>> This patch introduce a set of functions for setup/unsetup
>>> and update irq offloading respectively by register/unregister
>>> and re-register the irq_bypass_producer.
>>>
>>> With these functions, this commit can setup/unsetup
>>> irq offloading through setting DRIVER_OK/!DRIVER_OK, and
>>> update irq offloading through SET_VRING_CALL.
>>>
>>> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
>>> Suggested-by: Jason Wang <jasowang@redhat.com>
>>> ---
>>> drivers/vhost/Kconfig | 1 +
>>> drivers/vhost/vdpa.c | 79
>>> ++++++++++++++++++++++++++++++++++++++++++-
>>> 2 files changed, 79 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
>>> index d3688c6afb87..587fbae06182 100644
>>> --- a/drivers/vhost/Kconfig
>>> +++ b/drivers/vhost/Kconfig
>>> @@ -65,6 +65,7 @@ config VHOST_VDPA
>>> tristate "Vhost driver for vDPA-based backend"
>>> depends on EVENTFD
>>> select VHOST
>>> + select IRQ_BYPASS_MANAGER
>>> depends on VDPA
>>> help
>>> This kernel module can be loaded in host kernel to accelerate
>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>> index df3cf386b0cd..278ea2f00172 100644
>>> --- a/drivers/vhost/vdpa.c
>>> +++ b/drivers/vhost/vdpa.c
>>> @@ -115,6 +115,55 @@ static irqreturn_t vhost_vdpa_config_cb(void
>>> *private)
>>> return IRQ_HANDLED;
>>> }
>>> +static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
>>> +{
>>> + struct vhost_virtqueue *vq = &v->vqs[qid];
>>> + const struct vdpa_config_ops *ops = v->vdpa->config;
>>> + struct vdpa_device *vdpa = v->vdpa;
>>> + int ret, irq;
>>> +
>>> + spin_lock(&vq->call_ctx.ctx_lock);
>>> + irq = ops->get_vq_irq(vdpa, qid);
>>> + if (!vq->call_ctx.ctx || irq < 0) {
>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>> + return;
>>> + }
>>> +
>>> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
>>> + vq->call_ctx.producer.irq = irq;
>>> + ret = irq_bypass_register_producer(&vq->call_ctx.producer);
>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>> +}
>>> +
>>> +static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
>>> +{
>>> + struct vhost_virtqueue *vq = &v->vqs[qid];
>>> +
>>> + spin_lock(&vq->call_ctx.ctx_lock);
>>> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
>>
>>
>> Any reason for not checking vq->call_ctx.producer.irq as below here?
> we only need ctx as a token to unregister vq from irq bypass manager, if vq->call_ctx.producer.irq is 0, means it is a unused or disabled vq,
This is not how the code is written? See above, you only check whether irq
is negative, irq 0 seems acceptable.
+ spin_lock(&vq->call_ctx.ctx_lock);
+ irq = ops->get_vq_irq(vdpa, qid);
+ if (!vq->call_ctx.ctx || irq < 0) {
+ spin_unlock(&vq->call_ctx.ctx_lock);
+ return;
+ }
+
+ vq->call_ctx.producer.token = vq->call_ctx.ctx;
+ vq->call_ctx.producer.irq = irq;
+ ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ spin_unlock(&vq->call_ctx.ctx_lock);
> no harm if we
> perform an unregister on it.
>>
>>
>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>> +}
>>> +
>>> +static void vhost_vdpa_update_vq_irq(struct vhost_virtqueue *vq)
>>> +{
>>> + spin_lock(&vq->call_ctx.ctx_lock);
>>> + /*
>>> + * if it has a non-zero irq, means there is a
>>> + * previsouly registered irq_bypass_producer,
>>> + * we should update it when ctx (its token)
>>> + * changes.
>>> + */
>>> + if (!vq->call_ctx.producer.irq) {
>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>> + return;
>>> + }
>>> +
>>> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
>>> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
>>> + irq_bypass_register_producer(&vq->call_ctx.producer);
>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>> +}
>>
>>
>> I think setup_irq() and update_irq() could be unified with the
>> following logic:
>>
>> irq_bypass_unregister_producer(&vq->call_ctx.producer);
>> irq = ops->get_vq_irq(vdpa, qid);
>> if (!vq->call_ctx.ctx || irq < 0) {
>> spin_unlock(&vq->call_ctx.ctx_lock);
>> return;
>> }
>>
>> vq->call_ctx.producer.token = vq->call_ctx.ctx;
>> vq->call_ctx.producer.irq = irq;
>> ret = irq_bypass_register_producer(&vq->call_ctx.producer);
> Yes, this code piece can do both register and update. Though it's rare to call undate_irq(), however
> setup_irq() is very likely to be called for every vq, so this may cause several rounds of useless irq_bypass_unregister_producer().
I'm not sure I get this but do you have a case for this?
> is it worth for simplify the code?
Less code(bug).
>>
>>> +
>>> static void vhost_vdpa_reset(struct vhost_vdpa *v)
>>> {
>>> struct vdpa_device *vdpa = v->vdpa;
>>> @@ -155,11 +204,15 @@ static long vhost_vdpa_set_status(struct
>>> vhost_vdpa *v, u8 __user *statusp)
>>> {
>>> struct vdpa_device *vdpa = v->vdpa;
>>> const struct vdpa_config_ops *ops = vdpa->config;
>>> - u8 status;
>>> + u8 status, status_old;
>>> + int nvqs = v->nvqs;
>>> + u16 i;
>>> if (copy_from_user(&status, statusp, sizeof(status)))
>>> return -EFAULT;
>>> + status_old = ops->get_status(vdpa);
>>> +
>>> /*
>>> * Userspace shouldn't remove status bits unless reset the
>>> * status to 0.
>>> @@ -169,6 +222,15 @@ static long vhost_vdpa_set_status(struct
>>> vhost_vdpa *v, u8 __user *statusp)
>>> ops->set_status(vdpa, status);
>>> + /* vq irq is not expected to be changed once DRIVER_OK is set */
>>
>>
>> Let's move this comment to the get_vq_irq bus operation.
> OK, can do!
>>
>>
>>> + if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old &
>>> VIRTIO_CONFIG_S_DRIVER_OK))
>>> + for (i = 0; i < nvqs; i++)
>>> + vhost_vdpa_setup_vq_irq(v, i);
>>> +
>>> + if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status &
>>> VIRTIO_CONFIG_S_DRIVER_OK))
>>> + for (i = 0; i < nvqs; i++)
>>> + vhost_vdpa_unsetup_vq_irq(v, i);
>>> +
>>> return 0;
>>> }
>>> @@ -332,6 +394,7 @@ static long vhost_vdpa_set_config_call(struct
>>> vhost_vdpa *v, u32 __user *argp)
>>> return 0;
>>> }
>>> +
>>> static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned
>>> int cmd,
>>> void __user *argp)
>>> {
>>> @@ -390,6 +453,7 @@ static long vhost_vdpa_vring_ioctl(struct
>>> vhost_vdpa *v, unsigned int cmd,
>>> cb.private = NULL;
>>> }
>>> ops->set_vq_cb(vdpa, idx, &cb);
>>> + vhost_vdpa_update_vq_irq(vq);
>>> break;
>>> case VHOST_SET_VRING_NUM:
>>> @@ -765,6 +829,18 @@ static int vhost_vdpa_open(struct inode *inode,
>>> struct file *filep)
>>> return r;
>>> }
>>> +static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
>>> +{
>>> + struct vhost_virtqueue *vq;
>>> + int i;
>>> +
>>> + for (i = 0; i < v->nvqs; i++) {
>>> + vq = &v->vqs[i];
>>> + if (vq->call_ctx.producer.irq)
>>> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
>>> + }
>>> +}
>>
>>
>> Why not using vhost_vdpa_unsetup_vq_irq()?
> IMHO, in this cleanup phase, the device is almost dead, user space won't change ctx anymore, so I think we don't need to check ctx or irq,
But you check irq here? For ctx, irq_bypass_unregister_producer() can do
the check instead of us.
Thanks
> can just unregister it.
>
> Thanks!
>>
>> Thanks
>>
>>
>>> +
>>> static int vhost_vdpa_release(struct inode *inode, struct file
>>> *filep)
>>> {
>>> struct vhost_vdpa *v = filep->private_data;
>>> @@ -777,6 +853,7 @@ static int vhost_vdpa_release(struct inode
>>> *inode, struct file *filep)
>>> vhost_vdpa_iotlb_free(v);
>>> vhost_vdpa_free_domain(v);
>>> vhost_vdpa_config_put(v);
>>> + vhost_vdpa_clean_irq(v);
>>> vhost_dev_cleanup(&v->vdev);
>>> kfree(v->vdev.vqs);
>>> mutex_unlock(&d->mutex);
>>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 4/6] vhost_vdpa: implement IRQ offloading in vhost_vdpa
[not found] ` <cb01a490-e9e6-42f5-b6c3-caefa2d91b9f@intel.com>
@ 2020-08-05 5:51 ` Jason Wang
0 siblings, 0 replies; 13+ messages in thread
From: Jason Wang @ 2020-08-05 5:51 UTC (permalink / raw)
To: Zhu, Lingshan, alex.williamson, mst, pbonzini,
sean.j.christopherson, wanpengli
Cc: shahafs, parav, kvm, netdev, virtualization, eli
On 2020/8/5 下午1:45, Zhu, Lingshan wrote:
>
>
> On 8/5/2020 10:36 AM, Jason Wang wrote:
>>
>> On 2020/8/4 下午5:31, Zhu, Lingshan wrote:
>>>
>>>
>>> On 8/4/2020 4:51 PM, Jason Wang wrote:
>>>>
>>>> On 2020/7/31 下午2:55, Zhu Lingshan wrote:
>>>>> This patch introduce a set of functions for setup/unsetup
>>>>> and update irq offloading respectively by register/unregister
>>>>> and re-register the irq_bypass_producer.
>>>>>
>>>>> With these functions, this commit can setup/unsetup
>>>>> irq offloading through setting DRIVER_OK/!DRIVER_OK, and
>>>>> update irq offloading through SET_VRING_CALL.
>>>>>
>>>>> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
>>>>> Suggested-by: Jason Wang <jasowang@redhat.com>
>>>>> ---
>>>>> drivers/vhost/Kconfig | 1 +
>>>>> drivers/vhost/vdpa.c | 79
>>>>> ++++++++++++++++++++++++++++++++++++++++++-
>>>>> 2 files changed, 79 insertions(+), 1 deletion(-)
>>>>>
>>>>> diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
>>>>> index d3688c6afb87..587fbae06182 100644
>>>>> --- a/drivers/vhost/Kconfig
>>>>> +++ b/drivers/vhost/Kconfig
>>>>> @@ -65,6 +65,7 @@ config VHOST_VDPA
>>>>> tristate "Vhost driver for vDPA-based backend"
>>>>> depends on EVENTFD
>>>>> select VHOST
>>>>> + select IRQ_BYPASS_MANAGER
>>>>> depends on VDPA
>>>>> help
>>>>> This kernel module can be loaded in host kernel to accelerate
>>>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>>>> index df3cf386b0cd..278ea2f00172 100644
>>>>> --- a/drivers/vhost/vdpa.c
>>>>> +++ b/drivers/vhost/vdpa.c
>>>>> @@ -115,6 +115,55 @@ static irqreturn_t vhost_vdpa_config_cb(void
>>>>> *private)
>>>>> return IRQ_HANDLED;
>>>>> }
>>>>> +static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
>>>>> +{
>>>>> + struct vhost_virtqueue *vq = &v->vqs[qid];
>>>>> + const struct vdpa_config_ops *ops = v->vdpa->config;
>>>>> + struct vdpa_device *vdpa = v->vdpa;
>>>>> + int ret, irq;
>>>>> +
>>>>> + spin_lock(&vq->call_ctx.ctx_lock);
>>>>> + irq = ops->get_vq_irq(vdpa, qid);
>>>>> + if (!vq->call_ctx.ctx || irq < 0) {
>>>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>>>> + return;
>>>>> + }
>>>>> +
>>>>> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
>>>>> + vq->call_ctx.producer.irq = irq;
>>>>> + ret = irq_bypass_register_producer(&vq->call_ctx.producer);
>>>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>>>> +}
>>>>> +
>>>>> +static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
>>>>> +{
>>>>> + struct vhost_virtqueue *vq = &v->vqs[qid];
>>>>> +
>>>>> + spin_lock(&vq->call_ctx.ctx_lock);
>>>>> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
>>>>
>>>>
>>>> Any reason for not checking vq->call_ctx.producer.irq as below here?
>>> we only need ctx as a token to unregister vq from irq bypass
>>> manager, if vq->call_ctx.producer.irq is 0, means it is a unused or
>>> disabled vq,
>>
>>
>> This is not how the code is wrote? See above you only check whether
>> irq is negative, irq 0 seems acceptable.
> Yes, IRQ 0 is valid, so we check whether it is < 0.
>>
>> + spin_lock(&vq->call_ctx.ctx_lock);
>> + irq = ops->get_vq_irq(vdpa, qid);
>> + if (!vq->call_ctx.ctx || irq < 0) {
>> + spin_unlock(&vq->call_ctx.ctx_lock);
>> + return;
>> + }
>> +
>> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
>> + vq->call_ctx.producer.irq = irq;
>> + ret = irq_bypass_register_producer(&vq->call_ctx.producer);
>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>
>>
>>> no harm if we
>>> perform an unregister on it.
>>>>
>>>>
>>>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>>>> +}
>>>>> +
>>>>> +static void vhost_vdpa_update_vq_irq(struct vhost_virtqueue *vq)
>>>>> +{
>>>>> + spin_lock(&vq->call_ctx.ctx_lock);
>>>>> + /*
>>>>> + * if it has a non-zero irq, means there is a
>>>>> + * previsouly registered irq_bypass_producer,
>>>>> + * we should update it when ctx (its token)
>>>>> + * changes.
>>>>> + */
>>>>> + if (!vq->call_ctx.producer.irq) {
>>>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>>>> + return;
>>>>> + }
>>>>> +
>>>>> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
>>>>> + vq->call_ctx.producer.token = vq->call_ctx.ctx;
>>>>> + irq_bypass_register_producer(&vq->call_ctx.producer);
>>>>> + spin_unlock(&vq->call_ctx.ctx_lock);
>>>>> +}
>>>>
>>>>
>>>> I think setup_irq() and update_irq() could be unified with the
>>>> following logic:
>>>>
>>>> irq_bypass_unregister_producer(&vq->call_ctx.producer);
>>>> irq = ops->get_vq_irq(vdpa, qid);
>>>> if (!vq->call_ctx.ctx || irq < 0) {
>>>> spin_unlock(&vq->call_ctx.ctx_lock);
>>>> return;
>>>> }
>>>>
>>>> vq->call_ctx.producer.token = vq->call_ctx.ctx;
>>>> vq->call_ctx.producer.irq = irq;
>>>> ret = irq_bypass_register_producer(&vq->call_ctx.producer);
>>> Yes, this code piece can do both register and update. Though it's
>>> rare to call undate_irq(), however
>>> setup_irq() is very likely to be called for every vq, so this may
>>> cause several rounds of useless irq_bypass_unregister_producer().
>>
>>
>> I'm not sure I get this but do you have a case for this?
> I mean if we use this routine to setup irq offloading, it is very likely to do a unregister producer for every vq first, but for nothing.
Does it really harm? See vfio_msi_set_vector_signal()
>>
>>
>>> is it worth for simplify the code?
>>
>>
>> Less code(bug).
> I can do this if we are chasing for perfection, however I believe bug number has positive correlation with the complexity in the logic than code lines, if we only merge lines, this may not help.
>>
Well, reducing code duplication is always good and it helps reviewers.
>>
>>>>
>>>>> +
>>>>> static void vhost_vdpa_reset(struct vhost_vdpa *v)
>>>>> {
>>>>> struct vdpa_device *vdpa = v->vdpa;
>>>>> @@ -155,11 +204,15 @@ static long vhost_vdpa_set_status(struct
>>>>> vhost_vdpa *v, u8 __user *statusp)
>>>>> {
>>>>> struct vdpa_device *vdpa = v->vdpa;
>>>>> const struct vdpa_config_ops *ops = vdpa->config;
>>>>> - u8 status;
>>>>> + u8 status, status_old;
>>>>> + int nvqs = v->nvqs;
>>>>> + u16 i;
>>>>> if (copy_from_user(&status, statusp, sizeof(status)))
>>>>> return -EFAULT;
>>>>> + status_old = ops->get_status(vdpa);
>>>>> +
>>>>> /*
>>>>> * Userspace shouldn't remove status bits unless reset the
>>>>> * status to 0.
>>>>> @@ -169,6 +222,15 @@ static long vhost_vdpa_set_status(struct
>>>>> vhost_vdpa *v, u8 __user *statusp)
>>>>> ops->set_status(vdpa, status);
>>>>> + /* vq irq is not expected to be changed once DRIVER_OK is
>>>>> set */
>>>>
>>>>
>>>> Let's move this comment to the get_vq_irq bus operation.
>>> OK, can do!
>>>>
>>>>
>>>>> + if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old &
>>>>> VIRTIO_CONFIG_S_DRIVER_OK))
>>>>> + for (i = 0; i < nvqs; i++)
>>>>> + vhost_vdpa_setup_vq_irq(v, i);
>>>>> +
>>>>> + if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status &
>>>>> VIRTIO_CONFIG_S_DRIVER_OK))
>>>>> + for (i = 0; i < nvqs; i++)
>>>>> + vhost_vdpa_unsetup_vq_irq(v, i);
>>>>> +
>>>>> return 0;
>>>>> }
>>>>> @@ -332,6 +394,7 @@ static long
>>>>> vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
>>>>> return 0;
>>>>> }
>>>>> +
>>>>> static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v,
>>>>> unsigned int cmd,
>>>>> void __user *argp)
>>>>> {
>>>>> @@ -390,6 +453,7 @@ static long vhost_vdpa_vring_ioctl(struct
>>>>> vhost_vdpa *v, unsigned int cmd,
>>>>> cb.private = NULL;
>>>>> }
>>>>> ops->set_vq_cb(vdpa, idx, &cb);
>>>>> + vhost_vdpa_update_vq_irq(vq);
>>>>> break;
>>>>> case VHOST_SET_VRING_NUM:
>>>>> @@ -765,6 +829,18 @@ static int vhost_vdpa_open(struct inode
>>>>> *inode, struct file *filep)
>>>>> return r;
>>>>> }
>>>>> +static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
>>>>> +{
>>>>> + struct vhost_virtqueue *vq;
>>>>> + int i;
>>>>> +
>>>>> + for (i = 0; i < v->nvqs; i++) {
>>>>> + vq = &v->vqs[i];
>>>>> + if (vq->call_ctx.producer.irq)
>>>>> + irq_bypass_unregister_producer(&vq->call_ctx.producer);
>>>>> + }
>>>>> +}
>>>>
>>>>
>>>> Why not using vhost_vdpa_unsetup_vq_irq()?
>>> IMHO, in this cleanup phase, the device is almost dead, user space
>>> won't change ctx anymore, so I think we don't need to check ctx or irq,
>>
>>
>> But you check irq here? For ctx, irq_bypass_unregister_producer() can
>> do the check instead of us.
> IMHO, maybe irq does not matter, (1)if the vq not registered to irq bypass manager, producer.irq is not valid, token == NULL, irq_bypass_unregister would no nothing.
> (2)if the vq registered to irq bypass manager, producer.irq is valid, irq_bypass_unregister will do its work based on the token.
> so maybe we can say irq is relative to the token, we may don't need to check irq here.
So you agree to use vhost_vdpa_unsetup_vq_irq()?
Thanks
>
> Thanks!
>>
>> Thanks
>>
>>
>>> can just unregister it.
>>>
>>> Thanks!
>>>>
>>>> Thanks
>>>>
>>>>
>>>>> +
>>>>> static int vhost_vdpa_release(struct inode *inode, struct file
>>>>> *filep)
>>>>> {
>>>>> struct vhost_vdpa *v = filep->private_data;
>>>>> @@ -777,6 +853,7 @@ static int vhost_vdpa_release(struct inode
>>>>> *inode, struct file *filep)
>>>>> vhost_vdpa_iotlb_free(v);
>>>>> vhost_vdpa_free_domain(v);
>>>>> vhost_vdpa_config_put(v);
>>>>> + vhost_vdpa_clean_irq(v);
>>>>> vhost_dev_cleanup(&v->vdev);
>>>>> kfree(v->vdev.vqs);
>>>>> mutex_unlock(&d->mutex);
>>>>
>>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
[not found] ` <bf10cde9-db86-a1ac-e2a8-363735e49afb@intel.com>
@ 2020-08-05 5:53 ` Jason Wang
0 siblings, 0 replies; 13+ messages in thread
From: Jason Wang @ 2020-08-05 5:53 UTC (permalink / raw)
To: Zhu, Lingshan, Michael S. Tsirkin
Cc: shahafs, wanpengli, parav, kvm, netdev, sean.j.christopherson,
virtualization, eli, pbonzini
On 2020/8/5 下午1:49, Zhu, Lingshan wrote:
>
>
> On 8/5/2020 10:16 AM, Jason Wang wrote:
>>
>> On 2020/8/4 下午5:21, Michael S. Tsirkin wrote:
>>>>>>> +struct vhost_vring_call {
>>>>>>> + struct eventfd_ctx *ctx;
>>>>>>> + struct irq_bypass_producer producer;
>>>>>>> + spinlock_t ctx_lock;
>>>>>> It's not clear to me why we need ctx_lock here.
>>>>>>
>>>>>> Thanks
>>>>> Hi Jason,
>>>>>
>>>>> we use this lock to protect the eventfd_ctx and irq from race
>>>>> conditions,
>>>> We don't support irq notification from vDPA device driver in this
>>>> version,
>>>> do we still have race condition?
>>>>
>>>> Thanks
>>> Jason I'm not sure what you are trying to say here.
>>
>>
>> I meant we change the API from V4 so driver won't notify us if irq is
>> changed.
>>
>> Then it looks to me there's no need for the ctx_lock, everyhing could
>> be synchronized with vq mutex.
>>
>> Thanks
> from V4 to V5, there are only some minor improvements and bug fix, get_vq_irq() almost stays untouched, mutex can work for this, however I see the vq mutex is used in many scenarios.
> We only use this lock to protect the producer information, can this help to get less coupling, defensive code for less bugs?
I think not, vq mutex is used to protect all vq related data structure,
introducing another one will increase the complexity.
Thanks
>
> Thanks
>>
>>>
>>>
>>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 4/6] vhost_vdpa: implement IRQ offloading in vhost_vdpa
[not found] ` <20200731065533.4144-5-lingshan.zhu@intel.com>
2020-08-04 8:51 ` [PATCH V5 4/6] vhost_vdpa: implement IRQ offloading in vhost_vdpa Jason Wang
@ 2020-08-05 8:06 ` Jason Wang
1 sibling, 0 replies; 13+ messages in thread
From: Jason Wang @ 2020-08-05 8:06 UTC (permalink / raw)
To: Zhu Lingshan, alex.williamson, mst, pbonzini,
sean.j.christopherson, wanpengli
Cc: shahafs, parav, kvm, netdev, virtualization, eli
On 2020/7/31 下午2:55, Zhu Lingshan wrote:
> +static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
> +{
> + struct vhost_virtqueue *vq = &v->vqs[qid];
> + const struct vdpa_config_ops *ops = v->vdpa->config;
> + struct vdpa_device *vdpa = v->vdpa;
> + int ret, irq;
> +
> + spin_lock(&vq->call_ctx.ctx_lock);
> + irq = ops->get_vq_irq(vdpa, qid);
Btw, this assumes that get_vq_irq is mandatory. This looks wrong since
there's no guarantee that the vDPA device driver can see the irq. And this
breaks the vdpa simulator.
Let's add a check and make it optional by document this assumption in
the vdpa.h.
Thanks
> + if (!vq->call_ctx.ctx || irq < 0) {
> + spin_unlock(&vq->call_ctx.ctx_lock);
> + return;
> + }
> +
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
2020-08-05 2:16 ` Jason Wang
[not found] ` <bf10cde9-db86-a1ac-e2a8-363735e49afb@intel.com>
@ 2020-08-10 13:37 ` Michael S. Tsirkin
2020-08-11 2:53 ` Jason Wang
1 sibling, 1 reply; 13+ messages in thread
From: Michael S. Tsirkin @ 2020-08-10 13:37 UTC (permalink / raw)
To: Jason Wang
Cc: shahafs, wanpengli, parav, kvm, netdev, sean.j.christopherson,
virtualization, eli, pbonzini, Zhu, Lingshan
On Wed, Aug 05, 2020 at 10:16:16AM +0800, Jason Wang wrote:
>
> On 2020/8/4 下午5:21, Michael S. Tsirkin wrote:
> > > > > > +struct vhost_vring_call {
> > > > > > + struct eventfd_ctx *ctx;
> > > > > > + struct irq_bypass_producer producer;
> > > > > > + spinlock_t ctx_lock;
> > > > > It's not clear to me why we need ctx_lock here.
> > > > >
> > > > > Thanks
> > > > Hi Jason,
> > > >
> > > > we use this lock to protect the eventfd_ctx and irq from race conditions,
> > > We don't support irq notification from vDPA device driver in this version,
> > > do we still have race condition?
> > >
> > > Thanks
> > Jason I'm not sure what you are trying to say here.
>
>
> I meant we change the API from V4 so driver won't notify us if irq is
> changed.
>
> Then it looks to me there's no need for the ctx_lock, everyhing could be
> synchronized with vq mutex.
>
> Thanks
Jason do you want to post a cleanup patch simplifying code along these
lines?
Thanks,
> >
> >
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH V5 1/6] vhost: introduce vhost_vring_call
2020-08-10 13:37 ` Michael S. Tsirkin
@ 2020-08-11 2:53 ` Jason Wang
0 siblings, 0 replies; 13+ messages in thread
From: Jason Wang @ 2020-08-11 2:53 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: shahafs, wanpengli, parav, kvm, netdev, sean.j.christopherson,
virtualization, eli, pbonzini, Zhu, Lingshan
On 2020/8/10 下午9:37, Michael S. Tsirkin wrote:
> On Wed, Aug 05, 2020 at 10:16:16AM +0800, Jason Wang wrote:
>> On 2020/8/4 下午5:21, Michael S. Tsirkin wrote:
>>>>>>> +struct vhost_vring_call {
>>>>>>> + struct eventfd_ctx *ctx;
>>>>>>> + struct irq_bypass_producer producer;
>>>>>>> + spinlock_t ctx_lock;
>>>>>> It's not clear to me why we need ctx_lock here.
>>>>>>
>>>>>> Thanks
>>>>> Hi Jason,
>>>>>
>>>>> we use this lock to protect the eventfd_ctx and irq from race conditions,
>>>> We don't support irq notification from vDPA device driver in this version,
>>>> do we still have race condition?
>>>>
>>>> Thanks
>>> Jason I'm not sure what you are trying to say here.
>>
>> I meant we change the API from V4 so driver won't notify us if irq is
>> changed.
>>
>> Then it looks to me there's no need for the ctx_lock, everyhing could be
>> synchronized with vq mutex.
>>
>> Thanks
> Jason do you want to post a cleanup patch simplifying code along these
> lines?
Ling Shan promised to post a patch to fix this.
Thanks
>
> Thanks,
>
>
>>>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2020-08-11 2:54 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <20200731065533.4144-1-lingshan.zhu@intel.com>
[not found] ` <20200731065533.4144-2-lingshan.zhu@intel.com>
2020-08-04 8:38 ` [PATCH V5 1/6] vhost: introduce vhost_vring_call Jason Wang
[not found] ` <d51dd4e3-7513-c771-104c-b61f9ee70f30@intel.com>
2020-08-04 8:53 ` Jason Wang
2020-08-04 9:21 ` Michael S. Tsirkin
2020-08-05 2:16 ` Jason Wang
[not found] ` <bf10cde9-db86-a1ac-e2a8-363735e49afb@intel.com>
2020-08-05 5:53 ` Jason Wang
2020-08-10 13:37 ` Michael S. Tsirkin
2020-08-11 2:53 ` Jason Wang
[not found] ` <4605de34-c426-33d4-714b-e03716d0374c@intel.com>
2020-08-05 2:20 ` Jason Wang
[not found] ` <20200731065533.4144-5-lingshan.zhu@intel.com>
2020-08-04 8:51 ` [PATCH V5 4/6] vhost_vdpa: implement IRQ offloading in vhost_vdpa Jason Wang
[not found] ` <ae5385dc-6637-c5a3-b00a-02f66bb9a85f@intel.com>
2020-08-04 9:36 ` Michael S. Tsirkin
2020-08-05 2:36 ` Jason Wang
[not found] ` <cb01a490-e9e6-42f5-b6c3-caefa2d91b9f@intel.com>
2020-08-05 5:51 ` Jason Wang
2020-08-05 8:06 ` Jason Wang
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).