From: Jason Wang <jasowang@redhat.com>
To: "Eugenio Pérez" <eperezma@redhat.com>, qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
	Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Richard Henderson <richard.henderson@linaro.org>,
	Markus Armbruster <armbru@redhat.com>,
	Gautam Dawar <gdawar@xilinx.com>,
	virtualization@lists.linux-foundation.org,
	Eduardo Habkost <ehabkost@redhat.com>,
	Harpreet Singh Anand <hanand@xilinx.com>,
	Xiao W Wang <xiao.w.wang@intel.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Eli Cohen <eli@mellanox.com>, Paolo Bonzini <pbonzini@redhat.com>,
	Zhu Lingshan <lingshan.zhu@intel.com>,
	Eric Blake <eblake@redhat.com>
Subject: Re: [PATCH 17/31] vdpa: adapt vhost_ops callbacks to svq
Date: Sun, 30 Jan 2022 12:03:36 +0800
Message-ID: <82b8c3bf-1b11-86c7-4fad-294f5ccf1278@redhat.com>
In-Reply-To: <20220121202733.404989-18-eperezma@redhat.com>


On 2022/1/22 4:27 AM, Eugenio Pérez wrote:
> First half of the buffer forwarding part, preparing the vhost-vdpa
> callbacks to offer SVQ. QEMU cannot enable it at this moment, so this
> is effectively dead code for now, but it helps to reduce patch size.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>   hw/virtio/vhost-shadow-virtqueue.h |   2 +-
>   hw/virtio/vhost-shadow-virtqueue.c |  21 ++++-
>   hw/virtio/vhost-vdpa.c             | 133 ++++++++++++++++++++++++++---
>   3 files changed, 143 insertions(+), 13 deletions(-)
>
> diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
> index 035207a469..39aef5ffdf 100644
> --- a/hw/virtio/vhost-shadow-virtqueue.h
> +++ b/hw/virtio/vhost-shadow-virtqueue.h
> @@ -35,7 +35,7 @@ size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
>   
>   void vhost_svq_stop(VhostShadowVirtqueue *svq);
>   
> -VhostShadowVirtqueue *vhost_svq_new(void);
> +VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize);
>   
>   void vhost_svq_free(VhostShadowVirtqueue *vq);
>   
> diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
> index f129ec8395..7c168075d7 100644
> --- a/hw/virtio/vhost-shadow-virtqueue.c
> +++ b/hw/virtio/vhost-shadow-virtqueue.c
> @@ -277,9 +277,17 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
>   /**
>    * Creates vhost shadow virtqueue, and instruct vhost device to use the shadow
>    * methods and file descriptors.
> + *
> + * @qsize Shadow VirtQueue size
> + *
> + * Returns the new virtqueue or NULL.
> + *
> + * In case of error, reason is reported through error_report.
>    */
> -VhostShadowVirtqueue *vhost_svq_new(void)
> +VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize)
>   {
> +    size_t desc_size = sizeof(vring_desc_t) * qsize;
> +    size_t device_size, driver_size;
>       g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
>       int r;
>   
> @@ -300,6 +308,15 @@ VhostShadowVirtqueue *vhost_svq_new(void)
>       /* Placeholder descriptor, it should be deleted at set_kick_fd */
>       event_notifier_init_fd(&svq->svq_kick, INVALID_SVQ_KICK_FD);
>   
> +    svq->vring.num = qsize;


I wonder if this is the best approach. E.g. some hardware can support
up to a 32K queue size, so this will probably end up with:

1) SVQ uses a 32K queue size
2) the hardware queue uses 256

Or SVQ could stick to 256, but would that cause trouble if we want to
add event index support?
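
For instance, a middle ground could clamp the shadow ring to the
smaller of the two sizes. A minimal sketch (vhost_vdpa_pick_svq_size()
is a hypothetical helper, not part of this series; it assumes QEMU's
MIN and pow2floor utilities):

    static uint16_t vhost_vdpa_pick_svq_size(uint16_t dev_max,
                                             uint16_t guest_num)
    {
        /* Never exceed what either side can handle */
        uint16_t num = MIN(dev_max, guest_num);

        /* Split virtqueues require a power-of-2 ring size */
        return pow2floor(num);
    }

That would keep the SVQ from ballooning to a 32K device maximum while
still matching the guest's view of the queue.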


> +    driver_size = vhost_svq_driver_area_size(svq);
> +    device_size = vhost_svq_device_area_size(svq);
> +    svq->vring.desc = qemu_memalign(qemu_real_host_page_size, driver_size);
> +    svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
> +    memset(svq->vring.desc, 0, driver_size);
> +    svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
> +    memset(svq->vring.used, 0, device_size);
> +
>       event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
>       return g_steal_pointer(&svq);
>   
> @@ -318,5 +335,7 @@ void vhost_svq_free(VhostShadowVirtqueue *vq)
>       event_notifier_cleanup(&vq->hdev_kick);
>       event_notifier_set_handler(&vq->hdev_call, NULL);
>       event_notifier_cleanup(&vq->hdev_call);
> +    qemu_vfree(vq->vring.desc);
> +    qemu_vfree(vq->vring.used);
>       g_free(vq);
>   }
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 9d801cf907..53e14bafa0 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -641,20 +641,52 @@ static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
>   }
>   
> -static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
> -                                      struct vhost_vring_state *ring)
> +static int vhost_vdpa_set_dev_vring_num(struct vhost_dev *dev,
> +                                        struct vhost_vring_state *ring)
>   {
>       trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
>   }
>   
> -static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
> -                                       struct vhost_vring_state *ring)
> +static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
> +                                    struct vhost_vring_state *ring)
> +{
> +    struct vhost_vdpa *v = dev->opaque;
> +
> +    if (v->shadow_vqs_enabled) {
> +        /*
> +         * Vring num was set at device start. SVQ num is handled by VirtQueue
> +         * code
> +         */
> +        return 0;
> +    }
> +
> +    return vhost_vdpa_set_dev_vring_num(dev, ring);
> +}
> +
> +static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
> +                                         struct vhost_vring_state *ring)
>   {
>       trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
>       return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
>   }
>   
> +static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
> +                                     struct vhost_vring_state *ring)
> +{
> +    struct vhost_vdpa *v = dev->opaque;
> +
> +    if (v->shadow_vqs_enabled) {
> +        /*
> +         * Vring base was set at device start. SVQ base is handled by VirtQueue
> +         * code
> +         */
> +        return 0;
> +    }
> +
> +    return vhost_vdpa_set_dev_vring_base(dev, ring);
> +}
> +
>   static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
>                                          struct vhost_vring_state *ring)
>   {
> @@ -784,8 +816,8 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>       }
>   }
>   
> -static int vhost_vdpa_get_features(struct vhost_dev *dev,
> -                                     uint64_t *features)
> +static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
> +                                       uint64_t *features)
>   {
>       int ret;
>   
> @@ -794,15 +826,64 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev,
>       return ret;
>   }
>   
> +static int vhost_vdpa_get_features(struct vhost_dev *dev, uint64_t *features)
> +{
> +    struct vhost_vdpa *v = dev->opaque;
> +    int ret = vhost_vdpa_get_dev_features(dev, features);
> +
> +    if (ret == 0 && v->shadow_vqs_enabled) {
> +        /* Filter only features that SVQ can offer to guest */
> +        vhost_svq_valid_guest_features(features);
> +    }


Sorry if I've asked this before, but I think it's sufficient to filter
out the device features that we don't support during vhost
initialization and fail there. Any reason we need to do it again here?
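
To make the alternative concrete, a rough sketch of filtering once at
initialization (the feature mask below is illustrative only, not the
real SVQ feature list; BIT_ULL comes from QEMU's bitops helpers):

    static const uint64_t svq_supported_features =
        BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ANY_LAYOUT);

    static bool vhost_svq_filter_features(uint64_t *features)
    {
        uint64_t filtered = *features & svq_supported_features;

        /* SVQ cannot work at all without VIRTIO_F_VERSION_1 */
        if (!(filtered & BIT_ULL(VIRTIO_F_VERSION_1))) {
            return false;
        }

        *features = filtered;
        return true;
    }

If this ran (and could fail) once during vhost initialization, the
get_features path would not need to re-apply the filter.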


> +
> +    return ret;
> +}
> +
>   static int vhost_vdpa_set_features(struct vhost_dev *dev,
>                                      uint64_t features)
>   {
> +    struct vhost_vdpa *v = dev->opaque;
>       int ret;
>   
>       if (vhost_vdpa_one_time_request(dev)) {
>           return 0;
>       }
>   
> +    if (v->shadow_vqs_enabled) {
> +        uint64_t dev_features, svq_features, acked_features;
> +        bool ok;
> +
> +        ret = vhost_vdpa_get_dev_features(dev, &dev_features);
> +        if (ret != 0) {
> +            error_report("Can't get vdpa device features, got (%d)", ret);
> +            return ret;
> +        }
> +
> +        svq_features = dev_features;
> +        ok = vhost_svq_valid_device_features(&svq_features);
> +        if (unlikely(!ok)) {
> +            error_report("SVQ Invalid device feature flags, offer: 0x%"
> +                         PRIx64", ok: 0x%"PRIx64, dev->features, svq_features);
> +            return -1;
> +        }
> +
> +        ok = vhost_svq_valid_guest_features(&features);
> +        if (unlikely(!ok)) {
> +            error_report(
> +                "Invalid guest acked feature flag, acked: 0x%"
> +                PRIx64", ok: 0x%"PRIx64, dev->acked_features, features);
> +            return -1;
> +        }
> +
> +        ok = vhost_svq_ack_guest_features(svq_features, features,
> +                                          &acked_features);
> +        if (unlikely(!ok)) {
> +            return -1;
> +        }
> +
> +        features = acked_features;
> +    }
> +
>       trace_vhost_vdpa_set_features(dev, features);
>       ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
>       if (ret) {
> @@ -822,13 +903,31 @@ static int vhost_vdpa_set_owner(struct vhost_dev *dev)
>       return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
>   }
>   
> -static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
> -                    struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
> +static void vhost_vdpa_vq_get_guest_addr(struct vhost_vring_addr *addr,
> +                                         struct vhost_virtqueue *vq)
>   {
> -    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
>       addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
>       addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
>       addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
> +}
> +
> +static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
> +                                  struct vhost_vring_addr *addr,
> +                                  struct vhost_virtqueue *vq)
> +{
> +    struct vhost_vdpa *v = dev->opaque;
> +
> +    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> +
> +    if (v->shadow_vqs_enabled) {
> +        int idx = vhost_vdpa_get_vq_index(dev, addr->index);
> +        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, idx);
> +
> +        vhost_svq_get_vring_addr(svq, addr);
> +    } else {
> +        vhost_vdpa_vq_get_guest_addr(addr, vq);
> +    }
> +
>       trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
>                                    addr->avail_user_addr, addr->used_user_addr);
>       return 0;
> @@ -849,6 +948,12 @@ static void vhost_psvq_free(gpointer svq)
>       vhost_svq_free(svq);
>   }
>   
> +static int vhost_vdpa_get_max_queue_size(struct vhost_dev *dev,
> +                                         uint16_t *qsize)
> +{
> +    return vhost_vdpa_call(dev, VHOST_VDPA_GET_VRING_NUM, qsize);
> +}
> +
>   static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
>                                  Error **errp)
>   {
> @@ -857,6 +962,7 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
>                                                              vhost_psvq_free);
>       uint64_t dev_features;
>       uint64_t svq_features;
> +    uint16_t qsize;
>       int r;
>       bool ok;
>   
> @@ -864,7 +970,7 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
>           goto out;
>       }
>   
> -    r = vhost_vdpa_get_features(hdev, &dev_features);
> +    r = vhost_vdpa_get_dev_features(hdev, &dev_features);
>       if (r != 0) {
>           error_setg(errp, "Can't get vdpa device features, got (%d)", r);
>           return r;
> @@ -879,9 +985,14 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
>           return -1;
>       }
>   
> +    r = vhost_vdpa_get_max_queue_size(hdev, &qsize);
> +    if (unlikely(r)) {
> +        qsize = 256;
> +    }


Should we fail instead of having a "default" value here?
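
I.e., something like this sketch instead of the fallback (errp is
already in scope in vhost_vdpa_init_svq):

    r = vhost_vdpa_get_max_queue_size(hdev, &qsize);
    if (unlikely(r)) {
        error_setg(errp, "Can't get vdpa vring num, got (%d)", r);
        return r;
    }

A silent 256 could hide a misbehaving device, and the rest of this
function already fails hard on comparable errors.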

Thanks


> +
>       shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_psvq_free);
>       for (unsigned n = 0; n < hdev->nvqs; ++n) {
> -        VhostShadowVirtqueue *svq = vhost_svq_new();
> +        VhostShadowVirtqueue *svq = vhost_svq_new(qsize);
>   
>           if (unlikely(!svq)) {
>               error_setg(errp, "Cannot create svq %u", n);
