All of lore.kernel.org
 help / color / mirror / Atom feed
From: Jason Wang <jasowang@redhat.com>
To: wexu@redhat.com, qemu-devel@nongnu.org
Cc: tiwei.bie@intel.com, jfreimann@redhat.com, maxime.coquelin@redhat.com
Subject: Re: [Qemu-devel] [[RFC v3 03/12] virtio: init memory cache for packed ring
Date: Mon, 15 Oct 2018 11:10:12 +0800	[thread overview]
Message-ID: <fef2ece0-03ca-3eef-3c21-31ee0a6440b7@redhat.com> (raw)
In-Reply-To: <1539266915-15216-4-git-send-email-wexu@redhat.com>



On 2018年10月11日 22:08, wexu@redhat.com wrote:
> From: Wei Xu <wexu@redhat.com>
>
> Expand 1.0 by adding offset calculation accordingly.

This is only part of what this patch does, and I suggest splitting the 
rest out into another patch.

>
> Signed-off-by: Wei Xu <wexu@redhat.com>
> ---
>   hw/virtio/vhost.c          | 16 ++++++++--------
>   hw/virtio/virtio.c         | 35 +++++++++++++++++++++++------------
>   include/hw/virtio/virtio.h |  4 ++--
>   3 files changed, 33 insertions(+), 22 deletions(-)
>
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 569c405..9df2da3 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -996,14 +996,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
>           r = -ENOMEM;
>           goto fail_alloc_desc;
>       }
> -    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
> +    vq->avail_size = s = l = virtio_queue_get_driver_size(vdev, idx);

Let's try to use a more consistent naming scheme, e.g. either 
avail/used or driver/device.

I prefer to use avail/used, it can save lots of unnecessary changes.

>       vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
>       vq->avail = vhost_memory_map(dev, a, &l, 0);
>       if (!vq->avail || l != s) {
>           r = -ENOMEM;
>           goto fail_alloc_avail;
>       }
> -    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
> +    vq->used_size = s = l = virtio_queue_get_device_size(vdev, idx);
>       vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
>       vq->used = vhost_memory_map(dev, a, &l, 1);
>       if (!vq->used || l != s) {
> @@ -1051,10 +1051,10 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
>   fail_vector:
>   fail_kick:
>   fail_alloc:
> -    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
> +    vhost_memory_unmap(dev, vq->used, virtio_queue_get_device_size(vdev, idx),
>                          0, 0);
>   fail_alloc_used:
> -    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
> +    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_driver_size(vdev, idx),
>                          0, 0);
>   fail_alloc_avail:
>       vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
> @@ -1101,10 +1101,10 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
>                                                   vhost_vq_index);
>       }
>   
> -    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
> -                       1, virtio_queue_get_used_size(vdev, idx));
> -    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
> -                       0, virtio_queue_get_avail_size(vdev, idx));
> +    vhost_memory_unmap(dev, vq->used, virtio_queue_get_device_size(vdev, idx),
> +                       1, virtio_queue_get_device_size(vdev, idx));
> +    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_driver_size(vdev, idx),
> +                       0, virtio_queue_get_driver_size(vdev, idx));
>       vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
>                          0, virtio_queue_get_desc_size(vdev, idx));
>   }
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 500eecf..bfb3364 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -162,11 +162,8 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
>       VRingMemoryRegionCaches *old = vq->vring.caches;
>       VRingMemoryRegionCaches *new = NULL;
>       hwaddr addr, size;
> -    int event_size;
>       int64_t len;
>   
> -    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
> -
>       addr = vq->vring.desc;
>       if (!addr) {
>           goto out_no_cache;
> @@ -174,13 +171,13 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
>       new = g_new0(VRingMemoryRegionCaches, 1);
>       size = virtio_queue_get_desc_size(vdev, n);
>       len = address_space_cache_init(&new->desc, vdev->dma_as,
> -                                   addr, size, false);
> +                                   addr, size, true);

This looks wrong; for a split virtqueue, the descriptor ring is read-only.

>       if (len < size) {
>           virtio_error(vdev, "Cannot map desc");
>           goto err_desc;
>       }
>   
> -    size = virtio_queue_get_used_size(vdev, n) + event_size;
> +    size = virtio_queue_get_device_size(vdev, n);
>       len = address_space_cache_init(&new->used, vdev->dma_as,
>                                      vq->vring.used, size, true);
>       if (len < size) {
> @@ -188,7 +185,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
>           goto err_used;
>       }
>   
> -    size = virtio_queue_get_avail_size(vdev, n) + event_size;
> +    size = virtio_queue_get_driver_size(vdev, n);
>       len = address_space_cache_init(&new->avail, vdev->dma_as,
>                                      vq->vring.avail, size, false);
>       if (len < size) {
> @@ -2339,16 +2336,30 @@ hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
>       return sizeof(VRingDesc) * vdev->vq[n].vring.num;
>   }
>   
> -hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
> +hwaddr virtio_queue_get_driver_size(VirtIODevice *vdev, int n)
>   {
> -    return offsetof(VRingAvail, ring) +
> -        sizeof(uint16_t) * vdev->vq[n].vring.num;
> +    int s;
> +
> +    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
> +        return sizeof(struct VRingPackedDescEvent);
> +    } else {
> +        s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
> +        return offsetof(VRingAvail, ring) +
> +            sizeof(uint16_t) * vdev->vq[n].vring.num + s;

I would tend to move this to an independent patch.

Thanks

> +    }
>   }
>   
> -hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
> +hwaddr virtio_queue_get_device_size(VirtIODevice *vdev, int n)
>   {
> -    return offsetof(VRingUsed, ring) +
> -        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
> +    int s;
> +
> +    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
> +        return sizeof(struct VRingPackedDescEvent);
> +    } else {
> +        s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
> +        return offsetof(VRingUsed, ring) +
> +            sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
> +    }
>   }
>   
>   uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index 9c1fa07..e323e76 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -270,8 +270,8 @@ hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
>   hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
>   hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
>   hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
> -hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
> -hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
> +hwaddr virtio_queue_get_driver_size(VirtIODevice *vdev, int n);
> +hwaddr virtio_queue_get_device_size(VirtIODevice *vdev, int n);
>   uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
>   void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
>   void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n);

  reply	other threads:[~2018-10-15  3:10 UTC|newest]

Thread overview: 36+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-10-11 14:08 [Qemu-devel] [RFC v3 00/12] packed ring virtio-net userspace backend support wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 01/12] virtio: introduce packed ring definitions wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 02/12] virtio: redefine structure & memory cache for packed ring wexu
2018-10-15  3:03   ` Jason Wang
2018-10-15  7:26     ` Wei Xu
2018-10-15  8:03       ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 03/12] virtio: init " wexu
2018-10-15  3:10   ` Jason Wang [this message]
2018-10-15  7:09     ` Wei Xu
2018-10-15  7:54       ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 04/12] virtio: init wrap counter " wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 05/12] virtio: init and desc empty check " wexu
2018-10-15  3:18   ` Jason Wang
2018-10-15  7:04     ` Wei Xu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 06/12] virtio: get avail bytes " wexu
2018-10-15  3:47   ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 07/12] virtio: fill/flush/pop " wexu
2018-10-15  6:14   ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 08/12] virtio: event suppression support " wexu
2018-10-15  6:55   ` Jason Wang
2018-10-15  6:59   ` Jason Wang
2018-10-15  8:20     ` Wei Xu
2018-10-15  9:11       ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 09/12] virtio-net: fill head desc after done all in a chain wexu
2018-10-15  7:45   ` Jason Wang
2018-10-15  8:03     ` Wei Xu
2018-10-15  8:05       ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 10/12] virtio: packed ring feature bit for userspace backend wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 11/12] virtio: enable packed ring via a new command line wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 12/12] virtio: feature vhost-net support for packed ring wexu
2018-10-15  7:50   ` Jason Wang
2018-10-15  8:11     ` Wei Xu
2018-11-21 13:03   ` Maxime Coquelin
2018-11-22  3:46     ` Wei Xu
2018-11-21 14:39 ` [Qemu-devel] [RFC v3 00/12] packed ring virtio-net userspace backend support Tiwei Bie
2018-11-22  3:43   ` Wei Xu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=fef2ece0-03ca-3eef-3c21-31ee0a6440b7@redhat.com \
    --to=jasowang@redhat.com \
    --cc=jfreimann@redhat.com \
    --cc=maxime.coquelin@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=tiwei.bie@intel.com \
    --cc=wexu@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.