From: Paul Durrant <Paul.Durrant@citrix.com>
To: Paul Durrant <Paul.Durrant@citrix.com>,
	"xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>,
	"qemu-block@nongnu.org" <qemu-block@nongnu.org>,
	"qemu-devel@nongnu.org" <qemu-devel@nongnu.org>
Cc: Anthony Perard <anthony.perard@citrix.com>,
	Kevin Wolf <kwolf@redhat.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Max Reitz <mreitz@redhat.com>
Subject: Re: [PATCH v2 7/8] xen_disk: use a single entry iovec
Date: Fri, 4 May 2018 15:21:29 +0000
Message-ID: <80fb47d07ec1483f889319885c8869e9__20419.4493046545$1525447211$gmane$org@AMSPEX02CL03.citrite.net>
In-Reply-To: <1525442134-20488-8-git-send-email-paul.durrant@citrix.com>

> -----Original Message-----
> From: Paul Durrant [mailto:paul.durrant@citrix.com]
> Sent: 04 May 2018 14:56
> To: xen-devel@lists.xenproject.org; qemu-block@nongnu.org; qemu-devel@nongnu.org
> Cc: Paul Durrant <Paul.Durrant@citrix.com>; Stefano Stabellini
> <sstabellini@kernel.org>; Anthony Perard <anthony.perard@citrix.com>;
> Kevin Wolf <kwolf@redhat.com>; Max Reitz <mreitz@redhat.com>
> Subject: [PATCH v2 7/8] xen_disk: use a single entry iovec
> 
> Since xen_disk now always copies data to and from a guest, there is no need
> to maintain a vector entry corresponding to every page of a request.
> This means there is less per-request state to maintain, so the ioreq
> structure can shrink significantly.
> 
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> Cc: Stefano Stabellini <sstabellini@kernel.org>
> Cc: Anthony Perard <anthony.perard@citrix.com>
> Cc: Kevin Wolf <kwolf@redhat.com>
> Cc: Max Reitz <mreitz@redhat.com>
> 
> v2:
>  - Re-based

Unfortunately I managed to drop a hunk during the rebase, so this patch is actually broken. I'll send a rectified v3 shortly.

  Paul
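
For readers skimming the diff below: the net effect of the patch is that each ioreq now carries one contiguous, page-aligned buffer and publishes it to the block layer as a single iovec entry, instead of keeping one iovec entry (and one grant reference/page pointer) per guest page. The following standalone C sketch models that buffer handling. It is an illustration only, not QEMU code: PAGE_SZ, MAX_SEGMENTS, struct segment and the example lengths are placeholder stand-ins for the blkif/QEMU types used in the quoted hunks.

    /* Standalone model of the single-buffer, single-iovec scheme.
     * Comments point at the corresponding places in the patch below. */
    #define _POSIX_C_SOURCE 200112L
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/uio.h>

    #define PAGE_SZ       4096
    #define MAX_SEGMENTS  11        /* cf. BLKIF_MAX_SEGMENTS_PER_REQUEST */

    struct segment { size_t len; }; /* stand-in for one blkif request segment */

    int main(void)
    {
        struct segment seg[MAX_SEGMENTS] = { { 512 }, { 4096 }, { 2048 } };
        int nseg = 3;
        size_t size = 0;
        struct iovec v;             /* the single-entry "vector" */
        void *buf;

        /* ioreq_parse() now only accumulates the total request size. */
        for (int i = 0; i < nseg; i++) {
            size += seg[i].len;
        }

        /* ioreq_runio_qemu_aio(): one page-aligned allocation per request
         * (qemu_memalign(XC_PAGE_SIZE, ioreq->size) in the patch). */
        if (posix_memalign(&buf, PAGE_SZ, size) != 0) {
            return 1;
        }

        /* ioreq_grant_copy() copies grant data into/out of buf, then the
         * whole buffer is published as one iovec entry
         * (qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size)). */
        v.iov_base = buf;
        v.iov_len  = size;
        printf("one iovec entry covering %zu bytes\n", v.iov_len);

        free(buf);                  /* qemu_vfree(ioreq->buf) on completion */
        return 0;
    }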

> ---
>  hw/block/xen_disk.c | 71 ++++++++++++++---------------------------------------
>  1 file changed, 18 insertions(+), 53 deletions(-)
> 
> diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> index 28be8b6..230961f 100644
> --- a/hw/block/xen_disk.c
> +++ b/hw/block/xen_disk.c
> @@ -46,13 +46,10 @@ struct ioreq {
>      /* parsed request */
>      off_t               start;
>      QEMUIOVector        v;
> +    void                *buf;
> +    size_t              size;
>      int                 presync;
> 
> -    /* grant mapping */
> -    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
> -    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
> -    void                *pages;
> -
>      /* aio status */
>      int                 aio_inflight;
>      int                 aio_errors;
> @@ -110,12 +107,10 @@ static void ioreq_reset(struct ioreq *ioreq)
>      memset(&ioreq->req, 0, sizeof(ioreq->req));
>      ioreq->status = 0;
>      ioreq->start = 0;
> +    ioreq->buf = NULL;
> +    ioreq->size = 0;
>      ioreq->presync = 0;
> 
> -    memset(ioreq->refs, 0, sizeof(ioreq->refs));
> -    memset(ioreq->page, 0, sizeof(ioreq->page));
> -    ioreq->pages = NULL;
> -
>      ioreq->aio_inflight = 0;
>      ioreq->aio_errors = 0;
> 
> @@ -138,7 +133,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
>          ioreq = g_malloc0(sizeof(*ioreq));
>          ioreq->blkdev = blkdev;
>          blkdev->requests_total++;
> -        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
> +        qemu_iovec_init(&ioreq->v, 1);
>      } else {
>          /* get one from freelist */
>          ioreq = QLIST_FIRST(&blkdev->freelist);
> @@ -183,7 +178,6 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
>  static int ioreq_parse(struct ioreq *ioreq)
>  {
>      struct XenBlkDev *blkdev = ioreq->blkdev;
> -    uintptr_t mem;
>      size_t len;
>      int i;
> 
> @@ -230,13 +224,10 @@ static int ioreq_parse(struct ioreq *ioreq)
>              goto err;
>          }
> 
> -        ioreq->refs[i]   = ioreq->req.seg[i].gref;
> -
> -        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
>          len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
> -        qemu_iovec_add(&ioreq->v, (void*)mem, len);
> +        ioreq->size += len;
>      }
> -    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
> +    if (ioreq->start + ioreq->size > blkdev->file_size) {
>          xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
>          goto err;
>      }
> @@ -247,35 +238,6 @@ err:
>      return -1;
>  }
> 
> -static void ioreq_free_copy_buffers(struct ioreq *ioreq)
> -{
> -    int i;
> -
> -    for (i = 0; i < ioreq->v.niov; i++) {
> -        ioreq->page[i] = NULL;
> -    }
> -
> -    qemu_vfree(ioreq->pages);
> -}
> -
> -static int ioreq_init_copy_buffers(struct ioreq *ioreq)
> -{
> -    int i;
> -
> -    if (ioreq->v.niov == 0) {
> -        return 0;
> -    }
> -
> -    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
> -
> -    for (i = 0; i < ioreq->v.niov; i++) {
> -        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
> -        ioreq->v.iov[i].iov_base = ioreq->page[i];
> -    }
> -
> -    return 0;
> -}
> -
>  static int ioreq_grant_copy(struct ioreq *ioreq)
>  {
>      struct XenBlkDev *blkdev = ioreq->blkdev;
> @@ -284,6 +246,7 @@ static int ioreq_grant_copy(struct ioreq *ioreq)
>      int i, count, rc;
>      int64_t file_blk = ioreq->blkdev->file_blk;
>      bool to_domain = (ioreq->req.operation == BLKIF_OP_READ);
> +    void *virt = ioreq->buf;
> 
>      if (ioreq->v.niov == 0) {
>          return 0;
> @@ -293,16 +256,17 @@ static int ioreq_grant_copy(struct ioreq *ioreq)
> 
>      for (i = 0; i < count; i++) {
>          if (to_domain) {
> -            segs[i].dest.foreign.ref = ioreq->refs[i];
> +            segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
>              segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
> -            segs[i].source.virt = ioreq->v.iov[i].iov_base;
> +            segs[i].source.virt = virt;
>          } else {
> -            segs[i].source.foreign.ref = ioreq->refs[i];
> +            segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
>              segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
> -            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
> +            segs[i].dest.virt = virt;
>          }
>          segs[i].len = (ioreq->req.seg[i].last_sect
>                         - ioreq->req.seg[i].first_sect + 1) * file_blk;
> +        virt += segs[i].len;
>      }
> 
>      rc = xen_be_copy_grant_refs(xendev, to_domain, segs, count);
> @@ -314,6 +278,7 @@ static int ioreq_grant_copy(struct ioreq *ioreq)
>          return -1;
>      }
> 
> +    qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
>      return rc;
>  }
> 
> @@ -348,14 +313,14 @@ static void qemu_aio_complete(void *opaque, int ret)
>          if (ret == 0) {
>              ioreq_grant_copy(ioreq);
>          }
> -        ioreq_free_copy_buffers(ioreq);
> +        qemu_vfree(ioreq->buf);
>          break;
>      case BLKIF_OP_WRITE:
>      case BLKIF_OP_FLUSH_DISKCACHE:
>          if (!ioreq->req.nr_segments) {
>              break;
>          }
> -        ioreq_free_copy_buffers(ioreq);
> +        qemu_vfree(ioreq->buf);
>          break;
>      default:
>          break;
> @@ -423,12 +388,12 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
>  {
>      struct XenBlkDev *blkdev = ioreq->blkdev;
> 
> -    ioreq_init_copy_buffers(ioreq);
> +    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
>      if (ioreq->req.nr_segments &&
>          (ioreq->req.operation == BLKIF_OP_WRITE ||
>           ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
>          ioreq_grant_copy(ioreq)) {
> -        ioreq_free_copy_buffers(ioreq);
> +        qemu_vfree(ioreq->buf);
>          goto err;
>      }
> 
> --
> 2.1.4
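
As a footnote on the reworked ioreq_grant_copy() above: the per-segment loop now takes the grant reference and offset directly from the blkif request and walks a running pointer through the single buffer, so segment i copies to or from buf plus the sum of the preceding segment lengths. Below is a standalone sketch of that layout walk; it is not the QEMU code, and struct seg_desc, struct copy_seg, the gref values and the assumed 512-byte sector size (FILE_BLK, i.e. blkdev->file_blk) are simplified stand-ins for the real blkif and grant-copy segment structures.

    /* Standalone sketch of how segments map onto the single per-request
     * buffer in the reworked ioreq_grant_copy(). */
    #include <stdint.h>
    #include <stdio.h>

    #define FILE_BLK 512                  /* assumed sector size */

    struct seg_desc {                     /* simplified blkif segment */
        uint32_t gref;
        uint8_t  first_sect, last_sect;
    };

    struct copy_seg {                     /* simplified grant-copy segment */
        uint32_t ref;
        uint64_t offset;
        void    *virt;
        uint64_t len;
    };

    int main(void)
    {
        struct seg_desc req[2] = { { 10, 0, 7 }, { 11, 0, 3 } };
        char buf[12 * FILE_BLK];          /* the single per-request buffer */
        struct copy_seg segs[2];
        char *virt = buf;                 /* running cursor, as in the patch */

        for (int i = 0; i < 2; i++) {
            segs[i].ref    = req[i].gref;
            segs[i].offset = req[i].first_sect * FILE_BLK;
            segs[i].len    = (req[i].last_sect - req[i].first_sect + 1) * FILE_BLK;
            segs[i].virt   = virt;
            virt += segs[i].len;          /* next segment lands right after */
        }

        for (int i = 0; i < 2; i++) {
            printf("seg %d: gref %u, %llu bytes at buf+%td\n", i,
                   (unsigned)segs[i].ref, (unsigned long long)segs[i].len,
                   (char *)segs[i].virt - buf);
        }
        return 0;
    }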


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

