From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org, qemu-block@nongnu.org,
	qemu-devel@nongnu.org
Cc: Paul Durrant <paul.durrant@citrix.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Anthony Perard <anthony.perard@citrix.com>,
	Kevin Wolf <kwolf@redhat.com>, Max Reitz <mreitz@redhat.com>
Subject: [Qemu-devel] [PATCH 2/4] block/xen_disk: remove use of grant map/unmap
Date: Mon, 30 Apr 2018 13:01:37 +0100	[thread overview]
Message-ID: <1525089699-13411-3-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1525089699-13411-1-git-send-email-paul.durrant@citrix.com>

The grant copy operation was added to libxengnttab in Xen 4.8.0. When grant
copy is available, data from the guest is copied rather than mapped, which
leaves the grant map/unmap code redundant. Removing it (and making grant
copy a hard requirement in blk_init()) significantly simplifies the
xen_disk source.
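
For reference, the remaining code path depends on the grant copy interface
introduced in Xen 4.8. Below is a minimal sketch of copying a page of guest
data through a grant reference (field names follow xengnttab.h; the helper
and its arguments are illustrative, not part of this patch):

    #include <xengnttab.h>
    #include <xen/grant_table.h>    /* GNTCOPY_*, GNTST_okay */

    /* Copy guest data identified by a grant reference into a local
     * buffer, instead of mapping the guest page into our address space. */
    static int copy_from_guest(xengnttab_handle *gnt, uint32_t domid,
                               uint32_t ref, void *buf, uint16_t len)
    {
        xengnttab_grant_copy_segment_t seg;

        seg.flags = GNTCOPY_source_gref;    /* source is a grant ref */
        seg.source.foreign.ref = ref;
        seg.source.foreign.domid = domid;
        seg.source.foreign.offset = 0;
        seg.dest.virt = buf;                /* destination is local memory */
        seg.len = len;

        if (xengnttab_grant_copy(gnt, 1, &seg)) {
            return -1;                      /* the copy operation failed */
        }

        return seg.status == GNTST_okay ? 0 : -1;  /* per-segment status */
    }

In xen_disk, ioreq_grant_copy() builds one such segment per request segment,
using GNTCOPY_dest_gref for reads (copy to the guest) and GNTCOPY_source_gref
for writes (copy from the guest).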

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Anthony Perard <anthony.perard@citrix.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Max Reitz <mreitz@redhat.com>
---
 hw/block/xen_disk.c | 194 ++++++++--------------------------------------------
 1 file changed, 27 insertions(+), 167 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index b33611a..8f4e229 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -36,10 +36,6 @@
 
 /* ------------------------------------------------------------- */
 
-static int batch_maps   = 0;
-
-/* ------------------------------------------------------------- */
-
 #define BLOCK_SIZE  512
 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
 
@@ -51,12 +47,9 @@ struct ioreq {
     off_t               start;
     QEMUIOVector        v;
     int                 presync;
-    uint8_t             mapped;
 
-    /* grant mapping */
     uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int                 prot;
     void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     void                *pages;
 
@@ -89,7 +82,6 @@ struct XenBlkDev {
     int                 protocol;
     blkif_back_rings_t  rings;
     int                 more_work;
-    int                 cnt_map;
 
     /* request lists */
     QLIST_HEAD(inflight_head, ioreq) inflight;
@@ -119,11 +111,9 @@ static void ioreq_reset(struct ioreq *ioreq)
     ioreq->status = 0;
     ioreq->start = 0;
     ioreq->presync = 0;
-    ioreq->mapped = 0;
 
     memset(ioreq->domids, 0, sizeof(ioreq->domids));
     memset(ioreq->refs, 0, sizeof(ioreq->refs));
-    ioreq->prot = 0;
     memset(ioreq->page, 0, sizeof(ioreq->page));
     ioreq->pages = NULL;
 
@@ -204,7 +194,6 @@ static int ioreq_parse(struct ioreq *ioreq)
                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
-        ioreq->prot = PROT_WRITE; /* to memory */
         break;
     case BLKIF_OP_FLUSH_DISKCACHE:
         ioreq->presync = 1;
@@ -213,7 +202,6 @@ static int ioreq_parse(struct ioreq *ioreq)
         }
         /* fall through */
     case BLKIF_OP_WRITE:
-        ioreq->prot = PROT_READ; /* from memory */
         break;
     case BLKIF_OP_DISCARD:
         return 0;
@@ -261,88 +249,6 @@ err:
     return -1;
 }
 
-static void ioreq_unmap(struct ioreq *ioreq)
-{
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
-    int i;
-
-    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
-        return;
-    }
-    if (batch_maps) {
-        if (!ioreq->pages) {
-            return;
-        }
-        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                          "xengnttab_unmap failed: %s\n",
-                          strerror(errno));
-        }
-        ioreq->blkdev->cnt_map -= ioreq->v.niov;
-        ioreq->pages = NULL;
-    } else {
-        for (i = 0; i < ioreq->v.niov; i++) {
-            if (!ioreq->page[i]) {
-                continue;
-            }
-            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
-                xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                              "xengnttab_unmap failed: %s\n",
-                              strerror(errno));
-            }
-            ioreq->blkdev->cnt_map--;
-            ioreq->page[i] = NULL;
-        }
-    }
-    ioreq->mapped = 0;
-}
-
-static int ioreq_map(struct ioreq *ioreq)
-{
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
-    int i;
-
-    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
-        return 0;
-    }
-    if (batch_maps) {
-        ioreq->pages = xengnttab_map_grant_refs
-            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
-        if (ioreq->pages == NULL) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                          "can't map %d grant refs (%s, %d maps)\n",
-                          ioreq->v.niov, strerror(errno),
-                          ioreq->blkdev->cnt_map);
-            return -1;
-        }
-        for (i = 0; i < ioreq->v.niov; i++) {
-            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
-                (uintptr_t)ioreq->v.iov[i].iov_base;
-        }
-        ioreq->blkdev->cnt_map += ioreq->v.niov;
-    } else  {
-        for (i = 0; i < ioreq->v.niov; i++) {
-            ioreq->page[i] = xengnttab_map_grant_ref
-                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
-            if (ioreq->page[i] == NULL) {
-                xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                              "can't map grant ref %d (%s, %d maps)\n",
-                              ioreq->refs[i], strerror(errno),
-                              ioreq->blkdev->cnt_map);
-                ioreq->mapped = 1;
-                ioreq_unmap(ioreq);
-                return -1;
-            }
-            ioreq->v.iov[i].iov_base = ioreq->page[i] +
-                (uintptr_t)ioreq->v.iov[i].iov_base;
-        }
-    }
-    ioreq->mapped = 1;
-    return 0;
-}
-
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
-
 static void ioreq_free_copy_buffers(struct ioreq *ioreq)
 {
     int i;
@@ -424,22 +330,6 @@ static int ioreq_grant_copy(struct ioreq *ioreq)
 
     return rc;
 }
-#else
-static void ioreq_free_copy_buffers(struct ioreq *ioreq)
-{
-    abort();
-}
-
-static int ioreq_init_copy_buffers(struct ioreq *ioreq)
-{
-    abort();
-}
-
-static int ioreq_grant_copy(struct ioreq *ioreq)
-{
-    abort();
-}
-#endif
 
 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
 
@@ -466,32 +356,28 @@ static void qemu_aio_complete(void *opaque, int ret)
         goto done;
     }
 
-    if (xen_feature_grant_copy) {
-        switch (ioreq->req.operation) {
-        case BLKIF_OP_READ:
-            /* in case of failure ioreq->aio_errors is increased */
-            if (ret == 0) {
-                ioreq_grant_copy(ioreq);
-            }
-            ioreq_free_copy_buffers(ioreq);
-            break;
-        case BLKIF_OP_WRITE:
-        case BLKIF_OP_FLUSH_DISKCACHE:
-            if (!ioreq->req.nr_segments) {
-                break;
-            }
-            ioreq_free_copy_buffers(ioreq);
-            break;
-        default:
+    switch (ioreq->req.operation) {
+    case BLKIF_OP_READ:
+        /* in case of failure ioreq->aio_errors is increased */
+        if (ret == 0) {
+            ioreq_grant_copy(ioreq);
+        }
+        ioreq_free_copy_buffers(ioreq);
+        break;
+    case BLKIF_OP_WRITE:
+    case BLKIF_OP_FLUSH_DISKCACHE:
+        if (!ioreq->req.nr_segments) {
             break;
         }
+        ioreq_free_copy_buffers(ioreq);
+        break;
+    default:
+        break;
     }
 
     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
-    if (!xen_feature_grant_copy) {
-        ioreq_unmap(ioreq);
-    }
     ioreq_finish(ioreq);
+
     switch (ioreq->req.operation) {
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
@@ -551,18 +437,13 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
-    if (xen_feature_grant_copy) {
-        ioreq_init_copy_buffers(ioreq);
-        if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
-            ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-            ioreq_grant_copy(ioreq)) {
-                ioreq_free_copy_buffers(ioreq);
-                goto err;
-        }
-    } else {
-        if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
-            goto err;
-        }
+    ioreq_init_copy_buffers(ioreq);
+    if (ioreq->req.nr_segments &&
+        (ioreq->req.operation == BLKIF_OP_WRITE ||
+         ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
+        ioreq_grant_copy(ioreq)) {
+        ioreq_free_copy_buffers(ioreq);
+        goto err;
     }
 
     ioreq->aio_inflight++;
@@ -603,9 +484,6 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     }
     default:
         /* unknown operation (shouldn't happen -- parse catches this) */
-        if (!xen_feature_grant_copy) {
-            ioreq_unmap(ioreq);
-        }
         goto err;
     }
 
@@ -791,10 +669,6 @@ static void blk_alloc(struct XenDevice *xendev)
 
     blkdev->ctx = iothread_get_aio_context(blkdev->iothread);
     blkdev->bh = aio_bh_new(blkdev->ctx, blk_bh, blkdev);
-
-    if (xen_mode != XEN_EMULATE) {
-        batch_maps = 1;
-    }
 }
 
 static void blk_parse_discard(struct XenBlkDev *blkdev)
@@ -877,8 +751,9 @@ static int blk_init(struct XenDevice *xendev)
 
     blkdev->file_blk  = BLOCK_SIZE;
 
-    xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
-                  xen_feature_grant_copy ? "enabled" : "disabled");
+    if (!xen_feature_grant_copy) {
+        goto out_error;
+    }
 
     /* fill info
      * blk_connect supplies sector-size and sectors
@@ -910,15 +785,6 @@ out_error:
     return -1;
 }
 
-/*
- * We need to account for the grant allocations requiring contiguous
- * chunks; the worst case number would be
- *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
- * but in order to keep things simple just use
- *     2 * max_req * max_seg.
- */
-#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
-
 static int blk_connect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
@@ -1079,11 +945,8 @@ static int blk_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    /* Calculate the maximum number of grants needed by ioreqs */
-    max_grants = MAX_GRANTS(blkdev->max_requests,
-                            BLKIF_MAX_SEGMENTS_PER_REQUEST);
     /* Add on the number needed for the ring pages */
-    max_grants += blkdev->nr_ring_ref;
+    max_grants = blkdev->nr_ring_ref;
 
     blkdev->xendev.gnttabdev = xengnttab_open(NULL, 0);
     if (blkdev->xendev.gnttabdev == NULL) {
@@ -1114,8 +977,6 @@ static int blk_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    blkdev->cnt_map++;
-
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
@@ -1171,7 +1032,6 @@ static void blk_disconnect(struct XenDevice *xendev)
     if (blkdev->sring) {
         xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
                         blkdev->nr_ring_ref);
-        blkdev->cnt_map--;
         blkdev->sring = NULL;
     }
 
-- 
2.1.4

Thread overview:
2018-04-30 12:01 [Qemu-devel] [PATCH 0/4] block/xen_disk: legacy code removal and cleanup Paul Durrant
2018-04-30 12:01 ` [Qemu-devel] [PATCH 1/4] block/xen_disk: remove persistent grant code Paul Durrant
2018-04-30 12:01 ` [Qemu-devel] [PATCH 2/4] block/xen_disk: remove use of grant map/unmap Paul Durrant [this message]
2018-04-30 15:12   ` [Qemu-devel] [Xen-devel] " Roger Pau Monné
2018-04-30 15:16     ` Paul Durrant
2018-04-30 15:28       ` Roger Pau Monné
2018-04-30 15:30         ` Paul Durrant
2018-05-01 10:29           ` Wei Liu
2018-05-01 10:31             ` Paul Durrant
2018-04-30 12:01 ` [Qemu-devel] [PATCH 3/4] block/xen_disk: use a single entry iovec Paul Durrant
2018-04-30 12:01 ` [Qemu-devel] [PATCH 4/4] block/xen_disk: be consistent with use of xendev and blkdev->xendev Paul Durrant
2018-05-02 15:58 ` [Qemu-devel] [PATCH 0/4] block/xen_disk: legacy code removal and cleanup Anthony PERARD
2018-05-02 16:03   ` Paul Durrant
2018-05-03  9:55     ` Anthony PERARD
