From: Paulina Szubarczyk <paulinaszubarczyk@gmail.com>
To: xen-devel@lists.xenproject.org, roger.pau@citrix.com
Cc: sstabellini@kernel.org, wei.liu2@citrix.com,
Paulina Szubarczyk <paulinaszubarczyk@gmail.com>,
ian.jackson@eu.citrix.com, P.Gawkowski@ii.pw.edu.pl,
dvrabel@cantab.net, anthony.perard@citrix.com
Subject: [PATCH v2 2/2] qdisk - hw/block/xen_disk: grant copy implementation
Date: Mon, 13 Jun 2016 11:43:56 +0200 [thread overview]
Message-ID: <1465811036-17026-3-git-send-email-paulinaszubarczyk@gmail.com> (raw)
In-Reply-To: <1465811036-17026-1-git-send-email-paulinaszubarczyk@gmail.com>
Copy the data operated on during a request from/to local buffers to/from
the grant references.
Before the grant copy operation, local buffers must be allocated; this is
done by calling ioreq_init_copy_buffers. For the 'read' operation,
the qemu device first invokes the read operation on the local buffers,
and on completion the grant copy is performed and the buffers are freed.
For the 'write' operation, the grant copy is performed before the qemu
device invokes the write.
A new field 'feature_grant_copy' is added to record whether the
grant copy operation is supported.
The body of the function 'ioreq_runio_qemu_aio' is moved to
'ioreq_runio_qemu_aio_blk'. Depending on whether grant copy is
supported, 'ioreq_runio_qemu_aio' performs the corresponding checks,
initialization and grant operation, and then calls
'ioreq_runio_qemu_aio_blk'.
Signed-off-by: Paulina Szubarczyk <paulinaszubarczyk@gmail.com>
---
Changes since v1:
- removed the 'ioreq_write','ioreq_read_init','ioreq_read' functions
- implemented 'ioreq_init_copy_buffers', 'ioreq_copy'
- reverted the removal of grant map and introduced conditional invocation
  of either grant copy or grant map
- stopped caching the local buffers in favor of allocating the required
  number of pages at once. The cached structure would need to be
  protected by a lock, and I suspect that would erode the performance
  improvement.
hw/block/xen_disk.c | 175 ++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 163 insertions(+), 12 deletions(-)
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 37e14d1..af6b8c7 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -131,6 +131,9 @@ struct XenBlkDev {
unsigned int persistent_gnt_count;
unsigned int max_grants;
+ /* Grant copy */
+ gboolean feature_grant_copy;
+
/* qemu block driver */
DriveInfo *dinfo;
BlockBackend *blk;
@@ -500,6 +503,100 @@ static int ioreq_map(struct ioreq *ioreq)
return 0;
}
+static void* get_buffer(int count)
+{
+ return xc_memalign(xen_xc, XC_PAGE_SIZE, count*XC_PAGE_SIZE);
+}
+
+static void free_buffers(struct ioreq *ioreq)
+{
+ int i;
+
+ for (i = 0; i < ioreq->v.niov; i++) {
+ ioreq->page[i] = NULL;
+ }
+
+ free(ioreq->pages);
+}
+
+static int ioreq_init_copy_buffers(struct ioreq *ioreq) {
+ int i;
+
+ if (ioreq->v.niov == 0) {
+ return 0;
+ }
+
+ ioreq->pages = get_buffer(ioreq->v.niov);
+ if (!ioreq->pages) {
+ return -1;
+ }
+
+ for (i = 0; i < ioreq->v.niov; i++) {
+ ioreq->page[i] = ioreq->pages + i*XC_PAGE_SIZE;
+ ioreq->v.iov[i].iov_base += (uintptr_t)ioreq->page[i];
+ }
+
+ return 0;
+}
+
+static int ioreq_copy(struct ioreq *ioreq)
+{
+ XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
+ xc_gnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ int i, count = 0, r, rc;
+ int64_t file_blk = ioreq->blkdev->file_blk;
+
+ if (ioreq->v.niov == 0) {
+ r = 0; goto out;
+ }
+
+ count = ioreq->v.niov;
+
+ for (i = 0; i < count; i++) {
+
+ xc_gnttab_grant_copy_ptr_t *from, *to;
+
+ if (ioreq->req.operation == BLKIF_OP_READ) {
+ segs[i].flags = GNTCOPY_dest_gref;
+ from = &(segs[i].dest);
+ to = &(segs[i].source);
+ } else {
+ segs[i].flags = GNTCOPY_source_gref;
+ from = &(segs[i].source);
+ to = &(segs[i].dest);
+ }
+ segs[i].len = (ioreq->req.seg[i].last_sect
+ - ioreq->req.seg[i].first_sect + 1) * file_blk;
+ from->foreign.ref = ioreq->refs[i];
+ from->foreign.domid = ioreq->domids[i];
+ from->foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
+ to->virt = ioreq->v.iov[i].iov_base;
+ }
+
+ rc = xc_gnttab_grant_copy(gnt, count, segs);
+
+ if (rc) {
+ xen_be_printf(&ioreq->blkdev->xendev, 0,
+ "failed to copy data %d \n", rc);
+ ioreq->aio_errors++;
+ r = -1; goto out;
+ } else {
+ r = 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (segs[i].status != GNTST_okay) {
+ xen_be_printf(&ioreq->blkdev->xendev, 0,
+ "failed to copy data %d for gref %d, domid %d\n", rc,
+ ioreq->refs[i], ioreq->domids[i]);
+ ioreq->aio_errors++;
+ r = -1;
+ }
+ }
+out:
+ return r;
+}
+
static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
static void qemu_aio_complete(void *opaque, int ret)
@@ -521,6 +618,7 @@ static void qemu_aio_complete(void *opaque, int ret)
if (ioreq->aio_inflight > 0) {
return;
}
+
if (ioreq->postsync) {
ioreq->postsync = 0;
ioreq->aio_inflight++;
@@ -528,8 +626,32 @@ static void qemu_aio_complete(void *opaque, int ret)
return;
}
+ if (ioreq->blkdev->feature_grant_copy) {
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_READ:
+ /* in case of failure ioreq->aio_errors is increased
+ * and it is logged */
+ ioreq_copy(ioreq);
+ free_buffers(ioreq);
+ break;
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ if (!ioreq->req.nr_segments) {
+ break;
+ }
+ free_buffers(ioreq);
+ break;
+ default:
+ break;
+ }
+ }
+
ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
- ioreq_unmap(ioreq);
+
+ if (!ioreq->blkdev->feature_grant_copy) {
+ ioreq_unmap(ioreq);
+ }
+
ioreq_finish(ioreq);
switch (ioreq->req.operation) {
case BLKIF_OP_WRITE:
@@ -547,14 +669,42 @@ static void qemu_aio_complete(void *opaque, int ret)
qemu_bh_schedule(ioreq->blkdev->bh);
}
+static int ioreq_runio_qemu_aio_blk(struct ioreq *ioreq);
+
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
- struct XenBlkDev *blkdev = ioreq->blkdev;
+ if (ioreq->blkdev->feature_grant_copy) {
+
+ ioreq_init_copy_buffers(ioreq);
+ if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
+ ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE)) {
+ if (ioreq_copy(ioreq)) {
+ free_buffers(ioreq);
+ goto err;
+ }
+ }
+ if (ioreq_runio_qemu_aio_blk(ioreq)) goto err;
- if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
- goto err_no_map;
+ } else {
+
+ if (ioreq->req.nr_segments && ioreq_map(ioreq)) goto err;
+ if (ioreq_runio_qemu_aio_blk(ioreq)) {
+ ioreq_unmap(ioreq);
+ goto err;
+ }
}
+ return 0;
+err:
+ ioreq_finish(ioreq);
+ ioreq->status = BLKIF_RSP_ERROR;
+ return -1;
+}
+
+static int ioreq_runio_qemu_aio_blk(struct ioreq *ioreq)
+{
+ struct XenBlkDev *blkdev = ioreq->blkdev;
+
ioreq->aio_inflight++;
if (ioreq->presync) {
blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
@@ -594,19 +744,12 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
}
default:
/* unknown operation (shouldn't happen -- parse catches this) */
- goto err;
+ return -1;
}
qemu_aio_complete(ioreq, 0);
return 0;
-
-err:
- ioreq_unmap(ioreq);
-err_no_map:
- ioreq_finish(ioreq);
- ioreq->status = BLKIF_RSP_ERROR;
- return -1;
}
static int blk_send_response_one(struct ioreq *ioreq)
@@ -1020,10 +1163,18 @@ static int blk_connect(struct XenDevice *xendev)
xen_be_bind_evtchn(&blkdev->xendev);
+ xc_gnttab_grant_copy_segment_t seg;
+ blkdev->feature_grant_copy =
+ (xc_gnttab_grant_copy(blkdev->xendev.gnttabdev, 0, &seg) == 0);
+
+ xen_be_printf(&blkdev->xendev, 3, "GRANT COPY %s\n",
+ blkdev->feature_grant_copy ? "ENABLED" : "DISABLED");
+
xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
"remote port %d, local port %d\n",
blkdev->xendev.protocol, blkdev->ring_ref,
blkdev->xendev.remote_port, blkdev->xendev.local_port);
+
return 0;
}
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-06-13 9:45 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-06-13 9:43 [PATCH v2 0/2] qemu-qdisk: Implementation of grant copy operation Paulina Szubarczyk
2016-06-13 9:43 ` [PATCH v2 1/2] libs, libxc: Interface for " Paulina Szubarczyk
2016-06-13 10:04 ` David Vrabel
2016-06-16 12:16 ` Wei Liu
2016-06-16 12:36 ` David Vrabel
2016-06-16 12:50 ` Wei Liu
2016-06-17 16:43 ` Wei Liu
2016-06-17 17:27 ` Paulina Szubarczyk
2016-06-13 9:43 ` Paulina Szubarczyk [this message]
2016-06-13 10:15 ` [PATCH v2 2/2] qdisk - hw/block/xen_disk: grant copy implementation David Vrabel
2016-06-13 10:44 ` Paulina Szubarczyk
2016-06-13 10:58 ` David Vrabel
2016-06-15 16:55 ` Paulina Szubarczyk
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1465811036-17026-3-git-send-email-paulinaszubarczyk@gmail.com \
--to=paulinaszubarczyk@gmail.com \
--cc=P.Gawkowski@ii.pw.edu.pl \
--cc=anthony.perard@citrix.com \
--cc=dvrabel@cantab.net \
--cc=ian.jackson@eu.citrix.com \
--cc=roger.pau@citrix.com \
--cc=sstabellini@kernel.org \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).