* [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission
@ 2014-09-04 16:27 Ming Lei
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 1/4] linux-aio: fix submit aio as a batch Ming Lei
                   ` (5 more replies)
  0 siblings, 6 replies; 8+ messages in thread
From: Ming Lei @ 2014-09-04 16:27 UTC (permalink / raw)
  To: qemu-devel, Peter Maydell, Paolo Bonzini, Stefan Hajnoczi, Kevin Wolf
  Cc: Benoît Canet

The 1st patch fixes batch submission.

The 2nd one fixes -EAGAIN for the non-batch case.

The 3rd one is a cleanup.

The 4th one increases max events to 256 to support the coming
multi virtqueue feature.

This patchset is split out of the previous patchset (dataplane:
optimization and multi virtqueue support), as suggested by Stefan.

These patches have been running well in my box for weeks; I hope
they can be merged soon, as I have further patches which depend
on them.

V2:
	- code style fix and commit log fix as suggested by Benoît Canet

V1:
	- rebase on latest QEMU master

 block/linux-aio.c |  131 +++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 103 insertions(+), 28 deletions(-)


Thanks,
--
Ming Lei


* [Qemu-devel] [PATCH v2 1/4] linux-aio: fix submit aio as a batch
  2014-09-04 16:27 [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission Ming Lei
@ 2014-09-04 16:27 ` Ming Lei
  2014-09-09 14:53   ` Benoît Canet
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 2/4] linux-aio: handling -EAGAIN for !s->io_q.plugged case Ming Lei
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 8+ messages in thread
From: Ming Lei @ 2014-09-04 16:27 UTC (permalink / raw)
  To: qemu-devel, Peter Maydell, Paolo Bonzini, Stefan Hajnoczi, Kevin Wolf
  Cc: Benoît Canet, Ming Lei

In the enqueue path we can't complete requests, otherwise
"Co-routine re-entered recursively" may be triggered, so this
patch fixes the issue with the following ideas:

	- for -EAGAIN or partial completion, retry the submission by
	scheduling a BH from the following completion callback
	- for partial completion, also update the I/O queue
	- for any other failure, return the failure if in the enqueue
	path, otherwise abort all queued I/O
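
For context, io_submit(2) may accept only a prefix of the iocb array, or
fail with -EAGAIN when the kernel has no room left; below is a minimal
sketch of the caller-side pattern the patch follows (the function and
variable names here are illustrative, not taken from the patch):

    #include <string.h>
    #include <libaio.h>

    /* Sketch only: keep the unsubmitted tail queued for a later retry,
     * driven from a completion BH rather than from the enqueue path. */
    static int submit_or_requeue(io_context_t ctx, struct iocb **iocbs, int *nr)
    {
        int ret = io_submit(ctx, *nr, iocbs);

        if (ret == -EAGAIN) {
            return 0;            /* nothing consumed; retry the whole queue later */
        }
        if (ret < 0) {
            return ret;          /* hard error; caller fails or aborts the queue */
        }
        if (ret < *nr) {
            /* only iocbs[0..ret-1] were accepted; shift the rest to the front */
            memmove(iocbs, iocbs + ret, (*nr - ret) * sizeof(iocbs[0]));
        }
        *nr -= ret;
        return ret;
    }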

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 block/linux-aio.c |  106 ++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 81 insertions(+), 25 deletions(-)

diff --git a/block/linux-aio.c b/block/linux-aio.c
index 9aca758..a06576d 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -38,11 +38,19 @@ struct qemu_laiocb {
     QLIST_ENTRY(qemu_laiocb) node;
 };
 
-typedef struct {
+/*
+ * TODO: support to batch I/O from multiple bs in one same
+ * AIO context, one important use case is multi-lun scsi,
+ * so in future the IO queue should be per AIO context.
+ */
+typedef struct LaioQueue {
     struct iocb *iocbs[MAX_QUEUED_IO];
     int plugged;
-    unsigned int size;
-    unsigned int idx;
+    uint32 size;
+    uint32 idx;
+
+    /* handle -EAGAIN and partial completion */
+    QEMUBH *retry;
 } LaioQueue;
 
 struct qemu_laio_state {
@@ -138,6 +146,13 @@ static void qemu_laio_completion_bh(void *opaque)
     }
 }
 
+static void qemu_laio_start_retry(struct qemu_laio_state *s)
+{
+    if (s->io_q.idx) {
+        qemu_bh_schedule(s->io_q.retry);
+    }
+}
+
 static void qemu_laio_completion_cb(EventNotifier *e)
 {
     struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);
@@ -145,6 +160,7 @@ static void qemu_laio_completion_cb(EventNotifier *e)
     if (event_notifier_test_and_clear(&s->e)) {
         qemu_bh_schedule(s->completion_bh);
     }
+    qemu_laio_start_retry(s);
 }
 
 static void laio_cancel(BlockDriverAIOCB *blockacb)
@@ -164,6 +180,7 @@ static void laio_cancel(BlockDriverAIOCB *blockacb)
     ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
     if (ret == 0) {
         laiocb->ret = -ECANCELED;
+        qemu_laio_start_retry(laiocb->ctx);
         return;
     }
 
@@ -191,45 +208,80 @@ static void ioq_init(LaioQueue *io_q)
     io_q->plugged = 0;
 }
 
-static int ioq_submit(struct qemu_laio_state *s)
+static void abort_queue(struct qemu_laio_state *s)
+{
+    int i;
+    for (i = 0; i < s->io_q.idx; i++) {
+        struct qemu_laiocb *laiocb = container_of(s->io_q.iocbs[i],
+                                                  struct qemu_laiocb,
+                                                  iocb);
+        laiocb->ret = -EIO;
+        qemu_laio_process_completion(s, laiocb);
+    }
+}
+
+static int ioq_submit(struct qemu_laio_state *s, bool enqueue)
 {
     int ret, i = 0;
     int len = s->io_q.idx;
+    int j = 0;
 
-    do {
-        ret = io_submit(s->ctx, len, s->io_q.iocbs);
-    } while (i++ < 3 && ret == -EAGAIN);
+    if (!len) {
+        return 0;
+    }
 
-    /* empty io queue */
-    s->io_q.idx = 0;
+    ret = io_submit(s->ctx, len, s->io_q.iocbs);
+    if (ret == -EAGAIN) { /* retry in following completion cb */
+        return 0;
+    } else if (ret < 0) {
+        if (enqueue) {
+            return ret;
+        }
 
-    if (ret < 0) {
-        i = 0;
-    } else {
-        i = ret;
+        /* in non-queue path, all IOs have to be completed */
+        abort_queue(s);
+        ret = len;
+    } else if (ret == 0) {
+        goto out;
     }
 
-    for (; i < len; i++) {
-        struct qemu_laiocb *laiocb =
-            container_of(s->io_q.iocbs[i], struct qemu_laiocb, iocb);
-
-        laiocb->ret = (ret < 0) ? ret : -EIO;
-        qemu_laio_process_completion(s, laiocb);
+    for (i = ret; i < len; i++) {
+        s->io_q.iocbs[j++] = s->io_q.iocbs[i];
     }
+
+ out:
+    /*
+     * update io queue, for partial completion, retry will be
+     * started automatically in following completion cb.
+     */
+    s->io_q.idx -= ret;
+
     return ret;
 }
 
-static void ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
+static void ioq_submit_retry(void *opaque)
+{
+    struct qemu_laio_state *s = opaque;
+    ioq_submit(s, false);
+}
+
+static int ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
 {
     unsigned int idx = s->io_q.idx;
 
+    if (unlikely(idx == s->io_q.size)) {
+        return -1;
+    }
+
     s->io_q.iocbs[idx++] = iocb;
     s->io_q.idx = idx;
 
-    /* submit immediately if queue is full */
-    if (idx == s->io_q.size) {
-        ioq_submit(s);
+    /* submit immediately if queue depth is above 2/3 */
+    if (idx > s->io_q.size * 2 / 3) {
+        return ioq_submit(s, true);
     }
+
+    return 0;
 }
 
 void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
@@ -251,7 +303,7 @@ int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
     }
 
     if (s->io_q.idx > 0) {
-        ret = ioq_submit(s);
+        ret = ioq_submit(s, false);
     }
 
     return ret;
@@ -295,7 +347,9 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
             goto out_free_aiocb;
         }
     } else {
-        ioq_enqueue(s, iocbs);
+        if (ioq_enqueue(s, iocbs) < 0) {
+            goto out_free_aiocb;
+        }
     }
     return &laiocb->common;
 
@@ -310,12 +364,14 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
 
     aio_set_event_notifier(old_context, &s->e, NULL);
     qemu_bh_delete(s->completion_bh);
+    qemu_bh_delete(s->io_q.retry);
 }
 
 void laio_attach_aio_context(void *s_, AioContext *new_context)
 {
     struct qemu_laio_state *s = s_;
 
+    s->io_q.retry = aio_bh_new(new_context, ioq_submit_retry, s);
     s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
     aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
 }
-- 
1.7.9.5


* [Qemu-devel] [PATCH v2 2/4] linux-aio: handling -EAGAIN for !s->io_q.plugged case
  2014-09-04 16:27 [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission Ming Lei
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 1/4] linux-aio: fix submit aio as a batch Ming Lei
@ 2014-09-04 16:27 ` Ming Lei
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 3/4] linux-aio: remove 'node' from 'struct qemu_laiocb' Ming Lei
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Ming Lei @ 2014-09-04 16:27 UTC (permalink / raw)
  To: qemu-devel, Peter Maydell, Paolo Bonzini, Stefan Hajnoczi, Kevin Wolf
  Cc: Benoît Canet, Ming Lei

Previously -EAGAIN was simply ignored in the !s->io_q.plugged case,
which can easily result in -EIO being reported to the VM, for example
with an NVMe device.

This patch handles -EAGAIN via the I/O queue in the !s->io_q.plugged
case; the request is retried from the following AIO completion
callback.
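
A condensed sketch of the intended control flow in the non-plugged
submission path (simplified from the diff below; not a drop-in
replacement for it):

    /* non-plugged path, simplified */
    if (s->io_q.idx == 0) {
        ret = io_submit(s->ctx, 1, &iocbs);  /* fast path: queue is empty */
    } else {
        ret = -EAGAIN;                       /* keep ordering behind queued iocbs */
    }
    if (ret == -EAGAIN) {
        ret = ioq_enqueue(s, iocbs);         /* retried later by the retry BH */
    }
    if (ret < 0) {
        goto out_free_aiocb;                 /* no room left: fail this request */
    }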

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 block/linux-aio.c |   22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/block/linux-aio.c b/block/linux-aio.c
index a06576d..20a87ec 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -276,6 +276,11 @@ static int ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
     s->io_q.iocbs[idx++] = iocb;
     s->io_q.idx = idx;
 
+    /* don't submit until next completion for -EAGAIN of non plug case */
+    if (unlikely(!s->io_q.plugged)) {
+        return 0;
+    }
+
     /* submit immediately if queue depth is above 2/3 */
     if (idx > s->io_q.size * 2 / 3) {
         return ioq_submit(s, true);
@@ -343,10 +348,25 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
     io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));
 
     if (!s->io_q.plugged) {
-        if (io_submit(s->ctx, 1, &iocbs) < 0) {
+        int ret;
+
+        if (!s->io_q.idx) {
+            ret = io_submit(s->ctx, 1, &iocbs);
+        } else {
+            ret = -EAGAIN;
+        }
+        /*
+         * Switch to queue mode until -EAGAIN is handled, we suppose
+         * there is always uncompleted I/O, so try to enqueue it first,
+         * and will be submitted again in following aio completion cb.
+         */
+        if (ret == -EAGAIN) {
+            goto enqueue;
+        } else if (ret < 0) {
             goto out_free_aiocb;
         }
     } else {
+ enqueue:
         if (ioq_enqueue(s, iocbs) < 0) {
             goto out_free_aiocb;
         }
-- 
1.7.9.5


* [Qemu-devel] [PATCH v2 3/4] linux-aio: remove 'node' from 'struct qemu_laiocb'
  2014-09-04 16:27 [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission Ming Lei
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 1/4] linux-aio: fix submit aio as a batch Ming Lei
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 2/4] linux-aio: handling -EAGAIN for !s->io_q.plugged case Ming Lei
@ 2014-09-04 16:27 ` Ming Lei
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 4/4] linux-aio: increase max event to 256 Ming Lei
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Ming Lei @ 2014-09-04 16:27 UTC (permalink / raw)
  To: qemu-devel, Peter Maydell, Paolo Bonzini, Stefan Hajnoczi, Kevin Wolf
  Cc: Benoît Canet, Ming Lei

No one uses the 'node' field any more, so remove it from
'struct qemu_laiocb'; this saves 16 bytes per struct on 64-bit
architectures.
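
For reference, QLIST_ENTRY expands to a pair of pointers, which is where
the 16 bytes on a 64-bit host come from (paraphrased from
include/qemu/queue.h; check the tree for the authoritative definition):

    /* paraphrased: two pointers per list entry, i.e. 16 bytes on a
     * 64-bit host */
    #define QLIST_ENTRY(type)                                          \
    struct {                                                           \
        struct type *le_next;   /* next element */                     \
        struct type **le_prev;  /* address of previous next element */ \
    }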

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 block/linux-aio.c |    1 -
 1 file changed, 1 deletion(-)

diff --git a/block/linux-aio.c b/block/linux-aio.c
index 20a87ec..f45a142 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -35,7 +35,6 @@ struct qemu_laiocb {
     size_t nbytes;
     QEMUIOVector *qiov;
     bool is_read;
-    QLIST_ENTRY(qemu_laiocb) node;
 };
 
 /*
-- 
1.7.9.5


* [Qemu-devel] [PATCH v2 4/4] linux-aio: increase max event to 256
  2014-09-04 16:27 [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission Ming Lei
                   ` (2 preceding siblings ...)
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 3/4] linux-aio: remove 'node' from 'struct qemu_laiocb' Ming Lei
@ 2014-09-04 16:27 ` Ming Lei
  2014-09-09 14:39 ` [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission Ming Lei
  2014-10-15 15:04 ` Ming Lei
  5 siblings, 0 replies; 8+ messages in thread
From: Ming Lei @ 2014-09-04 16:27 UTC (permalink / raw)
  To: qemu-devel, Peter Maydell, Paolo Bonzini, Stefan Hajnoczi, Kevin Wolf
  Cc: Benoît Canet, Ming Lei

This patch increases the maximum number of events to 256 for the
coming virtio-blk multi-virtqueue support.
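
MAX_EVENTS is the nr_events value handed to io_setup(2) when the AIO
context is created, so it bounds how many requests can be in flight
before io_submit() starts returning -EAGAIN; a small sketch of that
relationship (paraphrasing laio_init(), not part of this patch):

    /* illustrative only: the completion ring is sized once at init time */
    io_context_t ctx = 0;
    if (io_setup(MAX_EVENTS, &ctx) != 0) {   /* MAX_EVENTS == 256 after this patch */
        /* initialization failed; bail out as laio_init() does */
    }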

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 block/linux-aio.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/linux-aio.c b/block/linux-aio.c
index f45a142..5d565ad 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -23,7 +23,7 @@
  *      than this we will get EAGAIN from io_submit which is communicated to
  *      the guest as an I/O error.
  */
-#define MAX_EVENTS 128
+#define MAX_EVENTS 256
 
 #define MAX_QUEUED_IO  128
 
-- 
1.7.9.5


* Re: [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission
  2014-09-04 16:27 [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission Ming Lei
                   ` (3 preceding siblings ...)
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 4/4] linux-aio: increase max event to 256 Ming Lei
@ 2014-09-09 14:39 ` Ming Lei
  2014-10-15 15:04 ` Ming Lei
  5 siblings, 0 replies; 8+ messages in thread
From: Ming Lei @ 2014-09-09 14:39 UTC (permalink / raw)
  To: qemu-devel, Peter Maydell, Paolo Bonzini, Stefan Hajnoczi, Kevin Wolf
  Cc: Benoît Canet

Hi Paolo, Stefan and Kevin,

On Fri, Sep 5, 2014 at 12:27 AM, Ming Lei <ming.lei@canonical.com> wrote:
> The 1st patch fixes batch submission.
>
> The 2nd one fixes -EAGAIN for non-batch case.
>
> The 3rd one is a cleanup.
>
> The 4th one increase max event to 256 for supporting the comming
> multi virt-queue.
>
> This patchset is splitted from previous patchset(dataplane: optimization
> and multi virtqueue support), as suggested by Stefan.
>
> These patches have been running well in my box for weeks, and hope
> they can be merged soon, and I have some patches which do depend them.
>
> V2:
>         - code style fix and commit log fix as suggested by Benoît Canet
>
> V1:
>         - rebase on latest QEMU master
>

Could you take a look at this patchset?  It has been blocked for weeks.


Thanks,
--
Ming Lei


* Re: [Qemu-devel] [PATCH v2 1/4] linux-aio: fix submit aio as a batch
  2014-09-04 16:27 ` [Qemu-devel] [PATCH v2 1/4] linux-aio: fix submit aio as a batch Ming Lei
@ 2014-09-09 14:53   ` Benoît Canet
  0 siblings, 0 replies; 8+ messages in thread
From: Benoît Canet @ 2014-09-09 14:53 UTC (permalink / raw)
  To: Ming Lei
  Cc: Kevin Wolf, Peter Maydell, qemu-devel, Stefan Hajnoczi,
	Paolo Bonzini, Benoît Canet

On Friday 05 Sep 2014 at 00:27:07 (+0800), Ming Lei wrote:
> In the enqueue path, we can't complete request, otherwise
> "Co-routine re-entered recursively" may be caused, so this
> patch fixes the issue with the following ideas:
> 
> 	- for -EAGAIN or partial completion, retry the submission by
> 	scheduling a BH in following completion cb
> 	- for part of completion, also update the io queue
> 	- for other failure, return the failure if in enqueue path,
> 	otherwise, abort all queued I/O
> 
> Signed-off-by: Ming Lei <ming.lei@canonical.com>
> ---
>  block/linux-aio.c |  106 ++++++++++++++++++++++++++++++++++++++++-------------
>  1 file changed, 81 insertions(+), 25 deletions(-)
> 
> diff --git a/block/linux-aio.c b/block/linux-aio.c
> index 9aca758..a06576d 100644
> --- a/block/linux-aio.c
> +++ b/block/linux-aio.c
> @@ -38,11 +38,19 @@ struct qemu_laiocb {
>      QLIST_ENTRY(qemu_laiocb) node;
>  };
>  
> -typedef struct {
> +/*
> + * TODO: support to batch I/O from multiple bs in one same
> + * AIO context, one important use case is multi-lun scsi,
> + * so in future the IO queue should be per AIO context.
> + */
> +typedef struct LaioQueue {
>      struct iocb *iocbs[MAX_QUEUED_IO];
>      int plugged;
> -    unsigned int size;
> -    unsigned int idx;
> +    uint32 size;
> +    uint32 idx;

Sorry Ming, I said crap about struct, size and idx.
I initially misread it and thought you were adding this.
You were right from the start.

> +
> +    /* handle -EAGAIN and partial completion */
> +    QEMUBH *retry;
>  } LaioQueue;
>  
>  struct qemu_laio_state {
> @@ -138,6 +146,13 @@ static void qemu_laio_completion_bh(void *opaque)
>      }
>  }
>  
> +static void qemu_laio_start_retry(struct qemu_laio_state *s)
> +{
> +    if (s->io_q.idx) {
> +        qemu_bh_schedule(s->io_q.retry);
> +    }
> +}
> +
>  static void qemu_laio_completion_cb(EventNotifier *e)
>  {
>      struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);
> @@ -145,6 +160,7 @@ static void qemu_laio_completion_cb(EventNotifier *e)
>      if (event_notifier_test_and_clear(&s->e)) {
>          qemu_bh_schedule(s->completion_bh);
>      }
> +    qemu_laio_start_retry(s);
>  }
>  
>  static void laio_cancel(BlockDriverAIOCB *blockacb)
> @@ -164,6 +180,7 @@ static void laio_cancel(BlockDriverAIOCB *blockacb)
>      ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
>      if (ret == 0) {
>          laiocb->ret = -ECANCELED;
> +        qemu_laio_start_retry(laiocb->ctx);
>          return;
>      }
>  
> @@ -191,45 +208,80 @@ static void ioq_init(LaioQueue *io_q)
>      io_q->plugged = 0;
>  }
>  
> -static int ioq_submit(struct qemu_laio_state *s)
> +static void abort_queue(struct qemu_laio_state *s)
> +{
> +    int i;
> +    for (i = 0; i < s->io_q.idx; i++) {
> +        struct qemu_laiocb *laiocb = container_of(s->io_q.iocbs[i],
> +                                                  struct qemu_laiocb,
> +                                                  iocb);
> +        laiocb->ret = -EIO;
> +        qemu_laio_process_completion(s, laiocb);
> +    }
> +}
> +
> +static int ioq_submit(struct qemu_laio_state *s, bool enqueue)
>  {
>      int ret, i = 0;
>      int len = s->io_q.idx;
> +    int j = 0;
>  
> -    do {
> -        ret = io_submit(s->ctx, len, s->io_q.iocbs);
> -    } while (i++ < 3 && ret == -EAGAIN);
> +    if (!len) {
> +        return 0;
> +    }
>  
> -    /* empty io queue */
> -    s->io_q.idx = 0;
> +    ret = io_submit(s->ctx, len, s->io_q.iocbs);
> +    if (ret == -EAGAIN) { /* retry in following completion cb */
> +        return 0;
> +    } else if (ret < 0) {
> +        if (enqueue) {
> +            return ret;
> +        }
>  
> -    if (ret < 0) {
> -        i = 0;
> -    } else {
> -        i = ret;
> +        /* in non-queue path, all IOs have to be completed */
> +        abort_queue(s);
> +        ret = len;
> +    } else if (ret == 0) {
> +        goto out;
>      }
>  
> -    for (; i < len; i++) {
> -        struct qemu_laiocb *laiocb =
> -            container_of(s->io_q.iocbs[i], struct qemu_laiocb, iocb);
> -
> -        laiocb->ret = (ret < 0) ? ret : -EIO;
> -        qemu_laio_process_completion(s, laiocb);
> +    for (i = ret; i < len; i++) {
> +        s->io_q.iocbs[j++] = s->io_q.iocbs[i];
>      }
> +
> + out:
> +    /*
> +     * update io queue, for partial completion, retry will be
> +     * started automatically in following completion cb.
> +     */
> +    s->io_q.idx -= ret;
> +
>      return ret;
>  }
>  
> -static void ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
> +static void ioq_submit_retry(void *opaque)
> +{
> +    struct qemu_laio_state *s = opaque;
> +    ioq_submit(s, false);
> +}
> +
> +static int ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
>  {
>      unsigned int idx = s->io_q.idx;
>  
> +    if (unlikely(idx == s->io_q.size)) {
> +        return -1;
> +    }
> +
>      s->io_q.iocbs[idx++] = iocb;
>      s->io_q.idx = idx;
>  
> -    /* submit immediately if queue is full */
> -    if (idx == s->io_q.size) {
> -        ioq_submit(s);
> +    /* submit immediately if queue depth is above 2/3 */
> +    if (idx > s->io_q.size * 2 / 3) {
> +        return ioq_submit(s, true);
>      }
> +
> +    return 0;
>  }
>  
>  void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
> @@ -251,7 +303,7 @@ int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
>      }
>  
>      if (s->io_q.idx > 0) {
> -        ret = ioq_submit(s);
> +        ret = ioq_submit(s, false);
>      }
>  
>      return ret;
> @@ -295,7 +347,9 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
>              goto out_free_aiocb;
>          }
>      } else {
> -        ioq_enqueue(s, iocbs);
> +        if (ioq_enqueue(s, iocbs) < 0) {
> +            goto out_free_aiocb;
> +        }
>      }
>      return &laiocb->common;
>  
> @@ -310,12 +364,14 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
>  
>      aio_set_event_notifier(old_context, &s->e, NULL);
>      qemu_bh_delete(s->completion_bh);
> +    qemu_bh_delete(s->io_q.retry);
>  }
>  
>  void laio_attach_aio_context(void *s_, AioContext *new_context)
>  {
>      struct qemu_laio_state *s = s_;
>  
> +    s->io_q.retry = aio_bh_new(new_context, ioq_submit_retry, s);
>      s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
>      aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
>  }
> -- 
> 1.7.9.5
> 
> 


* Re: [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission
  2014-09-04 16:27 [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission Ming Lei
                   ` (4 preceding siblings ...)
  2014-09-09 14:39 ` [Qemu-devel] [PATCH v2 0/4] linux-aio: fix batch submission Ming Lei
@ 2014-10-15 15:04 ` Ming Lei
  5 siblings, 0 replies; 8+ messages in thread
From: Ming Lei @ 2014-10-15 15:04 UTC (permalink / raw)
  To: qemu-devel, Peter Maydell, Paolo Bonzini, Stefan Hajnoczi, Kevin Wolf
  Cc: Benoît Canet

Hi Stefan and Guys,

On Thu, Sep 4, 2014 at 6:27 PM, Ming Lei <ming.lei@canonical.com> wrote:
> The 1st patch fixes batch submission.
>
> The 2nd one fixes -EAGAIN for non-batch case.
>
> The 3rd one is a cleanup.
>
> The 4th one increase max event to 256 for supporting the comming
> multi virt-queue.
>
> This patchset is splitted from previous patchset(dataplane: optimization
> and multi virtqueue support), as suggested by Stefan.
>
> These patches have been running well in my box for weeks, and hope
> they can be merged soon, and I have some patches which do depend them.
>
> V2:
>         - code style fix and commit log fix as suggested by Benoît Canet
>
> V1:
>         - rebase on latest QEMU master
>
>  block/linux-aio.c |  131 +++++++++++++++++++++++++++++++++++++++++------------
>  1 file changed, 103 insertions(+), 28 deletions(-)

Could you take a look at these patches, or at least at the first two
or three?

With this fix merged, work can start on extending batch I/O submission
to the multi-LUN SCSI case, an optimization that vhost-scsi cannot
provide at all.

The multi-queue patches depend on these patches too.

The 1st one has a one-line conflict with io_cancel() against QEMU
master; please let me know if you need me to resend, or if you have
other comments.

Thanks,
--
Ming Lei


