From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Fam Zheng" <fam@euphon.net>, "Kevin Wolf" <kwolf@redhat.com>,
	qemu-block@nongnu.org, "Max Reitz" <mreitz@redhat.com>,
	"Stefan Hajnoczi" <stefanha@redhat.com>,
	"Marc-André Lureau" <marcandre.lureau@redhat.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>
Subject: [PATCH 4/5] aio-posix: make AioHandler deletion O(1)
Date: Fri, 14 Feb 2020 17:17:11 +0000
Message-ID: <20200214171712.541358-5-stefanha@redhat.com>
In-Reply-To: <20200214171712.541358-1-stefanha@redhat.com>

It is not necessary to scan all AioHandlers to find the deleted ones.  Keep
a separate list of deleted handlers instead of scanning the full handler
list.

The AioHandler->deleted field can be dropped.  Let's check if the
handler has been inserted into the deleted list instead.  Add a new
QLIST_IS_INSERTED() API for this check.
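
To illustrate the idea outside of QEMU, here is a minimal, standalone sketch
of the same pattern.  It uses the BSD <sys/queue.h> LIST macros rather than
QEMU's QLIST, and the Handler struct, IS_INSERTED() macro, and list names
below are illustrative only, not part of this patch:

/*
 * Minimal sketch (not QEMU code): a handler sits on the main list via
 * 'node' and, once deleted, is also linked onto a deleted list via
 * 'node_deleted'.  "Has it been deleted?" becomes a NULL check on the
 * list back-pointer instead of a flag, and freeing walks only the
 * deleted list rather than every registered handler.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Same idea as QLIST_IS_INSERTED(): an element is on a list iff its
 * back-pointer is non-NULL. */
#define IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)

typedef struct Handler {
    int fd;
    LIST_ENTRY(Handler) node;          /* membership in the main handler list */
    LIST_ENTRY(Handler) node_deleted;  /* membership in the deleted list */
} Handler;

LIST_HEAD(HandlerList, Handler);

int main(void)
{
    struct HandlerList handlers = LIST_HEAD_INITIALIZER(handlers);
    struct HandlerList deleted  = LIST_HEAD_INITIALIZER(deleted);

    Handler *h = calloc(1, sizeof(*h));  /* le_prev pointers start out NULL */
    h->fd = 42;
    LIST_INSERT_HEAD(&handlers, h, node);

    /* Deferred deletion: O(1), just link onto the deleted list */
    LIST_INSERT_HEAD(&deleted, h, node_deleted);
    printf("deleted? %d\n", IS_INSERTED(h, node_deleted));  /* prints 1 */

    /* Later, when no reader is walking the main list: free only the
     * handlers on the deleted list */
    Handler *d;
    while ((d = LIST_FIRST(&deleted)) != NULL) {
        LIST_REMOVE(d, node);          /* drop from the main list */
        LIST_REMOVE(d, node_deleted);  /* drop from the deleted list */
        free(d);
    }
    return 0;
}

Note that the back-pointer check only stays valid if le_prev is cleared when
an element is removed without being freed; that is what the
QLIST_SAFE_REMOVE() macro added in patch 3/5 provides on the QEMU side.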

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio.h  |  6 ++++-
 include/qemu/queue.h |  3 +++
 util/aio-posix.c     | 53 +++++++++++++++++++++++++++++---------------
 3 files changed, 43 insertions(+), 19 deletions(-)

diff --git a/include/block/aio.h b/include/block/aio.h
index 7ba9bd7874..1a0de1508c 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -42,6 +42,7 @@ void qemu_aio_unref(void *p);
 void qemu_aio_ref(void *p);
 
 typedef struct AioHandler AioHandler;
+typedef QLIST_HEAD(, AioHandler) AioHandlerList;
 typedef void QEMUBHFunc(void *opaque);
 typedef bool AioPollFn(void *opaque);
 typedef void IOHandler(void *opaque);
@@ -58,7 +59,10 @@ struct AioContext {
     QemuRecMutex lock;
 
     /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
-    QLIST_HEAD(, AioHandler) aio_handlers;
+    AioHandlerList aio_handlers;
+
+    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
+    AioHandlerList deleted_aio_handlers;
 
     /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
      * accessed with atomic primitives.  If this field is 0, everything
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index a276363372..699a8a0568 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -158,6 +158,9 @@ struct {                                                                \
         }                                                               \
 } while (/*CONSTCOND*/0)
 
+/* Is elm in a list? */
+#define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)
+
 #define QLIST_FOREACH(var, head, field)                                 \
         for ((var) = ((head)->lh_first);                                \
                 (var);                                                  \
diff --git a/util/aio-posix.c b/util/aio-posix.c
index b21bcd8e97..3a98a2acb9 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -31,10 +31,10 @@ struct AioHandler
     AioPollFn *io_poll;
     IOHandler *io_poll_begin;
     IOHandler *io_poll_end;
-    int deleted;
     void *opaque;
     bool is_external;
     QLIST_ENTRY(AioHandler) node;
+    QLIST_ENTRY(AioHandler) node_deleted;
 };
 
 #ifdef CONFIG_EPOLL_CREATE1
@@ -67,7 +67,7 @@ static bool aio_epoll_try_enable(AioContext *ctx)
 
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         int r;
-        if (node->deleted || !node->pfd.events) {
+        if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
             continue;
         }
         event.events = epoll_events_from_pfd(node->pfd.events);
@@ -195,9 +195,11 @@ static AioHandler *find_aio_handler(AioContext *ctx, int fd)
     AioHandler *node;
 
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (node->pfd.fd == fd)
-            if (!node->deleted)
+        if (node->pfd.fd == fd) {
+            if (!QLIST_IS_INSERTED(node, node_deleted)) {
                 return node;
+            }
+        }
     }
 
     return NULL;
@@ -216,7 +218,7 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
 
     /* If a read is in progress, just mark the node as deleted */
     if (qemu_lockcnt_count(&ctx->list_lock)) {
-        node->deleted = 1;
+        QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
         node->pfd.revents = 0;
         return false;
     }
@@ -358,7 +360,7 @@ static void poll_set_started(AioContext *ctx, bool started)
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         IOHandler *fn;
 
-        if (node->deleted) {
+        if (QLIST_IS_INSERTED(node, node_deleted)) {
             continue;
         }
 
@@ -415,6 +417,26 @@ bool aio_pending(AioContext *ctx)
     return result;
 }
 
+static void aio_free_deleted_handlers(AioContext *ctx)
+{
+    AioHandler *node;
+
+    if (QLIST_EMPTY_RCU(&ctx->deleted_aio_handlers)) {
+        return;
+    }
+    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
+        return; /* we are nested, let the parent do the freeing */
+    }
+
+    while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
+        QLIST_REMOVE(node, node);
+        QLIST_REMOVE(node, node_deleted);
+        g_free(node);
+    }
+
+    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
+}
+
 static bool aio_dispatch_handlers(AioContext *ctx)
 {
     AioHandler *node, *tmp;
@@ -426,7 +448,7 @@ static bool aio_dispatch_handlers(AioContext *ctx)
         revents = node->pfd.revents & node->pfd.events;
         node->pfd.revents = 0;
 
-        if (!node->deleted &&
+        if (!QLIST_IS_INSERTED(node, node_deleted) &&
             (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
             aio_node_check(ctx, node->is_external) &&
             node->io_read) {
@@ -437,21 +459,13 @@ static bool aio_dispatch_handlers(AioContext *ctx)
                 progress = true;
             }
         }
-        if (!node->deleted &&
+        if (!QLIST_IS_INSERTED(node, node_deleted) &&
             (revents & (G_IO_OUT | G_IO_ERR)) &&
             aio_node_check(ctx, node->is_external) &&
             node->io_write) {
             node->io_write(node->opaque);
             progress = true;
         }
-
-        if (node->deleted) {
-            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
-                QLIST_REMOVE(node, node);
-                g_free(node);
-                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
-            }
-        }
     }
 
     return progress;
@@ -462,6 +476,7 @@ void aio_dispatch(AioContext *ctx)
     qemu_lockcnt_inc(&ctx->list_lock);
     aio_bh_poll(ctx);
     aio_dispatch_handlers(ctx);
+    aio_free_deleted_handlers(ctx);
     qemu_lockcnt_dec(&ctx->list_lock);
 
     timerlistgroup_run_timers(&ctx->tlg);
@@ -519,7 +534,7 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
     AioHandler *node;
 
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->io_poll &&
+        if (!QLIST_IS_INSERTED(node, node_deleted) && node->io_poll &&
             aio_node_check(ctx, node->is_external) &&
             node->io_poll(node->opaque)) {
             /*
@@ -653,7 +668,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
         if (!aio_epoll_enabled(ctx)) {
             QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-                if (!node->deleted && node->pfd.events
+                if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events
                     && aio_node_check(ctx, node->is_external)) {
                     add_pollfd(node);
                 }
@@ -730,6 +745,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
         progress |= aio_dispatch_handlers(ctx);
     }
 
+    aio_free_deleted_handlers(ctx);
+
     qemu_lockcnt_dec(&ctx->list_lock);
 
     progress |= timerlistgroup_run_timers(&ctx->tlg);
-- 
2.24.1

