From: Paolo Bonzini <pbonzini@redhat.com>
Date: Mon, 8 Feb 2016 17:15:04 +0100
Message-Id: <1454948107-11844-14-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1454948107-11844-1-git-send-email-pbonzini@redhat.com>
References: <1454948107-11844-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH 13/16] aio-posix: remove walking_handlers, protecting AioHandler list with list_lock
To: qemu-devel@nongnu.org
Cc: stefanha@redhat.com

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 aio-posix.c | 54 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/aio-posix.c b/aio-posix.c
index 450da51..cbdc6e4 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -16,7 +16,7 @@
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "block/block.h"
-#include "qemu/queue.h"
+#include "qemu/rcu_queue.h"
 #include "qemu/sockets.h"
 #ifdef CONFIG_EPOLL
 #include <sys/epoll.h>
@@ -213,6 +213,8 @@ void aio_set_fd_handler(AioContext *ctx,
     bool is_new = false;
     bool deleted = false;
 
+    qemu_lockcnt_lock(&ctx->list_lock);
+
     node = find_aio_handler(ctx, fd);
 
     /* Are we deleting the fd handler? */
@@ -220,14 +222,14 @@ void aio_set_fd_handler(AioContext *ctx,
         if (node) {
             g_source_remove_poll(&ctx->source, &node->pfd);
 
-            /* If the lock is held, just mark the node as deleted */
-            if (ctx->walking_handlers) {
+            /* If aio_poll is in progress, just mark the node as deleted */
+            if (qemu_lockcnt_count(&ctx->list_lock)) {
                 node->deleted = 1;
                 node->pfd.revents = 0;
             } else {
                 /* Otherwise, delete it for real. We can't just mark it as
                  * deleted because deleted nodes are only cleaned up after
-                 * releasing the walking_handlers lock.
+                 * releasing the list_lock.
                  */
                 QLIST_REMOVE(node, node);
                 deleted = true;
@@ -238,7 +240,7 @@ void aio_set_fd_handler(AioContext *ctx,
             /* Alloc and insert if it's not already there */
             node = g_new0(AioHandler, 1);
             node->pfd.fd = fd;
-            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
 
             g_source_add_poll(&ctx->source, &node->pfd);
             is_new = true;
@@ -254,6 +256,7 @@ void aio_set_fd_handler(AioContext *ctx,
     }
 
     aio_epoll_update(ctx, node, is_new);
+    qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
     if (deleted) {
         g_free(node);
@@ -277,20 +280,30 @@ bool aio_prepare(AioContext *ctx)
 bool aio_pending(AioContext *ctx)
 {
     AioHandler *node;
+    bool result = false;
 
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+    /*
+     * We have to walk very carefully in case aio_set_fd_handler is
+     * called while we're walking.
+     */
+    qemu_lockcnt_inc(&ctx->list_lock);
+
+    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         int revents;
 
         revents = node->pfd.revents & node->pfd.events;
         if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
-            return true;
+            result = true;
+            break;
         }
         if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
-            return true;
+            result = true;
+            break;
         }
     }
+    qemu_lockcnt_dec(&ctx->list_lock);
 
-    return false;
+    return result;
 }
 
 bool aio_dispatch(AioContext *ctx)
@@ -311,13 +324,12 @@ bool aio_dispatch(AioContext *ctx)
      * We have to walk very carefully in case aio_set_fd_handler is
      * called while we're walking.
      */
-    ctx->walking_handlers++;
+    qemu_lockcnt_inc(&ctx->list_lock);
 
-    QLIST_FOREACH_SAFE(node, &ctx->aio_handlers, node, tmp) {
+    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
         int revents;
 
-        revents = node->pfd.revents & node->pfd.events;
-        node->pfd.revents = 0;
+        revents = atomic_xchg(&node->pfd.revents, 0) & node->pfd.events;
 
         if (!node->deleted &&
             (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
@@ -337,16 +349,15 @@ bool aio_dispatch(AioContext *ctx)
         }
 
         if (node->deleted) {
-            ctx->walking_handlers--;
-            if (!ctx->walking_handlers) {
+            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                 QLIST_REMOVE(node, node);
                 g_free(node);
+                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
             }
-            ctx->walking_handlers++;
         }
     }
 
-    ctx->walking_handlers--;
+    qemu_lockcnt_dec(&ctx->list_lock);
 
     /* Run our timers */
     progress |= timerlistgroup_run_timers(&ctx->tlg);
@@ -421,12 +432,11 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
         atomic_add(&ctx->notify_me, 2);
     }
 
-    ctx->walking_handlers++;
-
+    qemu_lockcnt_inc(&ctx->list_lock);
     assert(npfd == 0);
 
     /* fill pollfds */
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         if (!node->deleted && node->pfd.events
             && !aio_epoll_enabled(ctx)
             && aio_node_check(ctx, node->is_external)) {
@@ -463,12 +473,12 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
         for (i = 0; i < npfd; i++) {
-            nodes[i]->pfd.revents = pollfds[i].revents;
+            atomic_or(&nodes[i]->pfd.revents, pollfds[i].revents);
         }
     }
 
     npfd = 0;
-    ctx->walking_handlers--;
+    qemu_lockcnt_dec(&ctx->list_lock);
 
     /* Run dispatch even if there were no readable fds to run timers */
     if (aio_dispatch(ctx)) {
-- 
2.5.0
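
[Editor's note, not part of the patch] The qemu_lockcnt_* calls above come from a primitive introduced earlier in this series; it pairs a reader count with a writer lock so aio_set_fd_handler can run concurrently with list walks. Writers holding list_lock unlink a node immediately when no walk is in progress, otherwise they only set node->deleted, and the last walker reaps deferred nodes through qemu_lockcnt_dec_if_lock()/qemu_lockcnt_inc_and_unlock(). The sketch below is a minimal, single-threaded model of that bookkeeping, not QEMU code: the LockCnt and Handler types, the lockcnt_* helpers, remove_handler and walk_handlers are hypothetical stand-ins, and the model deliberately omits the RCU list macros and atomic revents accesses that make the real walk safe against concurrent insertion.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of the counter+lock pair: "locked" stands for the writer-side lock,
 * "count" for the number of readers currently inside a list walk. */
typedef struct LockCnt {
    bool locked;
    int count;
} LockCnt;

typedef struct Handler {
    int fd;
    bool deleted;             /* marked instead of freed while readers run */
    struct Handler *next;
} Handler;

static LockCnt list_lock;
static Handler *handlers;     /* stand-in for ctx->aio_handlers */

static void lockcnt_inc(LockCnt *lc)    { lc->count++; }        /* reader enters a walk */
static void lockcnt_dec(LockCnt *lc)    { lc->count--; }        /* reader leaves a walk */
static void lockcnt_lock(LockCnt *lc)   { lc->locked = true; }  /* writer takes the lock */
static void lockcnt_unlock(LockCnt *lc) { lc->locked = false; }

/* If this is the only active reader, drop the count to zero and take the
 * lock so deferred nodes can be freed; otherwise leave everything alone. */
static bool lockcnt_dec_if_lock(LockCnt *lc)
{
    if (lc->count == 1 && !lc->locked) {
        lc->count = 0;
        lc->locked = true;
        return true;
    }
    return false;
}

/* Re-acquire the reader reference and release the lock taken above. */
static void lockcnt_inc_and_unlock(LockCnt *lc)
{
    lc->count++;
    lc->locked = false;
}

/* Writer: the deletion path of aio_set_fd_handler() in miniature. */
static void remove_handler(int fd)
{
    lockcnt_lock(&list_lock);
    for (Handler **p = &handlers; *p; p = &(*p)->next) {
        if ((*p)->fd != fd) {
            continue;
        }
        if (list_lock.count) {
            (*p)->deleted = true;      /* readers active: defer the removal */
        } else {
            Handler *dead = *p;        /* no readers: unlink and free now   */
            *p = dead->next;
            free(dead);
        }
        break;
    }
    lockcnt_unlock(&list_lock);
}

/* Reader: the walk in aio_dispatch() in miniature, reaping deferred nodes. */
static void walk_handlers(void)
{
    lockcnt_inc(&list_lock);
    Handler **p = &handlers;
    while (*p) {
        Handler *node = *p;
        if (!node->deleted) {
            printf("dispatching fd %d\n", node->fd);
        } else if (lockcnt_dec_if_lock(&list_lock)) {
            *p = node->next;           /* last reader: unlink and free for real */
            free(node);
            lockcnt_inc_and_unlock(&list_lock);
            continue;                  /* *p already points at the next node */
        }
        p = &node->next;
    }
    lockcnt_dec(&list_lock);
}

int main(void)
{
    for (int fd = 1; fd <= 3; fd++) {  /* register three fake handlers */
        Handler *h = calloc(1, sizeof(*h));
        h->fd = fd;
        h->next = handlers;
        handlers = h;
    }
    lockcnt_inc(&list_lock);           /* pretend a walk is in flight   */
    remove_handler(2);                 /* count > 0: only marked deleted */
    lockcnt_dec(&list_lock);
    walk_handlers();                   /* skips fd 2, then frees it     */
    walk_handlers();                   /* fd 2 is gone                  */
    return 0;
}

Running the model, the first walk prints fds 3 and 1 and reaps the deferred node for fd 2 along the way; the second walk confirms it is gone.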