From: Paolo Bonzini <pbonzini@redhat.com>
Date: Tue, 9 Feb 2016 12:46:14 +0100
Message-Id: <1455018374-4706-17-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1455018374-4706-1-git-send-email-pbonzini@redhat.com>
References: <1455018374-4706-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH 16/16] aio: push aio_context_acquire/release down to dispatching
To: qemu-devel@nongnu.org
Cc: stefanha@redhat.com

The AioContext data structures are now protected by list_lock and/or
they are walked with FOREACH_RCU primitives.  There is no need anymore
to acquire the AioContext for the entire duration of aio_dispatch.
Instead, just acquire it before and after invoking the callbacks.
The next step is then to push it further down.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 aio-posix.c | 15 ++++++---------
 aio-win32.c | 15 +++++++--------
 async.c     |  2 ++
 3 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/aio-posix.c b/aio-posix.c
index cbdc6e4..015d41a 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -334,7 +334,9 @@ bool aio_dispatch(AioContext *ctx)
         if (!node->deleted &&
             (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
             node->io_read) {
+            aio_context_acquire(ctx);
             node->io_read(node->opaque);
+            aio_context_release(ctx);
 
             /* aio_notify() does not count as progress */
             if (node->opaque != &ctx->notifier) {
@@ -344,7 +346,9 @@ bool aio_dispatch(AioContext *ctx)
         if (!node->deleted &&
             (revents & (G_IO_OUT | G_IO_ERR)) &&
             node->io_write) {
+            aio_context_acquire(ctx);
             node->io_write(node->opaque);
+            aio_context_release(ctx);
             progress = true;
         }
 
@@ -360,7 +364,9 @@ bool aio_dispatch(AioContext *ctx)
     qemu_lockcnt_dec(&ctx->list_lock);
 
     /* Run our timers */
+    aio_context_acquire(ctx);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
+    aio_context_release(ctx);
 
     return progress;
 }
@@ -418,7 +424,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
     bool progress;
     int64_t timeout;
 
-    aio_context_acquire(ctx);
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -447,9 +452,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
     timeout = blocking ? aio_compute_timeout(ctx) : 0;
 
     /* wait until next event */
-    if (timeout) {
-        aio_context_release(ctx);
-    }
     if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
         AioHandler epoll_handler;
 
@@ -464,9 +466,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
     if (blocking) {
         atomic_sub(&ctx->notify_me, 2);
     }
-    if (timeout) {
-        aio_context_acquire(ctx);
-    }
 
     aio_notify_accept(ctx);
 
@@ -485,8 +484,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
         progress = true;
     }
 
-    aio_context_release(ctx);
-
     return progress;
 }
 
diff --git a/aio-win32.c b/aio-win32.c
index 862f48c..7e90dfd 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -245,7 +245,9 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
         if (!node->deleted &&
             (revents || event_notifier_get_handle(node->e) == event) &&
             node->io_notify) {
+            aio_context_acquire(ctx);
             node->io_notify(node->e);
+            aio_context_release(ctx);
 
             /* aio_notify() does not count as progress */
             if (node->e != &ctx->notifier) {
@@ -256,11 +258,15 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
         if (!node->deleted &&
             (node->io_read || node->io_write)) {
             if ((revents & G_IO_IN) && node->io_read) {
+                aio_context_acquire(ctx);
                 node->io_read(node->opaque);
+                aio_context_release(ctx);
                 progress = true;
             }
             if ((revents & G_IO_OUT) && node->io_write) {
+                aio_context_acquire(ctx);
                 node->io_write(node->opaque);
+                aio_context_release(ctx);
                 progress = true;
             }
 
@@ -305,7 +311,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
-    aio_context_acquire(ctx);
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -347,17 +352,11 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
 
         timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
-        if (timeout) {
-            aio_context_release(ctx);
-        }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
         if (blocking) {
             assert(first);
             atomic_sub(&ctx->notify_me, 2);
         }
-        if (timeout) {
-            aio_context_acquire(ctx);
-        }
 
         if (first) {
             aio_notify_accept(ctx);
@@ -380,8 +379,8 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
         progress |= aio_dispatch_handlers(ctx, event);
     } while (count > 0);
 
+    aio_context_acquire(ctx);
     progress |= timerlistgroup_run_timers(&ctx->tlg);
-
     aio_context_release(ctx);
     return progress;
 }
diff --git a/async.c b/async.c
index 43a765b..03a8e69 100644
--- a/async.c
+++ b/async.c
@@ -88,7 +88,9 @@ int aio_bh_poll(AioContext *ctx)
                 ret = 1;
             }
             bh->idle = 0;
+            aio_context_acquire(ctx);
             aio_bh_call(bh);
+            aio_context_release(ctx);
         }
     }
 
-- 
2.5.0
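
The locking pattern that all three files converge on can be sketched in
isolation. The snippet below is an illustrative sketch, not QEMU source:
aio_context_acquire()/aio_context_release() are the real primitives this
patch moves around, while AioContext is treated as an opaque type and
invoke_callback() is a hypothetical stand-in for node->io_read(),
node->io_write(), node->io_notify() and aio_bh_call().

/* Illustrative sketch, not QEMU source.  It condenses the locking shape
 * this patch introduces: the walk over the handler list is protected by
 * ctx->list_lock / RCU (not modelled here), and the AioContext lock is
 * held only while an individual callback runs.
 */
typedef struct AioContext AioContext;

void aio_context_acquire(AioContext *ctx);
void aio_context_release(AioContext *ctx);

static void invoke_callback(AioContext *ctx,
                            void (*cb)(void *opaque), void *opaque)
{
    if (!cb) {
        return;
    }
    aio_context_acquire(ctx);   /* taken right before the callback ... */
    cb(opaque);
    aio_context_release(ctx);   /* ... and dropped right after it      */
}

Because the lock no longer brackets the blocking wait, the old
"if (timeout) { aio_context_release(ctx); }" /
"if (timeout) { aio_context_acquire(ctx); }" pairs around the epoll and
WaitForMultipleObjects() calls become unnecessary, which is what most of
the removed lines in the diff above are.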