From: Chegu Vinod <chegu_vinod@hp.com>
Date: Thu, 9 May 2013 12:43:18 -0700
Message-Id: <1368128600-30721-2-git-send-email-chegu_vinod@hp.com>
In-Reply-To: <1368128600-30721-1-git-send-email-chegu_vinod@hp.com>
References: <1368128600-30721-1-git-send-email-chegu_vinod@hp.com>
Subject: [Qemu-devel] [RFC PATCH v5 1/3] Introduce async_run_on_cpu()
To: eblake@redhat.com, anthony@codemonkey.ws, quintela@redhat.com, owasserm@redhat.com, pbonzini@redhat.com, qemu-devel@nongnu.org
Cc: Chegu Vinod <chegu_vinod@hp.com>

Introduce an asynchronous version of run_on_cpu(), i.e. the caller does
not have to block until the callback routine finishes execution on the
target vcpu.

Signed-off-by: Chegu Vinod <chegu_vinod@hp.com>
---
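(Note, not part of the commit message: a minimal usage sketch of the new
API. The callback and its payload below are made up for illustration; the
only real symbols are CPUState, async_run_on_cpu() and the glib allocators
already used in this patch. Because the caller does not wait, the data
handed to the callback must not live on the caller's stack; heap-allocate
it and free it from the callback.)

    /* Hypothetical callback: runs later in the context of the target vCPU
     * (or immediately, if the caller already is that vCPU). */
    static void example_work(void *data)
    {
        unsigned long *counter = data;

        (*counter)++;
        g_free(counter);            /* the callback owns the allocation */
    }

    static void example_schedule(CPUState *cpu)
    {
        unsigned long *counter = g_malloc0(sizeof(*counter));

        /* Returns at once; contrast with run_on_cpu(), which blocks until
         * the work item has been executed on the target vCPU. */
        async_run_on_cpu(cpu, example_work, counter);
    }
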
 cpus.c                |   29 +++++++++++++++++++++++++++++
 include/qemu-common.h |    1 +
 include/qom/cpu.h     |   10 ++++++++++
 3 files changed, 40 insertions(+), 0 deletions(-)

diff --git a/cpus.c b/cpus.c
index c232265..8cd4eab 100644
--- a/cpus.c
+++ b/cpus.c
@@ -653,6 +653,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
 
     wi.func = func;
     wi.data = data;
+    wi.free = false;
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = &wi;
     } else {
@@ -671,6 +672,31 @@
     }
 }
 
+void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
+{
+    struct qemu_work_item *wi;
+
+    if (qemu_cpu_is_self(cpu)) {
+        func(data);
+        return;
+    }
+
+    wi = g_malloc0(sizeof(struct qemu_work_item));
+    wi->func = func;
+    wi->data = data;
+    wi->free = true;
+    if (cpu->queued_work_first == NULL) {
+        cpu->queued_work_first = wi;
+    } else {
+        cpu->queued_work_last->next = wi;
+    }
+    cpu->queued_work_last = wi;
+    wi->next = NULL;
+    wi->done = false;
+
+    qemu_cpu_kick(cpu);
+}
+
 static void flush_queued_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
@@ -683,6 +709,9 @@ static void flush_queued_work(CPUState *cpu)
         cpu->queued_work_first = wi->next;
         wi->func(wi->data);
         wi->done = true;
+        if (wi->free) {
+            g_free(wi);
+        }
     }
     cpu->queued_work_last = NULL;
     qemu_cond_broadcast(&qemu_work_cond);
diff --git a/include/qemu-common.h b/include/qemu-common.h
index b399d85..bad6e1f 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -286,6 +286,7 @@ struct qemu_work_item {
     void (*func)(void *data);
     void *data;
    int done;
+    bool free;
 };
 
 #ifdef CONFIG_USER_ONLY
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 7cd9442..46465e9 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -265,6 +265,16 @@ bool cpu_is_stopped(CPUState *cpu);
 void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
 
 /**
+ * async_run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu asynchronously.
+ */
+void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+
+/**
  * qemu_for_each_cpu:
  * @func: The function to be executed.
  * @data: Data to pass to the function.
-- 
1.7.1