From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:44838) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1ZDVTi-0002s7-18 for qemu-devel@nongnu.org; Fri, 10 Jul 2015 06:24:23 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1ZDVTd-0004GP-Ab for qemu-devel@nongnu.org; Fri, 10 Jul 2015 06:24:21 -0400 Received: from mail-wi0-x22b.google.com ([2a00:1450:400c:c05::22b]:34651) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1ZDVTc-0004GJ-Uw for qemu-devel@nongnu.org; Fri, 10 Jul 2015 06:24:17 -0400 Received: by wifm2 with SMTP id m2so42301101wif.1 for ; Fri, 10 Jul 2015 03:24:16 -0700 (PDT) Sender: Paolo Bonzini References: <1436516626-8322-1-git-send-email-a.rigo@virtualopensystems.com> <1436516626-8322-10-git-send-email-a.rigo@virtualopensystems.com> <559F9231.7070800@redhat.com> From: Paolo Bonzini Message-ID: <559F9D4C.3050703@redhat.com> Date: Fri, 10 Jul 2015 12:24:12 +0200 MIME-Version: 1.0 In-Reply-To: Content-Type: text/plain; charset=utf-8 Content-Transfer-Encoding: 7bit Subject: Re: [Qemu-devel] [RFC v3 09/13] cpus.c: introduce simple callback support List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: alvise rigo Cc: mttcg@listserver.greensocs.com, Claudio Fontana , QEMU Developers , Jani Kokkonen , VirtualOpenSystems Technical Team , =?UTF-8?Q?Alex_Benn=c3=a9e?= On 10/07/2015 11:47, alvise rigo wrote: > I tried to use it, but it would then create a deadlock at a very early > stage of the stress test. > The problem is likely related to the fact that flush_queued_work > happens with the global mutex locked. Let's fix that and move the global mutex inside the callbacks. I can take a look. Paolo > As Frederick suggested, we can use the newly introduced > flush_queued_safe_work for this. 
> > Regards, > alvise > > On Fri, Jul 10, 2015 at 11:36 AM, Paolo Bonzini wrote: >> >> >> On 10/07/2015 10:23, Alvise Rigo wrote: >>> In order to perform "lazy" TLB invalidation requests, introduce a >>> queue of callbacks at every vCPU disposal that will be fired just >>> before entering the next TB. >>> >>> Suggested-by: Jani Kokkonen >>> Suggested-by: Claudio Fontana >>> Signed-off-by: Alvise Rigo >> >> Why is async_run_on_cpu not enough? >> >> Paolo >> >>> --- >>> cpus.c | 34 ++++++++++++++++++++++++++++++++++ >>> exec.c | 1 + >>> include/qom/cpu.h | 20 ++++++++++++++++++++ >>> 3 files changed, 55 insertions(+) >>> >>> diff --git a/cpus.c b/cpus.c >>> index f4d938e..b9f0329 100644 >>> --- a/cpus.c >>> +++ b/cpus.c >>> @@ -1421,6 +1421,7 @@ static int tcg_cpu_exec(CPUArchState *env) >>> cpu->icount_extra = count; >>> } >>> qemu_mutex_unlock_iothread(); >>> + cpu_exit_callbacks_call_all(cpu); >>> ret = cpu_exec(env); >>> cpu->tcg_executing = 0; >>> >>> @@ -1469,6 +1470,39 @@ static void tcg_exec_all(CPUState *cpu) >>> cpu->exit_request = 0; >>> } >>> >>> +void cpu_exit_callback_add(CPUState *cpu, CPUExitCallback callback, >>> + void *opaque) >>> +{ >>> + CPUExitCB *cb; >>> + >>> + cb = g_malloc(sizeof(*cb)); >>> + cb->callback = callback; >>> + cb->opaque = opaque; >>> + >>> + qemu_mutex_lock(&cpu->exit_cbs.mutex); >>> + QTAILQ_INSERT_TAIL(&cpu->exit_cbs.exit_callbacks, cb, entry); >>> + qemu_mutex_unlock(&cpu->exit_cbs.mutex); >>> +} >>> + >>> +void cpu_exit_callbacks_call_all(CPUState *cpu) >>> +{ >>> + CPUExitCB *cb, *next; >>> + >>> + if (QTAILQ_EMPTY(&cpu->exit_cbs.exit_callbacks)) { >>> + return; >>> + } >>> + >>> + QTAILQ_FOREACH_SAFE(cb, &cpu->exit_cbs.exit_callbacks, entry, next) { >>> + cb->callback(cpu, cb->opaque); >>> + >>> + /* one-shot callbacks, remove it after using it */ >>> + qemu_mutex_lock(&cpu->exit_cbs.mutex); >>> + QTAILQ_REMOVE(&cpu->exit_cbs.exit_callbacks, cb, entry); >>> + g_free(cb); >>> + qemu_mutex_unlock(&cpu->exit_cbs.mutex); >>> + 
} >>> +} >>> + >>> void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg) >>> { >>> /* XXX: implement xxx_cpu_list for targets that still miss it */ >>> diff --git a/exec.c b/exec.c >>> index 51958ed..322f2c6 100644 >>> --- a/exec.c >>> +++ b/exec.c >>> @@ -531,6 +531,7 @@ void cpu_exec_init(CPUArchState *env) >>> cpu->numa_node = 0; >>> QTAILQ_INIT(&cpu->breakpoints); >>> QTAILQ_INIT(&cpu->watchpoints); >>> + QTAILQ_INIT(&cpu->exit_cbs.exit_callbacks); >>> #ifndef CONFIG_USER_ONLY >>> cpu->as = &address_space_memory; >>> cpu->thread_id = qemu_get_thread_id(); >>> diff --git a/include/qom/cpu.h b/include/qom/cpu.h >>> index 8d121b3..0ec020b 100644 >>> --- a/include/qom/cpu.h >>> +++ b/include/qom/cpu.h >>> @@ -201,6 +201,24 @@ typedef struct CPUWatchpoint { >>> QTAILQ_ENTRY(CPUWatchpoint) entry; >>> } CPUWatchpoint; >>> >>> +/* vCPU exit callbacks */ >>> +typedef void (*CPUExitCallback)(CPUState *cpu, void *opaque); >>> +struct CPUExitCBs { >>> + QemuMutex mutex; >>> + QTAILQ_HEAD(exit_callbacks_head, CPUExitCB) exit_callbacks; >>> +}; >>> + >>> +typedef struct CPUExitCB { >>> + CPUExitCallback callback; >>> + void *opaque; >>> + >>> + QTAILQ_ENTRY(CPUExitCB) entry; >>> +} CPUExitCB; >>> + >>> +void cpu_exit_callback_add(CPUState *cpu, CPUExitCallback callback, >>> + void *opaque); >>> +void cpu_exit_callbacks_call_all(CPUState *cpu); >>> + >>> /* Rendezvous support */ >>> #define TCG_RDV_POLLING_PERIOD 10 >>> typedef struct CpuExitRendezvous { >>> @@ -305,6 +323,8 @@ struct CPUState { >>> >>> void *opaque; >>> >>> + /* One-shot callbacks for stopping requests. */ >>> + struct CPUExitCBs exit_cbs; >>> volatile int pending_rdv; >>> >>> /* In order to avoid passing too many arguments to the MMIO helpers, >>> > >