From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([208.118.235.92]:46436) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1TVFPV-0000Yk-NG for qemu-devel@nongnu.org; Mon, 05 Nov 2012 00:39:46 -0500 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1TVFPU-0005zo-F6 for qemu-devel@nongnu.org; Mon, 05 Nov 2012 00:39:45 -0500 Received: from mail-oa0-f45.google.com ([209.85.219.45]:59473) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1TVFPU-0005zk-8Y for qemu-devel@nongnu.org; Mon, 05 Nov 2012 00:39:44 -0500 Received: by mail-oa0-f45.google.com with SMTP id i18so4960272oag.4 for ; Sun, 04 Nov 2012 21:39:43 -0800 (PST) From: Liu Ping Fan Date: Mon, 5 Nov 2012 13:38:43 +0800 Message-Id: <1352093924-17598-8-git-send-email-qemulist@gmail.com> In-Reply-To: <1352093924-17598-1-git-send-email-qemulist@gmail.com> References: <1352093924-17598-1-git-send-email-qemulist@gmail.com> Subject: [Qemu-devel] [PATCH v6 7/8] memory: introduce tls context to trace nested mmio request issue List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: qemu-devel@nongnu.org Cc: Peter Maydell , Jan Kiszka , Marcelo Tosatti , Avi Kivity , Anthony Liguori , Stefan Hajnoczi , Paolo Bonzini From: Liu Ping Fan After breaking down the big lock, a nested MMIO request which does not target RAM can cause a deadlock. Suppose the scene: dev_a and dev_b with fine-grained locks lockA/lockB; then an ABBA deadlock can be triggered. We fix this by tracing and rejecting such requests. 
Signed-off-by: Liu Ping Fan --- exec.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ qemu-thread.h | 7 +++++++ 2 files changed, 54 insertions(+), 0 deletions(-) diff --git a/exec.c b/exec.c index fa34ef9..1eb920d 100644 --- a/exec.c +++ b/exec.c @@ -3442,6 +3442,48 @@ static bool address_space_section_lookup_ref(AddressSpace *as, return safe_ref; } +typedef struct ThreadContext { + DispatchType dispatch_type; + unsigned int mmio_req_pending; +} ThreadContext; + +static __thread ThreadContext thread_context = { + .dispatch_type = DISPATCH_INIT, + .mmio_req_pending = 0 +}; + +void qemu_thread_set_dispatch_type(DispatchType type) +{ + thread_context.dispatch_type = type; +} + +void qemu_thread_reset_dispatch_type(void) +{ + thread_context.dispatch_type = DISPATCH_INIT; +} + +static void address_space_check_inc_req_pending(MemoryRegionSection *section) +{ + bool nested = false; + + /* currently, only mmio out of big lock, and need this to avoid dead lock */ + if (thread_context.dispatch_type == DISPATCH_MMIO) { + nested = ++thread_context.mmio_req_pending > 1 ? 
true : false; + /* To fix, will filter iommu case */ + if (nested && !memory_region_is_ram(section->mr)) { + fprintf(stderr, "mmio: nested target not RAM is not support"); + abort(); + } + } +} + +static void address_space_dec_req_pending(void) +{ + if (thread_context.dispatch_type == DISPATCH_MMIO) { + thread_context.mmio_req_pending--; + } +} + void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, int len, bool is_write) { @@ -3462,6 +3504,8 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, qemu_mutex_lock(&as->lock); safe_ref = memory_region_section_lookup_ref(d, page, &obj_mrs); qemu_mutex_unlock(&as->lock); + address_space_check_inc_req_pending(&obj_mrs); + if (!safe_ref) { qemu_mutex_lock_iothread(); qemu_mutex_lock(&as->lock); @@ -3477,6 +3521,7 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, if (is_write) { if (!memory_region_is_ram(section->mr)) { target_phys_addr_t addr1; + addr1 = memory_region_section_addr(section, addr); /* XXX: could force cpu_single_env to NULL to avoid potential bugs */ @@ -3510,6 +3555,7 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { target_phys_addr_t addr1; + /* I/O case */ addr1 = memory_region_section_addr(section, addr); if (l >= 4 && ((addr1 & 3) == 0)) { @@ -3537,6 +3583,7 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, qemu_put_ram_ptr(ptr); } } + address_space_dec_req_pending(); memory_region_section_unref(&obj_mrs); len -= l; buf += l; diff --git a/qemu-thread.h b/qemu-thread.h index 05fdaaf..fc9e17b 100644 --- a/qemu-thread.h +++ b/qemu-thread.h @@ -7,6 +7,11 @@ typedef struct QemuMutex QemuMutex; typedef struct QemuCond QemuCond; typedef struct QemuThread QemuThread; +typedef enum { + DISPATCH_INIT = 0, + DISPATCH_MMIO, + DISPATCH_IO, +} DispatchType; #ifdef _WIN32 #include 
"qemu-thread-win32.h" @@ -46,4 +51,6 @@ void qemu_thread_get_self(QemuThread *thread); bool qemu_thread_is_self(QemuThread *thread); void qemu_thread_exit(void *retval); +void qemu_thread_set_dispatch_type(DispatchType type); +void qemu_thread_reset_dispatch_type(void); #endif -- 1.7.4.4