From mboxrd@z Thu Jan  1 00:00:00 1970
Received: from eggs.gnu.org ([2001:4830:134:3::10]:50931)
        by lists.gnu.org with esmtp (Exim 4.71) (envelope-from )
        id 1eqXzG-0004aU-8W
        for qemu-devel@nongnu.org; Tue, 27 Feb 2018 00:39:40 -0500
Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)
        (envelope-from ) id 1eqXzC-0000Qb-SB
        for qemu-devel@nongnu.org; Tue, 27 Feb 2018 00:39:38 -0500
Received: from out2-smtp.messagingengine.com ([66.111.4.26]:49779)
        by eggs.gnu.org with esmtps (TLS1.0:DHE_RSA_AES_256_CBC_SHA1:32)
        (Exim 4.71) (envelope-from ) id 1eqXzC-0000PD-NN
        for qemu-devel@nongnu.org; Tue, 27 Feb 2018 00:39:34 -0500
From: "Emilio G. Cota" <cota@braap.org>
Date: Tue, 27 Feb 2018 00:39:13 -0500
Message-Id: <1519709965-29833-5-git-send-email-cota@braap.org>
In-Reply-To: <1519709965-29833-1-git-send-email-cota@braap.org>
References: <1519709965-29833-1-git-send-email-cota@braap.org>
Subject: [Qemu-devel] [PATCH 04/16] tcg: move tb_ctx.tb_phys_invalidate_count to tcg_ctx
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
To: qemu-devel@nongnu.org
Cc: Richard Henderson, Paolo Bonzini

Thereby making it per-TCGContext. Once we remove tb_lock, this will
avoid an atomic increment every time a TB is invalidated.

Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 accel/tcg/translate-all.c |  5 +++--
 include/exec/tb-context.h |  1 -
 tcg/tcg.c                 | 14 ++++++++++++++
 tcg/tcg.h                 |  3 +++
 4 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 3a51d49..20ad3fc 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1072,7 +1072,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     /* suppress any remaining jumps to this TB */
     tb_jmp_unlink(tb);
 
-    tb_ctx.tb_phys_invalidate_count++;
+    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
+               tcg_ctx->tb_phys_invalidate_count + 1);
 }
 
 #ifdef CONFIG_SOFTMMU
@@ -1862,7 +1863,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     cpu_fprintf(f, "\nStatistics:\n");
     cpu_fprintf(f, "TB flush count      %u\n",
                 atomic_read(&tb_ctx.tb_flush_count));
-    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
+    cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
     cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
     tcg_dump_info(f, cpu_fprintf);
 }
diff --git a/include/exec/tb-context.h b/include/exec/tb-context.h
index d8472c8..8c9b49c 100644
--- a/include/exec/tb-context.h
+++ b/include/exec/tb-context.h
@@ -37,7 +37,6 @@ struct TBContext {
 
     /* statistics */
     unsigned tb_flush_count;
-    int tb_phys_invalidate_count;
 };
 
 extern TBContext tb_ctx;
diff --git a/tcg/tcg.c b/tcg/tcg.c
index b471708..a7b596e 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -791,6 +791,20 @@ size_t tcg_code_capacity(void)
     return capacity;
 }
 
+size_t tcg_tb_phys_invalidate_count(void)
+{
+    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int i;
+    size_t total = 0;
+
+    for (i = 0; i < n_ctxs; i++) {
+        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
+
+        total += atomic_read(&s->tb_phys_invalidate_count);
+    }
+    return total;
+}
+
 /* pool based memory allocation */
 void *tcg_malloc_internal(TCGContext *s, int size)
 {
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 8bf29cc..9dd9448 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -694,6 +694,8 @@ struct TCGContext {
     /* Threshold to flush the translated code buffer.  */
     void *code_gen_highwater;
 
+    size_t tb_phys_invalidate_count;
+
     /* Track which vCPU triggers events */
     CPUState *cpu;                      /* *_trans */
 
@@ -852,6 +854,7 @@ size_t tcg_code_capacity(void);
 
 void tcg_tb_insert(TranslationBlock *tb);
 void tcg_tb_remove(TranslationBlock *tb);
+size_t tcg_tb_phys_invalidate_count(void);
 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
 size_t tcg_nb_tbs(void);
-- 
2.7.4
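
(Editorial illustration, not part of the patch: the standalone C program
below sketches the pattern the patch applies, namely a statistics counter
kept per context, updated only by the context's owner thread, and
aggregated by summing over all contexts when a reader asks for the total.
All names in it (ctx_t, ctx_count_invalidate, total_invalidate_count,
MAX_CTXS) are invented for the example, and it uses C11 <stdatomic.h>
rather than QEMU's atomic_set()/atomic_read() macros.)

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_CTXS 8

typedef struct {
    /* Only the owning thread ever writes this field. */
    _Atomic size_t tb_phys_invalidate_count;
} ctx_t;

static ctx_t ctxs[MAX_CTXS];
static _Atomic unsigned int n_ctxs = 1;

/*
 * Called only from the context's owner thread: because there is a single
 * writer per counter, a relaxed load plus a relaxed store is enough; no
 * atomic read-modify-write (and thus no bus-locked instruction) is needed.
 */
static void ctx_count_invalidate(ctx_t *ctx)
{
    size_t cur = atomic_load_explicit(&ctx->tb_phys_invalidate_count,
                                      memory_order_relaxed);

    atomic_store_explicit(&ctx->tb_phys_invalidate_count, cur + 1,
                          memory_order_relaxed);
}

/* Called from any thread, e.g. when dumping statistics: sum all contexts. */
static size_t total_invalidate_count(void)
{
    unsigned int n = atomic_load_explicit(&n_ctxs, memory_order_relaxed);
    size_t total = 0;

    for (unsigned int i = 0; i < n; i++) {
        total += atomic_load_explicit(&ctxs[i].tb_phys_invalidate_count,
                                      memory_order_relaxed);
    }
    return total;
}

int main(void)
{
    ctx_count_invalidate(&ctxs[0]);
    ctx_count_invalidate(&ctxs[0]);
    printf("TB invalidate count %zu\n", total_invalidate_count());
    return 0;
}

The atomic accessors here (like atomic_set()/atomic_read() in the patch)
only guarantee that concurrent readers never observe a torn value; the
saving comes from the single-writer discipline, at the cost of the total
being a slightly stale sum rather than an instantaneous snapshot.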