From: "Emilio G. Cota" <cota@braap.org>
To: qemu-devel@nongnu.org
Cc: Richard Henderson <richard.henderson@linaro.org>,
	Paolo Bonzini <pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH 05/16] translate-all: iterate over TBs in a page with PAGE_FOR_EACH_TB
Date: Tue, 27 Feb 2018 00:39:14 -0500	[thread overview]
Message-ID: <1519709965-29833-6-git-send-email-cota@braap.org> (raw)
In-Reply-To: <1519709965-29833-1-git-send-email-cota@braap.org>

This patch makes several related changes, but to avoid churn I have merged
them into a single commit. To wit:

- Use uintptr_t instead of TranslationBlock * for the list of TBs in a page.
  The rationale is the same as in commit c37e6d7e ("tcg: Use uintptr_t type
  for jmp_list_{next|first} fields of TB"): these are tagged pointers, not
  regular pointers, so use a more appropriate type.

- Only check the least significant bit of the tagged pointers. Masking
  with 3/~3 is unnecessary and confusing.

- Introduce the TB_FOR_EACH_TAGGED macro, and use it to define
  PAGE_FOR_EACH_TB, which improves readability. A standalone sketch of the
  tagged-pointer iteration pattern follows this list.

- Update tb_page_remove to use PAGE_FOR_EACH_TB. If there is a bug and we
  attempt to remove a TB that is not in the list, we now reach
  g_assert_not_reached() instead of segfaulting, since the iteration stops
  at the NULL list terminator.
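
To illustrate the pattern outside the patch context, here is a minimal,
self-contained sketch of the same tagged-pointer iteration. The Node struct,
NODE_FOR_EACH_TAGGED macro and main() driver are illustrative only and are
not part of this patch; the real macro walks TranslationBlock's page_next[]
links, where the low bit of each stored pointer selects which page_next[]
slot of the pointed-to TB continues the current page's list:

#include <stdint.h>
#include <stdio.h>

typedef struct Node {
    int id;
    /*
     * Tagged links: the low bit of each stored pointer says which
     * next[] slot of the pointed-to node continues this list.
     */
    uintptr_t next[2];
} Node;

/* same shape as TB_FOR_EACH_TAGGED, with the field name fixed to next[] */
#define NODE_FOR_EACH_TAGGED(head, node, n)                     \
    for (n = (head) & 1,                                        \
             node = (Node *)((head) & ~(uintptr_t)1);           \
         node;                                                  \
         node = (Node *)node->next[n],                          \
             n = (uintptr_t)node & 1,                           \
             node = (Node *)((uintptr_t)node & ~(uintptr_t)1))

int main(void)
{
    Node a = { .id = 0 }, b = { .id = 1 };
    Node *node;
    unsigned int n;

    /* list: head -> a (continue via a.next[0]) -> b (via b.next[1]) -> NULL */
    uintptr_t head = (uintptr_t)&a | 0;

    a.next[0] = (uintptr_t)&b | 1;
    b.next[1] = 0; /* NULL-terminate the list */

    NODE_FOR_EACH_TAGGED(head, node, n) {
        printf("node %d is linked into this list via next[%u]\n", node->id, n);
    }
    return 0;
}

Note that n holds the slot index of the current element, which is exactly
the link that tb_page_remove splices out via tb1->page_next[n1].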

Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 accel/tcg/translate-all.c | 65 +++++++++++++++++++++++------------------------
 include/exec/exec-all.h   |  2 +-
 2 files changed, 33 insertions(+), 34 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 20ad3fc..06aa905 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -103,7 +103,7 @@
 
 typedef struct PageDesc {
     /* list of TBs intersecting this ram page */
-    TranslationBlock *first_tb;
+    uintptr_t first_tb;
 #ifdef CONFIG_SOFTMMU
     /* in order to optimize self modifying code, we count the number
        of lookups we do to a given page to use a bitmap */
@@ -114,6 +114,18 @@ typedef struct PageDesc {
 #endif
 } PageDesc;
 
+/* list iterators for lists of tagged pointers in TranslationBlock */
+#define TB_FOR_EACH_TAGGED(head, tb, n, field)                  \
+    for (n = (head) & 1,                                        \
+             tb = (TranslationBlock *)((head) & ~1);            \
+         tb;                                                    \
+         tb = (TranslationBlock *)tb->field[n],                 \
+             n = (uintptr_t)tb & 1,                             \
+             tb = (TranslationBlock *)((uintptr_t)tb & ~1))
+
+#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
+    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
+
 /* In system mode we want L1_MAP to be based on ram offsets,
    while in user mode we want it to be based on virtual addresses.  */
 #if !defined(CONFIG_USER_ONLY)
@@ -818,7 +830,7 @@ static void page_flush_tb_1(int level, void **lp)
         PageDesc *pd = *lp;
 
         for (i = 0; i < V_L2_SIZE; ++i) {
-            pd[i].first_tb = NULL;
+            pd[i].first_tb = (uintptr_t)NULL;
             invalidate_page_bitmap(pd + i);
         }
     } else {
@@ -946,21 +958,21 @@ static void tb_page_check(void)
 
 #endif /* CONFIG_USER_ONLY */
 
-static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
+static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
 {
     TranslationBlock *tb1;
+    uintptr_t *pprev;
     unsigned int n1;
 
-    for (;;) {
-        tb1 = *ptb;
-        n1 = (uintptr_t)tb1 & 3;
-        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
+    pprev = &pd->first_tb;
+    PAGE_FOR_EACH_TB(pd, tb1, n1) {
         if (tb1 == tb) {
-            *ptb = tb1->page_next[n1];
-            break;
+            *pprev = tb1->page_next[n1];
+            return;
         }
-        ptb = &tb1->page_next[n1];
+        pprev = &tb1->page_next[n1];
     }
+    g_assert_not_reached();
 }
 
 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
@@ -1048,12 +1060,12 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     /* remove the TB from the page list */
     if (tb->page_addr[0] != page_addr) {
         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
-        tb_page_remove(&p->first_tb, tb);
+        tb_page_remove(p, tb);
         invalidate_page_bitmap(p);
     }
     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
-        tb_page_remove(&p->first_tb, tb);
+        tb_page_remove(p, tb);
         invalidate_page_bitmap(p);
     }
 
@@ -1084,10 +1096,7 @@ static void build_page_bitmap(PageDesc *p)
 
     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
 
-    tb = p->first_tb;
-    while (tb != NULL) {
-        n = (uintptr_t)tb & 3;
-        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
+    PAGE_FOR_EACH_TB(p, tb, n) {
         /* NOTE: this is subtle as a TB may span two physical pages */
         if (n == 0) {
             /* NOTE: tb_end may be after the end of the page, but
@@ -1102,7 +1111,6 @@ static void build_page_bitmap(PageDesc *p)
             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
         }
         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
-        tb = tb->page_next[n];
     }
 }
 #endif
@@ -1125,9 +1133,9 @@ static inline void tb_alloc_page(TranslationBlock *tb,
     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
     tb->page_next[n] = p->first_tb;
 #ifndef CONFIG_USER_ONLY
-    page_already_protected = p->first_tb != NULL;
+    page_already_protected = p->first_tb != (uintptr_t)NULL;
 #endif
-    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
+    p->first_tb = (uintptr_t)tb | n;
     invalidate_page_bitmap(p);
 
 #if defined(CONFIG_USER_ONLY)
@@ -1404,7 +1412,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
 {
-    TranslationBlock *tb, *tb_next;
+    TranslationBlock *tb;
     tb_page_addr_t tb_start, tb_end;
     PageDesc *p;
     int n;
@@ -1435,11 +1443,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
     /* we remove all the TBs in the range [start, end[ */
     /* XXX: see if in some cases it could be faster to invalidate all
        the code */
-    tb = p->first_tb;
-    while (tb != NULL) {
-        n = (uintptr_t)tb & 3;
-        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
-        tb_next = tb->page_next[n];
+    PAGE_FOR_EACH_TB(p, tb, n) {
         /* NOTE: this is subtle as a TB may span two physical pages */
         if (n == 0) {
             /* NOTE: tb_end may be after the end of the page, but
@@ -1476,7 +1480,6 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
 #endif /* TARGET_HAS_PRECISE_SMC */
             tb_phys_invalidate(tb, -1);
         }
-        tb = tb_next;
     }
 #if !defined(CONFIG_USER_ONLY)
     /* if no code remaining, no need to continue to use slow writes */
@@ -1570,18 +1573,15 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
     }
 
     tb_lock();
-    tb = p->first_tb;
 #ifdef TARGET_HAS_PRECISE_SMC
-    if (tb && pc != 0) {
+    if (p->first_tb && pc != 0) {
         current_tb = tcg_tb_lookup(pc);
     }
     if (cpu != NULL) {
         env = cpu->env_ptr;
     }
 #endif
-    while (tb != NULL) {
-        n = (uintptr_t)tb & 3;
-        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
+    PAGE_FOR_EACH_TB(p, tb, n) {
 #ifdef TARGET_HAS_PRECISE_SMC
         if (current_tb == tb &&
             (current_tb->cflags & CF_COUNT_MASK) != 1) {
@@ -1598,9 +1598,8 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
         }
 #endif /* TARGET_HAS_PRECISE_SMC */
         tb_phys_invalidate(tb, addr);
-        tb = tb->page_next[n];
     }
-    p->first_tb = NULL;
+    p->first_tb = (uintptr_t)NULL;
 #ifdef TARGET_HAS_PRECISE_SMC
     if (current_tb_modified) {
         /* Force execution of one insn next time.  */
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 17e08b3..5f7e65a 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -356,7 +356,7 @@ struct TranslationBlock {
     struct TranslationBlock *orig_tb;
     /* first and second physical page containing code. The lower bit
        of the pointer tells the index in page_next[] */
-    struct TranslationBlock *page_next[2];
+    uintptr_t page_next[2];
     tb_page_addr_t page_addr[2];
 
     /* The following data are used to directly call another TB from
-- 
2.7.4

Thread overview: 52+ messages
2018-02-27  5:39 [Qemu-devel] [PATCH 00/16] tcg: tb_lock removal redux v1 Emilio G. Cota
2018-02-27  5:39 ` [Qemu-devel] [PATCH 01/16] qht: require a default comparison function Emilio G. Cota
2018-02-28 19:02   ` Richard Henderson
2018-03-28 16:21   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 02/16] qht: return existing entry when qht_insert fails Emilio G. Cota
2018-02-28 19:10   ` Richard Henderson
2018-03-28 16:33   ` Alex Bennée
2018-04-05 17:10     ` Emilio G. Cota
2018-02-27  5:39 ` [Qemu-devel] [PATCH 03/16] tcg: track TBs with per-region BST's Emilio G. Cota
2018-02-28 20:53   ` Richard Henderson
2018-03-29  9:54   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 04/16] tcg: move tb_ctx.tb_phys_invalidate_count to tcg_ctx Emilio G. Cota
2018-02-28 20:55   ` Richard Henderson
2018-03-29 10:06   ` Alex Bennée
2018-04-05 17:18     ` Emilio G. Cota
2018-02-27  5:39 ` Emilio G. Cota [this message]
2018-02-28 21:40   ` [Qemu-devel] [PATCH 05/16] translate-all: iterate over TBs in a page with PAGE_FOR_EACH_TB Richard Henderson
2018-02-28 22:50     ` Emilio G. Cota
2018-02-28 22:53       ` Richard Henderson
2018-02-27  5:39 ` [Qemu-devel] [PATCH 06/16] translate-all: make l1_map lockless Emilio G. Cota
2018-02-28 22:15   ` Richard Henderson
2018-03-29 10:16   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 07/16] translate-all: remove hole in PageDesc Emilio G. Cota
2018-02-28 22:17   ` Richard Henderson
2018-03-29 10:17   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 08/16] translate-all: work page-by-page in tb_invalidate_phys_range_1 Emilio G. Cota
2018-02-28 22:23   ` Richard Henderson
2018-03-29 10:10   ` Alex Bennée
2018-03-29 10:17   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 09/16] translate-all: move tb_invalidate_phys_page_range up in the file Emilio G. Cota
2018-02-28 22:24   ` Richard Henderson
2018-03-29 10:08   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 10/16] translate-all: use per-page locking in !user-mode Emilio G. Cota
2018-03-29 14:55   ` Alex Bennée
2018-04-06  0:43     ` Emilio G. Cota
2018-02-27  5:39 ` [Qemu-devel] [PATCH 11/16] translate-all: add page_collection assertions Emilio G. Cota
2018-03-29 15:08   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 12/16] translate-all: discard TB when tb_link_page returns an existing matching TB Emilio G. Cota
2018-03-29 15:19   ` Alex Bennée
2018-04-06  1:23     ` Emilio G. Cota
2018-02-27  5:39 ` [Qemu-devel] [PATCH 13/16] translate-all: protect TB jumps with a per-destination-TB lock Emilio G. Cota
2018-02-27 11:33   ` Paolo Bonzini
2018-02-27 11:43     ` Laurent Desnogues
2018-02-27 14:31       ` Paolo Bonzini
2018-03-28 15:57   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 14/16] cputlb: remove tb_lock from tlb_flush functions Emilio G. Cota
2018-03-29 15:46   ` Alex Bennée
2018-02-27  5:39 ` [Qemu-devel] [PATCH 15/16] translate-all: remove tb_lock mention from cpu_restore_state_from_tb Emilio G. Cota
2018-03-29 16:06   ` Alex Bennée
2018-04-06  1:40     ` Emilio G. Cota
2018-02-27  5:39 ` [Qemu-devel] [PATCH 16/16] tcg: remove tb_lock Emilio G. Cota
2018-03-29 16:15   ` Alex Bennée
