From: "Philippe Mathieu-Daudé" <f4bug@amsat.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Richard Henderson" <richard.henderson@linaro.org>,
	"Philippe Mathieu-Daudé" <f4bug@amsat.org>
Subject: [PATCH 2/9] accel/tcg: Pass length argument to tlb_flush_range_locked()
Date: Sun,  9 May 2021 17:16:11 +0200
Message-ID: <20210509151618.2331764-3-f4bug@amsat.org>
In-Reply-To: <20210509151618.2331764-1-f4bug@amsat.org>

From: Richard Henderson <richard.henderson@linaro.org>

Rename tlb_flush_page_bits_locked() -> tlb_flush_range_locked(), and
have callers pass a length argument (currently TARGET_PAGE_SIZE) via
the TLBFlushPageBitsByMMUIdxData structure.
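
For reference, a minimal sketch of the renamed helper's call shape (the
names are taken from the diff below; since every caller currently passes
TARGET_PAGE_SIZE, this patch is a mechanical refactor with no behavioural
change on its own):

    /* Sketch only: flush a single page through the renamed helper.
     * "bits" still selects how many low address bits participate in
     * the entry comparison, exactly as before the rename.
     */
    tlb_flush_range_locked(env, mmu_idx,
                           addr & TARGET_PAGE_MASK, /* page-aligned start */
                           TARGET_PAGE_SIZE,        /* length: one page   */
                           bits);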

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
---
 accel/tcg/cputlb.c | 48 +++++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 15 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f616b58a898..df5d5dbf879 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -707,8 +707,9 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
 }
 
-static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
-                                       target_ulong page, unsigned bits)
+static void tlb_flush_range_locked(CPUArchState *env, int midx,
+                                   target_ulong addr, target_ulong len,
+                                   unsigned bits)
 {
     CPUTLBDesc *d = &env_tlb(env)->d[midx];
     CPUTLBDescFast *f = &env_tlb(env)->f[midx];
@@ -718,20 +719,26 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
      * If @bits is smaller than the tlb size, there may be multiple entries
      * within the TLB; otherwise all addresses that match under @mask hit
      * the same TLB entry.
-     *
      * TODO: Perhaps allow bits to be a few bits less than the size.
      * For now, just flush the entire TLB.
+     *
+     * If @len is larger than the tlb size, then it will take longer to
+     * test all of the entries in the TLB than it will to flush it all.
      */
-    if (mask < f->mask) {
+    if (mask < f->mask || len > f->mask) {
         tlb_debug("forcing full flush midx %d ("
-                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
-                  midx, page, mask);
+                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
+                  midx, addr, mask, len);
         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
         return;
     }
 
-    /* Check if we need to flush due to large pages.  */
-    if ((page & d->large_page_mask) == d->large_page_addr) {
+    /*
+     * Check if we need to flush due to large pages.
+     * Because large_page_mask contains all 1's from the msb,
+     * we only need to test the end of the range.
+     */
+    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
         tlb_debug("forcing full flush midx %d ("
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   midx, d->large_page_addr, d->large_page_mask);
@@ -739,14 +746,20 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
         return;
     }
 
-    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
-        tlb_n_used_entries_dec(env, midx);
+    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
+        target_ulong page = addr + i;
+        CPUTLBEntry *entry = tlb_entry(env, midx, page);
+
+        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
+            tlb_n_used_entries_dec(env, midx);
+        }
+        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
     }
-    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
 }
 
 typedef struct {
     target_ulong addr;
+    target_ulong len;
     uint16_t idxmap;
     uint16_t bits;
 } TLBFlushPageBitsByMMUIdxData;
@@ -760,18 +773,20 @@ tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
 
     assert_cpu_is_self(cpu);
 
-    tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
-              d.addr, d.bits, d.idxmap);
+    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
+              d.addr, d.bits, d.len, d.idxmap);
 
     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if ((d.idxmap >> mmu_idx) & 1) {
-            tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
+            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
         }
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    tb_flush_jmp_cache(cpu, d.addr);
+    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
+        tb_flush_jmp_cache(cpu, d.addr + i);
+    }
 }
 
 static bool encode_pbm_to_runon(run_on_cpu_data *out,
@@ -829,6 +844,7 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -865,6 +881,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -908,6 +925,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
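Putting the public entry points together, each one now fills the dispatch
structure as below (a condensed restatement of the three hunks above, shown
only to make the new field visible in one place):

    /* All three tlb_flush_page_bits_by_mmuidx* variants populate the
     * same structure; only .len is new, pinned to one page for now.
     */
    TLBFlushPageBitsByMMUIdxData d = {
        .addr   = addr & TARGET_PAGE_MASK, /* already page aligned */
        .len    = TARGET_PAGE_SIZE,        /* single page, for now */
        .idxmap = idxmap,
        .bits   = bits,
    };
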
-- 
2.26.3
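
Patch 4/9 of this series builds a user-visible range interface on top of
this helper. Its exact prototype is not quoted in this message, so the
signature below is an assumption inferred from the patch title; treat it as
a hypothetical sketch of a multi-page flush:

    /* Hypothetical caller once "accel/tcg: Add tlb_flush_range_by_mmuidx()"
     * lands: flush four contiguous pages with one call instead of four
     * separate tlb_flush_page_by_mmuidx() calls.
     */
    target_ulong start = addr & TARGET_PAGE_MASK;
    tlb_flush_range_by_mmuidx(cpu, start, 4 * TARGET_PAGE_SIZE,
                              ALL_MMUIDX_BITS, TARGET_LONG_BITS);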



Thread overview: 24+ messages
2021-05-09 15:16 [PATCH 0/9] accel/tcg: Add tlb_flush interface for a range of pages Philippe Mathieu-Daudé
2021-05-09 15:16 ` [PATCH 1/9] accel/tcg: Replace g_new() + memcpy() by g_memdup() Philippe Mathieu-Daudé
2021-05-25 13:31   ` Richard Henderson
2021-05-09 15:16 ` Philippe Mathieu-Daudé [this message]
2021-05-09 15:16 ` [PATCH 3/9] accel/tlb: Rename TLBFlushPageBitsByMMUIdxData -> TLBFlushRangeData Philippe Mathieu-Daudé
2021-05-25 13:38   ` Richard Henderson
2021-05-09 15:16 ` [PATCH 4/9] accel/tcg: Add tlb_flush_range_by_mmuidx() Philippe Mathieu-Daudé
2021-05-25 13:42   ` Richard Henderson
2021-05-09 15:16 ` [PATCH 5/9] accel/tcg: Add tlb_flush_page_bits_by_mmuidx_all_cpus() Philippe Mathieu-Daudé
2021-05-25 13:45   ` Richard Henderson
2021-05-09 15:16 ` [PATCH 6/9] accel/tlb: Add tlb_flush_range_by_mmuidx_all_cpus_synced() Philippe Mathieu-Daudé
2021-05-25 13:46   ` Richard Henderson
2021-05-09 15:16 ` [PATCH 7/9] accel/tcg: Rename tlb_flush_[page_bits -> range]_by_mmuidx_async_0 Philippe Mathieu-Daudé
2021-05-25 13:49   ` Richard Henderson
2021-05-09 15:16 ` [PATCH 8/9] accel/tlb: Rename tlb_flush_[page_bits > range]_by_mmuidx_async_[2 > 1] Philippe Mathieu-Daudé
2021-05-25 13:50   ` Richard Henderson
2021-05-09 15:16 ` [PATCH 9/9] accel/tcg: Remove tlb_flush_page_bits_by_mmuidx_async_1() Philippe Mathieu-Daudé
2021-05-25 14:00   ` Richard Henderson
2021-05-09 15:18 ` [PATCH 0/9] accel/tcg: Add tlb_flush interface for a range of pages Philippe Mathieu-Daudé
2021-05-25  7:55   ` [PATCH v2 " Philippe Mathieu-Daudé
2021-05-25  9:55     ` Peter Maydell
2021-05-25 10:18       ` Philippe Mathieu-Daudé
2021-05-25 14:32 ` [PATCH " Peter Maydell
2021-05-27 16:02   ` Philippe Mathieu-Daudé
