From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PULL 011/114] accel/tcg: Pass length argument to tlb_flush_range_locked()
Date: Tue, 25 May 2021 16:01:41 +0100	[thread overview]
Message-ID: <20210525150324.32370-12-peter.maydell@linaro.org> (raw)
In-Reply-To: <20210525150324.32370-1-peter.maydell@linaro.org>

From: Richard Henderson <richard.henderson@linaro.org>

Rename tlb_flush_page_bits_locked() -> tlb_flush_range_locked(), and
have callers pass a length argument (currently TARGET_PAGE_SIZE) via
the TLBFlushPageBitsByMMUIdxData structure.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-3-f4bug@amsat.org
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/tcg/cputlb.c | 48 +++++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 15 deletions(-)
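
As a rough illustration of the loop shape this patch introduces, here is a
minimal standalone sketch (not QEMU code; flush_one_page() and PAGE_SIZE are
hypothetical stand-ins for tlb_flush_entry_mask_locked() and
TARGET_PAGE_SIZE). A range flush simply walks [addr, addr + len) one page at
a time:

    #include <inttypes.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Hypothetical stand-in for flushing a single TLB entry. */
    static void flush_one_page(uint64_t page)
    {
        printf("flush page 0x%" PRIx64 "\n", page);
    }

    /* Sketch of the new shape: flush every page in [addr, addr + len). */
    static void flush_range(uint64_t addr, uint64_t len)
    {
        for (uint64_t i = 0; i < len; i += PAGE_SIZE) {
            flush_one_page(addr + i);
        }
    }

    int main(void)
    {
        /* With len == PAGE_SIZE this degenerates to a single-page flush. */
        flush_range(0x200000, 4 * PAGE_SIZE);
        return 0;
    }

With len fixed at TARGET_PAGE_SIZE, as in this patch, the loop body runs
exactly once, so behaviour is unchanged; later patches in this series
(e.g. PULL 014/114) let callers pass larger ranges.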

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f616b58a898..df5d5dbf879 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -707,8 +707,9 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
 }
 
-static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
-                                       target_ulong page, unsigned bits)
+static void tlb_flush_range_locked(CPUArchState *env, int midx,
+                                   target_ulong addr, target_ulong len,
+                                   unsigned bits)
 {
     CPUTLBDesc *d = &env_tlb(env)->d[midx];
     CPUTLBDescFast *f = &env_tlb(env)->f[midx];
@@ -718,20 +719,26 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
      * If @bits is smaller than the tlb size, there may be multiple entries
      * within the TLB; otherwise all addresses that match under @mask hit
      * the same TLB entry.
-     *
      * TODO: Perhaps allow bits to be a few bits less than the size.
      * For now, just flush the entire TLB.
+     *
+     * If @len is larger than the tlb size, then it will take longer to
+     * test all of the entries in the TLB than it will to flush it all.
      */
-    if (mask < f->mask) {
+    if (mask < f->mask || len > f->mask) {
         tlb_debug("forcing full flush midx %d ("
-                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
-                  midx, page, mask);
+                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
+                  midx, addr, mask, len);
         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
         return;
     }
 
-    /* Check if we need to flush due to large pages.  */
-    if ((page & d->large_page_mask) == d->large_page_addr) {
+    /*
+     * Check if we need to flush due to large pages.
+     * Because large_page_mask contains all 1's from the msb,
+     * we only need to test the end of the range.
+     */
+    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
         tlb_debug("forcing full flush midx %d ("
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   midx, d->large_page_addr, d->large_page_mask);
@@ -739,14 +746,20 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
         return;
     }
 
-    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
-        tlb_n_used_entries_dec(env, midx);
+    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
+        target_ulong page = addr + i;
+        CPUTLBEntry *entry = tlb_entry(env, midx, page);
+
+        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
+            tlb_n_used_entries_dec(env, midx);
+        }
+        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
     }
-    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
 }
 
 typedef struct {
     target_ulong addr;
+    target_ulong len;
     uint16_t idxmap;
     uint16_t bits;
 } TLBFlushPageBitsByMMUIdxData;
@@ -760,18 +773,20 @@ tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
 
     assert_cpu_is_self(cpu);
 
-    tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
-              d.addr, d.bits, d.idxmap);
+    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
+              d.addr, d.bits, d.len, d.idxmap);
 
     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if ((d.idxmap >> mmu_idx) & 1) {
-            tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
+            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
         }
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    tb_flush_jmp_cache(cpu, d.addr);
+    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
+        tb_flush_jmp_cache(cpu, d.addr + i);
+    }
 }
 
 static bool encode_pbm_to_runon(run_on_cpu_data *out,
@@ -829,6 +844,7 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -865,6 +881,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -908,6 +925,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
+    d.len = TARGET_PAGE_SIZE;
     d.idxmap = idxmap;
     d.bits = bits;
 
-- 
2.20.1



Thread overview: 96+ messages
2021-05-25 15:01 [PULL 000/114] target-arm queue Peter Maydell
2021-05-25 15:01 ` [PULL 001/114] hw/arm/smmuv3: Another range invalidation fix Peter Maydell
2021-05-25 15:01 ` [PULL 002/114] hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic Peter Maydell
2021-05-25 15:01 ` [PULL 003/114] hw/arm/mps2-tz: Don't duplicate modelling of SRAM in AN524 Peter Maydell
2021-05-25 15:01 ` [PULL 004/114] hw/arm/mps2-tz: Make SRAM_ADDR_WIDTH board-specific Peter Maydell
2021-05-25 15:01 ` [PULL 005/114] hw/arm/armsse.c: Correct modelling of SSE-300 internal SRAMs Peter Maydell
2021-05-25 15:01 ` [PULL 006/114] hw/arm/armsse: Convert armsse_realize() to use ERRP_GUARD Peter Maydell
2021-05-25 15:01 ` [PULL 007/114] hw/arm/mps2-tz: Allow board to specify a boot RAM size Peter Maydell
2021-05-25 15:01 ` [PULL 008/114] hw/arm: Model TCMs in the SSE-300, not the AN547 Peter Maydell
2021-05-25 15:01 ` [PULL 009/114] target/arm: Use correct SP in M-profile exception return Peter Maydell
2021-05-25 15:01 ` [PULL 010/114] accel/tcg: Replace g_new() + memcpy() by g_memdup() Peter Maydell
2021-05-25 15:01 ` [PULL 011/114] accel/tcg: Pass length argument to tlb_flush_range_locked() Peter Maydell [this message]
2021-05-25 15:01 ` [PULL 012/114] accel/tlb: Rename TLBFlushPageBitsByMMUIdxData -> TLBFlushRangeData Peter Maydell
2021-05-25 15:01 ` [PULL 013/114] accel/tcg: Remove {encode,decode}_pbm_to_runon Peter Maydell
2021-05-25 15:01 ` [PULL 014/114] accel/tcg: Add tlb_flush_range_by_mmuidx() Peter Maydell
2021-05-25 15:01 ` [PULL 015/114] accel/tcg: Add tlb_flush_range_by_mmuidx_all_cpus() Peter Maydell
2021-05-25 15:01 ` [PULL 016/114] accel/tlb: Add tlb_flush_range_by_mmuidx_all_cpus_synced() Peter Maydell
2021-05-25 15:01 ` [PULL 017/114] accel/tcg: Rename tlb_flush_page_bits -> range]_by_mmuidx_async_0 Peter Maydell
2021-05-25 15:01 ` [PULL 018/114] accel/tlb: Rename tlb_flush_[page_bits > range]_by_mmuidx_async_[2 > 1] Peter Maydell
2021-05-25 15:01 ` [PULL 019/114] target/arm: Add support for FEAT_TLBIRANGE Peter Maydell
2021-05-25 15:01 ` [PULL 020/114] target/arm: Add support for FEAT_TLBIOS Peter Maydell
2021-05-25 15:01 ` [PULL 021/114] target/arm: set ID_AA64ISAR0.TLB to 2 for max AARCH64 CPU type Peter Maydell
2021-05-25 15:01 ` [PULL 022/114] disas/libvixl: Protect C system header for C++ compiler Peter Maydell
2021-05-25 15:01 ` [PULL 023/114] target/arm: Add ID_AA64ZFR0 fields and isar_feature_aa64_sve2 Peter Maydell
2021-05-25 15:01 ` [PULL 024/114] target/arm: Implement SVE2 Integer Multiply - Unpredicated Peter Maydell
2021-05-25 15:01 ` [PULL 025/114] target/arm: Implement SVE2 integer pairwise add and accumulate long Peter Maydell
2021-05-25 15:01 ` [PULL 026/114] target/arm: Implement SVE2 integer unary operations (predicated) Peter Maydell
2021-05-25 15:01 ` [PULL 027/114] target/arm: Split out saturating/rounding shifts from neon Peter Maydell
2021-05-25 15:01 ` [PULL 028/114] target/arm: Implement SVE2 saturating/rounding bitwise shift left (predicated) Peter Maydell
2021-05-25 15:01 ` [PULL 029/114] target/arm: Implement SVE2 integer halving add/subtract (predicated) Peter Maydell
2021-05-25 15:02 ` [PULL 030/114] target/arm: Implement SVE2 integer pairwise arithmetic Peter Maydell
2021-05-25 15:02 ` [PULL 031/114] target/arm: Implement SVE2 saturating add/subtract (predicated) Peter Maydell
2021-05-25 15:02 ` [PULL 032/114] target/arm: Implement SVE2 integer add/subtract long Peter Maydell
2021-05-25 15:02 ` [PULL 033/114] target/arm: Implement SVE2 integer add/subtract interleaved long Peter Maydell
2021-05-25 15:02 ` [PULL 034/114] target/arm: Implement SVE2 integer add/subtract wide Peter Maydell
2021-05-25 15:02 ` [PULL 035/114] target/arm: Implement SVE2 integer multiply long Peter Maydell
2021-05-25 15:02 ` [PULL 036/114] target/arm: Implement SVE2 PMULLB, PMULLT Peter Maydell
2021-05-25 15:02 ` [PULL 037/114] target/arm: Implement SVE2 bitwise shift left long Peter Maydell
2021-05-25 15:02 ` [PULL 038/114] target/arm: Implement SVE2 bitwise exclusive-or interleaved Peter Maydell
2021-05-25 15:02 ` [PULL 039/114] target/arm: Implement SVE2 bitwise permute Peter Maydell
2021-05-25 15:02 ` [PULL 040/114] target/arm: Implement SVE2 complex integer add Peter Maydell
2021-05-25 15:02 ` [PULL 041/114] target/arm: Implement SVE2 integer absolute difference and accumulate long Peter Maydell
2021-05-25 15:02 ` [PULL 042/114] target/arm: Implement SVE2 integer add/subtract long with carry Peter Maydell
2021-05-25 15:02 ` [PULL 043/114] target/arm: Implement SVE2 bitwise shift right and accumulate Peter Maydell
2021-05-25 15:02 ` [PULL 044/114] target/arm: Implement SVE2 bitwise shift and insert Peter Maydell
2021-05-25 15:02 ` [PULL 045/114] target/arm: Implement SVE2 integer absolute difference and accumulate Peter Maydell
2021-05-25 15:02 ` [PULL 046/114] target/arm: Implement SVE2 saturating extract narrow Peter Maydell
2021-05-25 15:02 ` [PULL 047/114] target/arm: Implement SVE2 floating-point pairwise Peter Maydell
2021-05-25 15:02 ` [PULL 048/114] target/arm: Implement SVE2 SHRN, RSHRN Peter Maydell
2021-05-25 15:02 ` [PULL 049/114] target/arm: Implement SVE2 SQSHRUN, SQRSHRUN Peter Maydell
2021-05-25 15:02 ` [PULL 050/114] target/arm: Implement SVE2 UQSHRN, UQRSHRN Peter Maydell
2021-05-25 15:02 ` [PULL 051/114] target/arm: Implement SVE2 SQSHRN, SQRSHRN Peter Maydell
2021-05-25 15:02 ` [PULL 052/114] target/arm: Implement SVE2 WHILEGT, WHILEGE, WHILEHI, WHILEHS Peter Maydell
2021-05-25 15:02 ` [PULL 053/114] target/arm: Implement SVE2 WHILERW, WHILEWR Peter Maydell
2021-05-25 15:02 ` [PULL 054/114] target/arm: Implement SVE2 bitwise ternary operations Peter Maydell
2021-05-25 15:02 ` [PULL 055/114] target/arm: Implement SVE2 MATCH, NMATCH Peter Maydell
2021-05-25 15:02 ` [PULL 056/114] target/arm: Implement SVE2 saturating multiply-add long Peter Maydell
2021-05-25 15:02 ` [PULL 057/114] target/arm: Implement SVE2 saturating multiply-add high Peter Maydell
2021-05-25 15:02 ` [PULL 058/114] target/arm: Implement SVE2 integer multiply-add long Peter Maydell
2021-05-25 15:02 ` [PULL 059/114] target/arm: Implement SVE2 complex integer multiply-add Peter Maydell
2021-05-25 15:02 ` [PULL 060/114] target/arm: Implement SVE2 ADDHNB, ADDHNT Peter Maydell
2021-05-25 15:02 ` [PULL 061/114] target/arm: Implement SVE2 RADDHNB, RADDHNT Peter Maydell
2021-05-25 15:02 ` [PULL 062/114] target/arm: Implement SVE2 SUBHNB, SUBHNT Peter Maydell
2021-05-25 15:02 ` [PULL 063/114] target/arm: Implement SVE2 RSUBHNB, RSUBHNT Peter Maydell
2021-05-25 15:02 ` [PULL 064/114] target/arm: Implement SVE2 HISTCNT, HISTSEG Peter Maydell
2021-05-25 15:02 ` [PULL 065/114] target/arm: Implement SVE2 XAR Peter Maydell
2021-05-25 15:02 ` [PULL 066/114] target/arm: Implement SVE2 scatter store insns Peter Maydell
2021-05-25 15:02 ` [PULL 067/114] target/arm: Implement SVE2 gather load insns Peter Maydell
2021-05-25 15:02 ` [PULL 068/114] target/arm: Implement SVE2 FMMLA Peter Maydell
2021-05-25 15:02 ` [PULL 069/114] target/arm: Implement SVE2 SPLICE, EXT Peter Maydell
2021-05-25 15:02 ` [PULL 070/114] target/arm: Use correct output type for gvec_sdot_*_b Peter Maydell
2021-05-25 15:02 ` [PULL 071/114] target/arm: Pass separate addend to {U, S}DOT helpers Peter Maydell
2021-05-25 15:02 ` [PULL 072/114] target/arm: Pass separate addend to FCMLA helpers Peter Maydell
2021-05-25 15:02 ` [PULL 073/114] target/arm: Split out formats for 2 vectors + 1 index Peter Maydell
2021-05-25 15:02 ` [PULL 074/114] target/arm: Split out formats for 3 " Peter Maydell
2021-05-25 15:02 ` [PULL 075/114] target/arm: Implement SVE2 integer multiply (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 076/114] target/arm: Implement SVE2 integer multiply-add (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 077/114] target/arm: Implement SVE2 saturating multiply-add high (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 078/114] target/arm: Implement SVE2 saturating multiply-add (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 079/114] target/arm: Implement SVE2 saturating multiply (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 080/114] target/arm: Implement SVE2 signed saturating doubling multiply high Peter Maydell
2021-05-25 15:02 ` [PULL 081/114] target/arm: Implement SVE2 saturating multiply high (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 082/114] target/arm: Implement SVE2 multiply-add long (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 083/114] target/arm: Implement SVE2 integer multiply " Peter Maydell
2021-05-25 15:02 ` [PULL 084/114] target/arm: Implement SVE2 complex integer multiply-add (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 085/114] target/arm: Implement SVE2 complex integer dot product Peter Maydell
2021-05-25 15:02 ` [PULL 086/114] target/arm: Macroize helper_gvec_{s,u}dot_{b,h} Peter Maydell
2021-05-25 15:02 ` [PULL 087/114] target/arm: Macroize helper_gvec_{s,u}dot_idx_{b,h} Peter Maydell
2021-05-25 15:02 ` [PULL 088/114] target/arm: Implement SVE mixed sign dot product (indexed) Peter Maydell
2021-05-25 15:02 ` [PULL 089/114] target/arm: Implement SVE mixed sign dot product Peter Maydell
2021-05-25 15:03 ` [PULL 090/114] target/arm: Implement SVE2 crypto unary operations Peter Maydell
2021-05-25 15:03 ` [PULL 091/114] target/arm: Implement SVE2 crypto destructive binary operations Peter Maydell
2021-05-25 15:03 ` [PULL 092/114] target/arm: Implement SVE2 crypto constructive " Peter Maydell
2021-05-25 15:03 ` [PULL 093/114] target/arm: Implement SVE2 TBL, TBX Peter Maydell
2021-05-25 15:03 ` [PULL 094/114] target/arm: Implement SVE2 FCVTNT Peter Maydell
2021-05-25 16:30 ` [PULL 000/114] target-arm queue Peter Maydell
