From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PULL 019/114] target/arm: Add support for FEAT_TLBIRANGE
Date: Tue, 25 May 2021 16:01:49 +0100
Message-ID: <20210525150324.32370-20-peter.maydell@linaro.org>
In-Reply-To: <20210525150324.32370-1-peter.maydell@linaro.org>

From: Rebecca Cran <rebecca@nuviainc.com>

ARMv8.4 adds the mandatory FEAT_TLBIRANGE, which provides TLBI
maintenance instructions that operate on a range of input addresses
rather than on a single address at a time.
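
The range is encoded in the Xt payload: BaseADDR in bits [36:0],
NUM in bits [43:39], SCALE in bits [45:44] and TG in bits [47:46],
with the invalidated length being
(NUM + 1) << (5 * SCALE + 1 + page_shift). As an illustrative
example, TG = 0b01 (4K granule, page_shift 12), SCALE = 1 and
NUM = 7 describe a (7 + 1) << 18 byte, i.e. 2MiB, range.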

Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210512182337.18563-2-rebecca@nuviainc.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h    |   5 +
 target/arm/helper.c | 281 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 286 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 616b3932534..5802798c306 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -4071,6 +4071,11 @@ static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
 }
 
+static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
+{
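+    /* ID_AA64ISAR0.TLB == 2 means TLBIRANGE and TLBIOS are implemented. */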
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
+}
+
 static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 3b365a78cbc..4adb017f81a 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -4759,6 +4759,172 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                                   ARMMMUIdxBit_SE3, bits);
 }
 
+#ifdef TARGET_AARCH64
+static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
+                                           uint64_t value)
+{
+    unsigned int page_shift;
+    unsigned int page_size_granule;
+    uint64_t num;
+    uint64_t scale;
+    uint64_t exponent;
+    uint64_t length;
+
+    /* Xt layout: NUM is bits [43:39], SCALE bits [45:44], TG bits [47:46]. */
+    num = extract64(value, 39, 5);
+    scale = extract64(value, 44, 2);
+    page_size_granule = extract64(value, 46, 2);
+
+    if (page_size_granule == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
+                      page_size_granule);
+        return 0;
+    }
+
+    /* TG of 1, 2, 3 encodes 4K, 16K, 64K granules (shifts 12, 14, 16). */
+    page_shift = (page_size_granule - 1) * 2 + 12;
+
+    exponent = (5 * scale) + 1;
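+    /*
+     * Illustrative example: TG = 0b01 (4K granule, page_shift 12),
+     * SCALE = 1, NUM = 7 gives (7 + 1) << (5 * 1 + 1 + 12) = 2MiB.
+     */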
+    length = (num + 1) << (exponent + page_shift);
+
+    return length;
+}
+
+static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
+                                         bool two_ranges)
+{
+    /* TODO: ARMv8.7 FEAT_LPA2 */
+    uint64_t pageaddr;
+
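+    /*
+     * BaseADDR is bits [36:0] of the payload, in units of the
+     * translation granule; shifting by TARGET_PAGE_BITS assumes the
+     * 4K granule. Two-range regimes need the address sign-extended.
+     */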
+    if (two_ranges) {
+        pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
+    } else {
+        pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+    }
+
+    return pageaddr;
+}
+
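+/*
+ * Decode the range payload in @value and flush it for the MMU indexes
+ * in @idxmap, either on this CPU only or broadcast to all CPUs when
+ * the flush must be synchronized (the *IS forms, or when HCR_EL2.FB
+ * forces broadcast).
+ */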
+static void do_rvae_write(CPUARMState *env, uint64_t value,
+                          int idxmap, bool synced)
+{
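+    /*
+     * Pick the lowest set bit of idxmap as a representative MMU index:
+     * all indexes in the mask belong to the same translation regime.
+     */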
+    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
+    bool two_ranges = regime_has_2_ranges(one_idx);
+    uint64_t baseaddr, length;
+    int bits;
+
+    baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
+    length = tlbi_aa64_range_get_length(env, value);
+    bits = tlbbits_for_regime(env, one_idx, baseaddr);
+
+    if (synced) {
+        tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
+                                                  baseaddr,
+                                                  length,
+                                                  idxmap,
+                                                  bits);
+    } else {
+        tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr,
+                                  length, idxmap, bits);
+    }
+}
+
+static void tlbi_aa64_rvae1_write(CPUARMState *env,
+                                  const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    /*
+     * Invalidate by VA range, EL1&0.
+     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
+     * since we don't support flush-for-specific-ASID-only or
+     * flush-last-level-only.
+     */
+
+    do_rvae_write(env, value, vae1_tlbmask(env),
+                  tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae1is_write(CPUARMState *env,
+                                    const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    /*
+     * Invalidate by VA range, Inner/Outer Shareable EL1&0.
+     * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
+     * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
+     * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
+     * shareable specific flushes.
+     */
+
+    do_rvae_write(env, value, vae1_tlbmask(env), true);
+}
+
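+/* MMU index to flush for EL2, in the current security state. */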
+static int vae2_tlbmask(CPUARMState *env)
+{
+    return (arm_is_secure_below_el3(env)
+            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
+}
+
+static void tlbi_aa64_rvae2_write(CPUARMState *env,
+                                  const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    /*
+     * Invalidate by VA range, EL2.
+     * Currently handles all of RVAE2 and RVALE2,
+     * since we don't support flush-for-specific-ASID-only or
+     * flush-last-level-only.
+     */
+
+    do_rvae_write(env, value, vae2_tlbmask(env),
+                  tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae2is_write(CPUARMState *env,
+                                    const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    /*
+     * Invalidate by VA range, Inner/Outer Shareable, EL2.
+     * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
+     * since we don't support flush-for-specific-ASID-only,
+     * flush-last-level-only or inner/outer shareable specific flushes.
+     */
+
+    do_rvae_write(env, value, vae2_tlbmask(env), true);
+}
+
+static void tlbi_aa64_rvae3_write(CPUARMState *env,
+                                  const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    /*
+     * Invalidate by VA range, EL3.
+     * Currently handles all of RVAE3 and RVALE3,
+     * since we don't support flush-for-specific-ASID-only or
+     * flush-last-level-only.
+     */
+
+    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
+                  tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae3is_write(CPUARMState *env,
+                                    const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    /*
+     * Invalidate by VA range, EL3, Inner/Outer Shareable.
+     * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
+     * since we don't support flush-for-specific-ASID-only,
+     * flush-last-level-only or inner/outer specific flushes.
+     */
+
+    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
+}
+#endif
+
 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
 {
@@ -6920,6 +7086,118 @@ static const ARMCPRegInfo pauth_reginfo[] = {
     REGINFO_SENTINEL
 };
 
+static const ARMCPRegInfo tlbirange_reginfo[] = {
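+    /*
+     * Encodings follow the Arm ARM system-instruction table; for
+     * example, TLBI RVAE1IS is opc1 = 0, CRn = c8, CRm = c2, opc2 = 1.
+     */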
+    { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+   { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+   { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1is_write },
+    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1_write },
+    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1_write },
+   { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1_write },
+    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
+      .access = PL1_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae1_write },
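+    /*
+     * The range IPAS2 invalidates are implemented as NOPs, matching
+     * QEMU's handling of the non-range IPAS2 instructions.
+     */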
+    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
+      .access = PL2_W, .type = ARM_CP_NOP },
+    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2is_write },
+   { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2is_write },
+    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
+      .access = PL2_W, .type = ARM_CP_NOP },
+   { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
+      .access = PL2_W, .type = ARM_CP_NOP },
+   { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2is_write },
+   { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2is_write },
+    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2_write },
+   { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
+      .access = PL2_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae2_write },
+   { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3is_write },
+   { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3is_write },
+   { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3is_write },
+   { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3is_write },
+   { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3_write },
+   { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_rvae3_write },
+    REGINFO_SENTINEL
+};
+
 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     Error *err = NULL;
@@ -8289,6 +8567,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (cpu_isar_feature(aa64_rndr, cpu)) {
         define_arm_cp_regs(cpu, rndr_reginfo);
     }
+    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
+        define_arm_cp_regs(cpu, tlbirange_reginfo);
+    }
 #ifndef CONFIG_USER_ONLY
     /* Data Cache clean instructions up to PoP */
     if (cpu_isar_feature(aa64_dcpop, cpu)) {
-- 
2.20.1