From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>, qemu-arm@nongnu.org
Subject: [PATCH v7 43/92] target/arm: Implement SVE2 XAR
Date: Mon, 24 May 2021 18:03:09 -0700
Message-ID: <20210525010358.152808-44-richard.henderson@linaro.org>
In-Reply-To: <20210525010358.152808-1-richard.henderson@linaro.org>

In addition to the SVE2 XAR implementation, use the same vector
generator interface for the AdvSIMD XAR insn.  This fixes a bug in
which the AdvSIMD insn failed to clear the high bits of the SVE
register.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
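For reference, XAR computes, per element, an exclusive-or of the two
source vectors followed by a rotate right by the immediate.  A
standalone C sketch of the 64-bit element case (illustrative only, not
part of the patch; rotr64 and xar_d_ref are made-up names standing in
for the real helpers):

    #include <stddef.h>
    #include <stdint.h>

    /* Rotate a 64-bit value right by sh bits (0 <= sh < 64). */
    static inline uint64_t rotr64(uint64_t x, unsigned sh)
    {
        return sh ? (x >> sh) | (x << (64 - sh)) : x;
    }

    /* Reference XAR on 64-bit elements: d[i] = ror(n[i] ^ m[i], shr). */
    static void xar_d_ref(uint64_t *d, const uint64_t *n,
                          const uint64_t *m, size_t elems, unsigned shr)
    {
        for (size_t i = 0; i < elems; ++i) {
            d[i] = rotr64(n[i] ^ m[i], shr);
        }
    }
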
 target/arm/helper-sve.h    |   4 ++
 target/arm/helper.h        |   2 +
 target/arm/translate-a64.h |   3 ++
 target/arm/sve.decode      |   4 ++
 target/arm/sve_helper.c    |  39 ++++++++++++++
 target/arm/translate-a64.c |  25 ++-------
 target/arm/translate-sve.c | 104 +++++++++++++++++++++++++++++++++++++
 target/arm/vec_helper.c    |  12 +++++
 8 files changed, 172 insertions(+), 21 deletions(-)
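
The byte and halfword helpers rotate every lane packed into a uint64_t
at once by combining a right shift and a left shift under a lane mask.
A standalone C sketch of that trick for 8-bit lanes (illustrative only,
not part of the patch; dup8 and ror8_lanes are made-up names):

    #include <stdint.h>

    /* Broadcast an 8-bit constant to every byte of a 64-bit word. */
    static inline uint64_t dup8(uint8_t c)
    {
        return c * 0x0101010101010101ull;
    }

    /* Rotate each 8-bit lane of x right by shr, for 1 <= shr <= 7.
     * The mask keeps the bits that belong to a lane after the right
     * shift; its complement keeps the bits wrapped in by the left shift.
     */
    static inline uint64_t ror8_lanes(uint64_t x, unsigned shr)
    {
        uint64_t mask = dup8(0xff >> shr);
        return ((x >> shr) & mask) | ((x << (8 - shr)) & ~mask);
    }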

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 507a2fea8e..28b8f00201 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2558,6 +2558,10 @@ DEF_HELPER_FLAGS_5(sve2_histcnt_d, TCG_CALL_NO_RWG,
 
 DEF_HELPER_FLAGS_4(sve2_histseg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve2_xar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_xar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_xar_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 6bb0b0ddc0..23a7ec5638 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -953,6 +953,8 @@ DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index 89437276e7..58f50abca4 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -120,5 +120,8 @@ bool disas_sve(DisasContext *, uint32_t);
 
 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                  uint32_t rm_ofs, int64_t shift,
+                  uint32_t opr_sz, uint32_t max_sz);
 
 #endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 8f501a083c..7645587469 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -65,6 +65,7 @@
 &rr_dbm         rd rn dbm
 &rrri           rd rn rm imm
 &rri_esz        rd rn imm esz
+&rrri_esz       rd rn rm imm esz
 &rrr_esz        rd rn rm esz
 &rpr_esz        rd pg rn esz
 &rpr_s          rd pg rn s
@@ -384,6 +385,9 @@ ORR_zzz         00000100 01 1 ..... 001 100 ..... .....         @rd_rn_rm_e0
 EOR_zzz         00000100 10 1 ..... 001 100 ..... .....         @rd_rn_rm_e0
 BIC_zzz         00000100 11 1 ..... 001 100 ..... .....         @rd_rn_rm_e0
 
+XAR             00000100 .. 1 ..... 001 101 rm:5  rd:5   &rrri_esz \
+                rn=%reg_movprfx esz=%tszimm16_esz imm=%tszimm16_shr
+
 # SVE2 bitwise ternary operations
 EOR3            00000100 00 1 ..... 001 110 ..... .....         @rdn_ra_rm_e0
 BSL             00000100 00 1 ..... 001 111 ..... .....         @rdn_ra_rm_e0
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 662ed80b1c..5b6292929e 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -7202,3 +7202,42 @@ void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc)
         *(uint64_t *)(vd + i + 8) = out1;
     }
 }
+
+void HELPER(sve2_xar_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    int shr = simd_data(desc);
+    int shl = 8 - shr;
+    uint64_t mask = dup_const(MO_8, 0xff >> shr);
+    uint64_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        uint64_t t = n[i] ^ m[i];
+        d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
+    }
+}
+
+void HELPER(sve2_xar_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    int shr = simd_data(desc);
+    int shl = 16 - shr;
+    uint64_t mask = dup_const(MO_16, 0xffff >> shr);
+    uint64_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        uint64_t t = n[i] ^ m[i];
+        d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
+    }
+}
+
+void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+    int shr = simd_data(desc);
+    uint32_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = ror32(n[i] ^ m[i], shr);
+    }
+}
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 0c80d0b505..bdd47208b1 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14349,8 +14349,6 @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
     int imm6 = extract32(insn, 10, 6);
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
-    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
-    int pass;
 
     if (!dc_isar_feature(aa64_sha3, s)) {
         unallocated_encoding(s);
@@ -14361,25 +14359,10 @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
         return;
     }
 
-    tcg_op1 = tcg_temp_new_i64();
-    tcg_op2 = tcg_temp_new_i64();
-    tcg_res[0] = tcg_temp_new_i64();
-    tcg_res[1] = tcg_temp_new_i64();
-
-    for (pass = 0; pass < 2; pass++) {
-        read_vec_element(s, tcg_op1, rn, pass, MO_64);
-        read_vec_element(s, tcg_op2, rm, pass, MO_64);
-
-        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
-        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
-    }
-    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
-    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
-
-    tcg_temp_free_i64(tcg_op1);
-    tcg_temp_free_i64(tcg_op2);
-    tcg_temp_free_i64(tcg_res[0]);
-    tcg_temp_free_i64(tcg_res[1]);
+    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
+                 vec_full_reg_offset(s, rn),
+                 vec_full_reg_offset(s, rm), imm6, 16,
+                 vec_full_reg_size(s));
 }
 
 /* Crypto three-reg imm2
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 13f84d14d3..ba39ff84a5 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -340,6 +340,110 @@ static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a)
     return do_zzz_fn(s, a, tcg_gen_gvec_andc);
 }
 
+static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    uint64_t mask = dup_const(MO_8, 0xff >> sh);
+
+    tcg_gen_xor_i64(t, n, m);
+    tcg_gen_shri_i64(d, t, sh);
+    tcg_gen_shli_i64(t, t, 8 - sh);
+    tcg_gen_andi_i64(d, d, mask);
+    tcg_gen_andi_i64(t, t, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    uint64_t mask = dup_const(MO_16, 0xffff >> sh);
+
+    tcg_gen_xor_i64(t, n, m);
+    tcg_gen_shri_i64(d, t, sh);
+    tcg_gen_shli_i64(t, t, 16 - sh);
+    tcg_gen_andi_i64(d, d, mask);
+    tcg_gen_andi_i64(t, t, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
+{
+    tcg_gen_xor_i32(d, n, m);
+    tcg_gen_rotri_i32(d, d, sh);
+}
+
+static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+    tcg_gen_xor_i64(d, n, m);
+    tcg_gen_rotri_i64(d, d, sh);
+}
+
+static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                        TCGv_vec m, int64_t sh)
+{
+    tcg_gen_xor_vec(vece, d, n, m);
+    tcg_gen_rotri_vec(vece, d, d, sh);
+}
+
+void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                  uint32_t rm_ofs, int64_t shift,
+                  uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
+    static const GVecGen3i ops[4] = {
+        { .fni8 = gen_xar8_i64,
+          .fniv = gen_xar_vec,
+          .fno = gen_helper_sve2_xar_b,
+          .opt_opc = vecop,
+          .vece = MO_8 },
+        { .fni8 = gen_xar16_i64,
+          .fniv = gen_xar_vec,
+          .fno = gen_helper_sve2_xar_h,
+          .opt_opc = vecop,
+          .vece = MO_16 },
+        { .fni4 = gen_xar_i32,
+          .fniv = gen_xar_vec,
+          .fno = gen_helper_sve2_xar_s,
+          .opt_opc = vecop,
+          .vece = MO_32 },
+        { .fni8 = gen_xar_i64,
+          .fniv = gen_xar_vec,
+          .fno = gen_helper_gvec_xar_d,
+          .opt_opc = vecop,
+          .vece = MO_64 }
+    };
+    int esize = 8 << vece;
+
+    /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
+    tcg_debug_assert(shift >= 0);
+    tcg_debug_assert(shift <= esize);
+    shift &= esize - 1;
+
+    if (shift == 0) {
+        /* xar with no rotate devolves to xor. */
+        tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
+    } else {
+        tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
+                        shift, &ops[vece]);
+    }
+}
+
+static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
+{
+    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
+                     vec_full_reg_offset(s, a->rn),
+                     vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
+    }
+    return true;
+}
+
 static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn)
 {
     if (!dc_isar_feature(aa64_sve2, s)) {
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 19006f50f7..a3d80ecad0 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -2253,3 +2253,15 @@ void HELPER(gvec_umulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
+
+void HELPER(gvec_xar_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    int shr = simd_data(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        d[i] = ror64(n[i] ^ m[i], shr);
+    }
+    clear_tail(d, opr_sz * 8, simd_maxsz(desc));
+}
-- 
2.25.1


