From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Subject: [PATCH v5 24/81] target/arm: Implement SVE2 saturating extract narrow
Date: Fri, 16 Apr 2021 14:01:43 -0700
Message-ID: <20210416210240.1591291-25-richard.henderson@linaro.org>
In-Reply-To: <20210416210240.1591291-1-richard.henderson@linaro.org>
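
Implement the SVE2 saturating extract narrow instructions SQXTNB,
SQXTNT, UQXTNB, UQXTNT, SQXTUNB and SQXTUNT.  Each is given an
out-of-line helper for the generic path plus a TCG vector expansion
built from smin/smax/umin clamps and, for the top forms, a shift and
bitwise select to merge the results into the destination.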

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
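
A note for reviewers, not part of the commit: the bottom forms
(SQXTNB and friends) saturate each wide source element to the
half-width range and write it to the even-numbered half-width
destination element, zeroing the odd-numbered element, which is what
the DO_XTNB macro below implements.  A minimal scalar sketch of that
semantic for SQXTNB .H -> .B (function names here are hypothetical,
purely illustrative):

#include <stdint.h>
#include <stdio.h>

static int8_t sat_s8(int64_t n)
{
    return n < INT8_MIN ? INT8_MIN : n > INT8_MAX ? INT8_MAX : (int8_t)n;
}

/* Even destination bytes receive the saturated result; odd bytes
   are zeroed (little-endian element order assumed). */
static void ref_sqxtnb_h(uint8_t *zd, const int16_t *zn, int elems)
{
    for (int i = 0; i < elems; ++i) {
        zd[2 * i] = (uint8_t)sat_s8(zn[i]);
        zd[2 * i + 1] = 0;
    }
}

int main(void)
{
    const int16_t zn[4] = { 300, -300, 5, -32768 };
    uint8_t zd[8];

    ref_sqxtnb_h(zd, zn, 4);
    for (int i = 0; i < 8; ++i) {
        printf("%02x ", zd[i]);     /* 7f 00 80 00 05 00 80 00 */
    }
    printf("\n");
    return 0;
}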
 target/arm/helper-sve.h    |  24 ++++
 target/arm/sve.decode      |  12 ++
 target/arm/sve_helper.c    |  56 +++++++++
 target/arm/translate-sve.c | 238 +++++++++++++++++++++++++++++++++++++
 4 files changed, 330 insertions(+)
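
The top forms are tied operations: the even-numbered half-width
elements of Zd are preserved and the saturated results land in the
odd-numbered elements.  In the vector expansion this is a shift by
halfbits followed by tcg_gen_bitsel_vec with a low-half mask.  A
sketch of that merge for one 16-bit container (vece = MO_16, so
halfbits = 8; the helper name is hypothetical):

static uint16_t merge_top16(uint16_t d, uint8_t n_sat)
{
    const uint16_t mask = 0x00ff;                /* low halfbits set */
    uint16_t shifted = (uint16_t)(n_sat << 8);   /* result to top half */

    /* bitsel(t, d, n): keep d where mask is set, take n elsewhere */
    return (uint16_t)((mask & d) | (~mask & shifted));
}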

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 4a62012850..b302203ce8 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2419,3 +2419,27 @@ DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG,
 
 DEF_HELPER_FLAGS_5(sve2_adcl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_adcl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 32b15e4192..19866ec4c6 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -1272,3 +1272,15 @@ SLI             01000101 .. 0 ..... 11110 1 ..... .....  @rd_rn_tszimm_shl
 # TODO: Use @rda and %reg_movprfx here.
 SABA            01000101 .. 0 ..... 11111 0 ..... .....  @rd_rn_rm
 UABA            01000101 .. 0 ..... 11111 1 ..... .....  @rd_rn_rm
+
+#### SVE2 Narrowing
+
+## SVE2 saturating extract narrow
+
+# Bits 23, 18-16 are zero, limited in the translator via esz < 3 & imm == 0.
+SQXTNB          01000101 .. 1 ..... 010 000 ..... .....  @rd_rn_tszimm_shl
+SQXTNT          01000101 .. 1 ..... 010 001 ..... .....  @rd_rn_tszimm_shl
+UQXTNB          01000101 .. 1 ..... 010 010 ..... .....  @rd_rn_tszimm_shl
+UQXTNT          01000101 .. 1 ..... 010 011 ..... .....  @rd_rn_tszimm_shl
+SQXTUNB         01000101 .. 1 ..... 010 100 ..... .....  @rd_rn_tszimm_shl
+SQXTUNT         01000101 .. 1 ..... 010 101 ..... .....  @rd_rn_tszimm_shl
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 0049ad861f..7dca67785a 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1264,6 +1264,62 @@ DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t,     , H1_4, DO_ABD)
 
 #undef DO_ZZZW_ACC
 
+#define DO_XTNB(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)         \
+{                                                            \
+    intptr_t i, opr_sz = simd_oprsz(desc);                   \
+    for (i = 0; i < opr_sz; i += sizeof(TYPE)) {             \
+        TYPE nn = *(TYPE *)(vn + i);                         \
+        nn = OP(nn) & MAKE_64BIT_MASK(0, sizeof(TYPE) * 4);  \
+        *(TYPE *)(vd + i) = nn;                              \
+    }                                                        \
+}
+
+#define DO_XTNT(NAME, TYPE, TYPEN, H, OP)                               \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                    \
+{                                                                       \
+    intptr_t i, opr_sz = simd_oprsz(desc), odd = H(sizeof(TYPEN));      \
+    for (i = 0; i < opr_sz; i += sizeof(TYPE)) {                        \
+        TYPE nn = *(TYPE *)(vn + i);                                    \
+        *(TYPEN *)(vd + i + odd) = OP(nn);                              \
+    }                                                                   \
+}
+
+#define DO_SQXTN_H(n)  do_sat_bhs(n, INT8_MIN, INT8_MAX)
+#define DO_SQXTN_S(n)  do_sat_bhs(n, INT16_MIN, INT16_MAX)
+#define DO_SQXTN_D(n)  do_sat_bhs(n, INT32_MIN, INT32_MAX)
+
+DO_XTNB(sve2_sqxtnb_h, int16_t, DO_SQXTN_H)
+DO_XTNB(sve2_sqxtnb_s, int32_t, DO_SQXTN_S)
+DO_XTNB(sve2_sqxtnb_d, int64_t, DO_SQXTN_D)
+
+DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, DO_SQXTN_H)
+DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, DO_SQXTN_S)
+DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, DO_SQXTN_D)
+
+#define DO_UQXTN_H(n)  do_sat_bhs(n, 0, UINT8_MAX)
+#define DO_UQXTN_S(n)  do_sat_bhs(n, 0, UINT16_MAX)
+#define DO_UQXTN_D(n)  do_sat_bhs(n, 0, UINT32_MAX)
+
+DO_XTNB(sve2_uqxtnb_h, uint16_t, DO_UQXTN_H)
+DO_XTNB(sve2_uqxtnb_s, uint32_t, DO_UQXTN_S)
+DO_XTNB(sve2_uqxtnb_d, uint64_t, DO_UQXTN_D)
+
+DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, DO_UQXTN_H)
+DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, DO_UQXTN_S)
+DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, DO_UQXTN_D)
+
+DO_XTNB(sve2_sqxtunb_h, int16_t, DO_UQXTN_H)
+DO_XTNB(sve2_sqxtunb_s, int32_t, DO_UQXTN_S)
+DO_XTNB(sve2_sqxtunb_d, int64_t, DO_UQXTN_D)
+
+DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, DO_UQXTN_H)
+DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, DO_UQXTN_S)
+DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, DO_UQXTN_D)
+
+#undef DO_XTNB
+#undef DO_XTNT
 
 void HELPER(sve2_adcl_s)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
 {
     intptr_t i, opr_sz = simd_oprsz(desc);
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index ba1953118b..b8ced82e4e 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -6459,3 +6459,241 @@ static bool trans_UABA(DisasContext *s, arg_rrr_esz *a)
 {
     return do_sve2_fn_zzz(s, a, gen_gvec_uaba);
 }
+
+static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a,
+                                   const GVecGen2 ops[3])
+{
+    if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 ||
+        !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
+                        vec_full_reg_offset(s, a->rn),
+                        vsz, vsz, &ops[a->esz]);
+    }
+    return true;
+}
+
+static const TCGOpcode sqxtn_list[] = {
+    INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+};
+
+static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t mask = (1ull << halfbits) - 1;
+    int64_t min = -1ull << (halfbits - 1);
+    int64_t max = -min - 1;
+
+    tcg_gen_dupi_vec(vece, t, min);
+    tcg_gen_smax_vec(vece, d, n, t);
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_smin_vec(vece, d, d, t);
+    tcg_gen_dupi_vec(vece, t, mask);
+    tcg_gen_and_vec(vece, d, d, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_sqxtnb_vec,
+          .opt_opc = sqxtn_list,
+          .fno = gen_helper_sve2_sqxtnb_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqxtnb_vec,
+          .opt_opc = sqxtn_list,
+          .fno = gen_helper_sve2_sqxtnb_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqxtnb_vec,
+          .opt_opc = sqxtn_list,
+          .fno = gen_helper_sve2_sqxtnb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t mask = (1ull << halfbits) - 1;
+    int64_t min = -1ull << (halfbits - 1);
+    int64_t max = -min - 1;
+
+    tcg_gen_dupi_vec(vece, t, min);
+    tcg_gen_smax_vec(vece, n, n, t);
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_smin_vec(vece, n, n, t);
+    tcg_gen_shli_vec(vece, n, n, halfbits);
+    tcg_gen_dupi_vec(vece, t, mask);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_sqxtnt_vec,
+          .opt_opc = sqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtnt_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqxtnt_vec,
+          .opt_opc = sqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtnt_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqxtnt_vec,
+          .opt_opc = sqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtnt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static const TCGOpcode uqxtn_list[] = {
+    INDEX_op_shli_vec, INDEX_op_umin_vec, 0
+};
+
+static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t max = (1ull << halfbits) - 1;
+
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_umin_vec(vece, d, n, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_uqxtnb_vec,
+          .opt_opc = uqxtn_list,
+          .fno = gen_helper_sve2_uqxtnb_h,
+          .vece = MO_16 },
+        { .fniv = gen_uqxtnb_vec,
+          .opt_opc = uqxtn_list,
+          .fno = gen_helper_sve2_uqxtnb_s,
+          .vece = MO_32 },
+        { .fniv = gen_uqxtnb_vec,
+          .opt_opc = uqxtn_list,
+          .fno = gen_helper_sve2_uqxtnb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t max = (1ull << halfbits) - 1;
+
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_umin_vec(vece, n, n, t);
+    tcg_gen_shli_vec(vece, n, n, halfbits);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_uqxtnt_vec,
+          .opt_opc = uqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqxtnt_h,
+          .vece = MO_16 },
+        { .fniv = gen_uqxtnt_vec,
+          .opt_opc = uqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqxtnt_s,
+          .vece = MO_32 },
+        { .fniv = gen_uqxtnt_vec,
+          .opt_opc = uqxtn_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_uqxtnt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static const TCGOpcode sqxtun_list[] = {
+    INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
+};
+
+static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t max = (1ull << halfbits) - 1;
+
+    tcg_gen_dupi_vec(vece, t, 0);
+    tcg_gen_smax_vec(vece, d, n, t);
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_umin_vec(vece, d, d, t);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_sqxtunb_vec,
+          .opt_opc = sqxtun_list,
+          .fno = gen_helper_sve2_sqxtunb_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqxtunb_vec,
+          .opt_opc = sqxtun_list,
+          .fno = gen_helper_sve2_sqxtunb_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqxtunb_vec,
+          .opt_opc = sqxtun_list,
+          .fno = gen_helper_sve2_sqxtunb_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+    int halfbits = 4 << vece;
+    int64_t max = (1ull << halfbits) - 1;
+
+    tcg_gen_dupi_vec(vece, t, 0);
+    tcg_gen_smax_vec(vece, n, n, t);
+    tcg_gen_dupi_vec(vece, t, max);
+    tcg_gen_umin_vec(vece, n, n, t);
+    tcg_gen_shli_vec(vece, n, n, halfbits);
+    tcg_gen_bitsel_vec(vece, d, t, d, n);
+    tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
+{
+    static const GVecGen2 ops[3] = {
+        { .fniv = gen_sqxtunt_vec,
+          .opt_opc = sqxtun_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtunt_h,
+          .vece = MO_16 },
+        { .fniv = gen_sqxtunt_vec,
+          .opt_opc = sqxtun_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtunt_s,
+          .vece = MO_32 },
+        { .fniv = gen_sqxtunt_vec,
+          .opt_opc = sqxtun_list,
+          .load_dest = true,
+          .fno = gen_helper_sve2_sqxtunt_d,
+          .vece = MO_64 },
+    };
+    return do_sve2_narrow_extract(s, a, ops);
+}
-- 
2.25.1


