From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Subject: [PATCH v5 49/81] target/arm: Pass separate addend to FCMLA helpers
Date: Fri, 16 Apr 2021 14:02:08 -0700
Message-ID: <20210416210240.1591291-50-richard.henderson@linaro.org>
In-Reply-To: <20210416210240.1591291-1-richard.henderson@linaro.org>

For SVE, we potentially have a 4th argument coming from the
movprfx instruction: the helpers currently reuse the destination
register as the addend, which is only correct while the destination
and the addend are the same register.  Currently we do not optimize
movprfx, so the problem is not visible.
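
As a rough standalone sketch of the idea (a made-up, simplified loop,
not the real FCMLA helpers, which also handle the complex rotation; see
the diff below): the addend becomes a separate operand 'a'.  Today the
translators pass the destination register for it, so behaviour is
unchanged, but an optimized movprfx could pass a different register.

    #include <stddef.h>

    /* Illustration only: multiply-accumulate with the addend read from a
     * separate vector 'a' rather than from the destination 'd'. */
    static void muladd_with_separate_addend(float *d, const float *n,
                                            const float *m, const float *a,
                                            size_t elts)
    {
        for (size_t i = 0; i < elts; i++) {
            d[i] = n[i] * m[i] + a[i];   /* was: n[i] * m[i] + d[i] */
        }
    }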

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper.h             | 20 ++++++-------
 target/arm/translate-a64.c      | 28 ++++++++++++++----
 target/arm/translate-sve.c      |  5 ++--
 target/arm/vec_helper.c         | 50 +++++++++++++--------------------
 target/arm/translate-neon.c.inc | 10 ++++---
 5 files changed, 62 insertions(+), 51 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index f4b092ee1c..72c5bf6aca 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -629,16 +629,16 @@ DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 
-DEF_HELPER_FLAGS_5(gvec_fcmlah, TCG_CALL_NO_RWG,
-                   void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
-                   void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlas, TCG_CALL_NO_RWG,
-                   void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
-                   void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlad, TCG_CALL_NO_RWG,
-                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_5(neon_paddh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(neon_pmaxh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 004ff8c019..f45b81e56d 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -709,6 +709,23 @@ static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                        is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
 }
 
+/*
+ * Expand a 4-operand + fpstatus pointer + simd data value operation using
+ * an out-of-line helper.
+ */
+static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
+                              int rm, int ra, bool is_fp16, int data,
+                              gen_helper_gvec_4_ptr *fn)
+{
+    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
+                       vec_full_reg_offset(s, rn),
+                       vec_full_reg_offset(s, rm),
+                       vec_full_reg_offset(s, ra), fpst,
+                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+    tcg_temp_free_ptr(fpst);
+}
+
 /* Set ZF and NF based on a 64 bit result. This is alas fiddlier
  * than the 32 bit equivalent.
  */
@@ -12220,15 +12237,15 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
         rot = extract32(opcode, 0, 2);
         switch (size) {
         case 1:
-            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
+            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
                               gen_helper_gvec_fcmlah);
             break;
         case 2:
-            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
+            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                               gen_helper_gvec_fcmlas);
             break;
         case 3:
-            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
+            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                               gen_helper_gvec_fcmlad);
             break;
         default:
@@ -13479,9 +13496,10 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
         {
             int rot = extract32(insn, 13, 2);
             int data = (index << 2) | rot;
-            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+            tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                                vec_full_reg_offset(s, rn),
-                               vec_full_reg_offset(s, rm), fpst,
+                               vec_full_reg_offset(s, rm),
+                               vec_full_reg_offset(s, rd), fpst,
                                is_q ? 16 : 8, vec_full_reg_size(s), data,
                                size == MO_64
                                ? gen_helper_gvec_fcmlas_idx
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 0317d5386a..ffae6884d2 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -4383,7 +4383,7 @@ static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
 
 static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
 {
-    static gen_helper_gvec_3_ptr * const fns[2] = {
+    static gen_helper_gvec_4_ptr * const fns[2] = {
         gen_helper_gvec_fcmlah_idx,
         gen_helper_gvec_fcmlas_idx,
     };
@@ -4393,9 +4393,10 @@ static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
     if (sve_access_check(s)) {
         unsigned vsz = vec_full_reg_size(s);
         TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
-        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                            vec_full_reg_offset(s, a->rn),
                            vec_full_reg_offset(s, a->rm),
+                           vec_full_reg_offset(s, a->ra),
                            status, vsz, vsz,
                            a->index * 4 + a->rot,
                            fns[a->esz - 1]);
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index f88e572132..b19877e0d3 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -657,13 +657,11 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float16 *d = vd;
-    float16 *n = vn;
-    float16 *m = vm;
+    float16 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -680,19 +678,17 @@ void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
         float16 e4 = e2;
         float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
 
-        d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
-        d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
+        d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst);
+        d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va,
                              void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float16 *d = vd;
-    float16 *n = vn;
-    float16 *m = vm;
+    float16 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -716,20 +712,18 @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
             float16 e2 = n[H2(j + flip)];
             float16 e4 = e2;
 
-            d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
-            d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
+            d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst);
+            d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst);
         }
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float32 *d = vd;
-    float32 *n = vn;
-    float32 *m = vm;
+    float32 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -746,19 +740,17 @@ void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
         float32 e4 = e2;
         float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
 
-        d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
-        d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
+        d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst);
+        d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va,
                              void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float32 *d = vd;
-    float32 *n = vn;
-    float32 *m = vm;
+    float32 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -782,20 +774,18 @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
             float32 e2 = n[H4(j + flip)];
             float32 e4 = e2;
 
-            d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
-            d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
+            d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst);
+            d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst);
         }
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
-void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va,
                          void *vfpst, uint32_t desc)
 {
     uintptr_t opr_sz = simd_oprsz(desc);
-    float64 *d = vd;
-    float64 *n = vn;
-    float64 *m = vm;
+    float64 *d = vd, *n = vn, *m = vm, *a = va;
     float_status *fpst = vfpst;
     intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
     uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -812,8 +802,8 @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
         float64 e4 = e2;
         float64 e3 = m[i + 1 - flip] ^ neg_imag;
 
-        d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
-        d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
+        d[i] = float64_muladd(e2, e1, a[i], 0, fpst);
+        d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst);
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index 8ecfef1567..6f418bd8de 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -146,7 +146,7 @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
 {
     int opr_sz;
     TCGv_ptr fpst;
-    gen_helper_gvec_3_ptr *fn_gvec_ptr;
+    gen_helper_gvec_4_ptr *fn_gvec_ptr;
 
     if (!dc_isar_feature(aa32_vcma, s)
         || (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s))) {
@@ -171,9 +171,10 @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
     fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
     fn_gvec_ptr = (a->size == MO_16) ?
         gen_helper_gvec_fcmlah : gen_helper_gvec_fcmlas;
-    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+    tcg_gen_gvec_4_ptr(vfp_reg_offset(1, a->vd),
                        vfp_reg_offset(1, a->vn),
                        vfp_reg_offset(1, a->vm),
+                       vfp_reg_offset(1, a->vd),
                        fpst, opr_sz, opr_sz, a->rot,
                        fn_gvec_ptr);
     tcg_temp_free_ptr(fpst);
@@ -284,7 +285,7 @@ static bool trans_VFML(DisasContext *s, arg_VFML *a)
 
 static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
 {
-    gen_helper_gvec_3_ptr *fn_gvec_ptr;
+    gen_helper_gvec_4_ptr *fn_gvec_ptr;
     int opr_sz;
     TCGv_ptr fpst;
 
@@ -313,9 +314,10 @@ static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
         gen_helper_gvec_fcmlah_idx : gen_helper_gvec_fcmlas_idx;
     opr_sz = (1 + a->q) * 8;
     fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
-    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+    tcg_gen_gvec_4_ptr(vfp_reg_offset(1, a->vd),
                        vfp_reg_offset(1, a->vn),
                        vfp_reg_offset(1, a->vm),
+                       vfp_reg_offset(1, a->vd),
                        fpst, opr_sz, opr_sz,
                        (a->index << 2) | a->rot, fn_gvec_ptr);
     tcg_temp_free_ptr(fpst);
-- 
2.25.1


