From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-arm@nongnu.org, qemu-devel@nongnu.org
Subject: [PATCH for-6.2 24/34] target/arm: Implement MVE VMLADAV and VMLSLDAV
Date: Tue, 13 Jul 2021 14:37:16 +0100
Message-ID: <20210713133726.26842-25-peter.maydell@linaro.org>
In-Reply-To: <20210713133726.26842-1-peter.maydell@linaro.org>

Implement the MVE VMLADAV and VMLSDAV insns.  Like the VMLALDAV and
VMLSLDAV insns already implemented, these accumulate multiplied
vector elements; but they accumulate a 32-bit result rather than a
64-bit one.
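
As a rough, illustrative sketch (not part of the patch): for 32-bit
elements, and ignoring predication, ECI and the exchanging (X=1)
variants, the new insns compute something like the hypothetical
reference functions below, which loosely mirror the DO_DAV helper
added in mve_helper.c (accumulation wraps modulo 2^32):

  #include <stdint.h>

  /* Hypothetical reference: 4 x 32-bit elements per 128-bit vector */
  static uint32_t vmladav_w_ref(const int32_t *n, const int32_t *m,
                                uint32_t a)
  {
      for (int e = 0; e < 4; e++) {
          a += (uint32_t)n[e] * (uint32_t)m[e];   /* add every product */
      }
      return a;
  }

  static uint32_t vmlsdav_w_ref(const int32_t *n, const int32_t *m,
                                uint32_t a)
  {
      for (int e = 0; e < 4; e++) {
          if (e & 1) {
              a -= (uint32_t)n[e] * (uint32_t)m[e]; /* odd elements: subtract */
          } else {
              a += (uint32_t)n[e] * (uint32_t)m[e]; /* even elements: add */
          }
      }
      return a;
  }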

Note that these encodings overlap with what would be RdaHi=0b111 for
VMLALDAV, VMLSLDAV, VRMLALDAVH and VRMLSLDAVH.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-mve.h    | 17 ++++++++++
 target/arm/mve.decode      | 33 +++++++++++++++++---
 target/arm/mve_helper.c    | 41 ++++++++++++++++++++++++
 target/arm/translate-mve.c | 64 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 150 insertions(+), 5 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 84aa9de6e06..088bdd3ca50 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -400,6 +400,23 @@ DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 
+DEF_HELPER_FLAGS_4(mve_vmladavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmladavsxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 79c529e762f..0c4708ea988 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -320,32 +320,55 @@ VDUP             1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
 %size_16 16:1 !function=plus_1
 
 &vmlaldav rdahi rdalo size qn qm x a
+&vmladav rda size qn qm x a
 
 @vmlaldav        .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
                  qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
 @vmlaldav_nosz   .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
                  qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
-VMLALDAV_S       1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
-VMLALDAV_U       1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+@vmladav         .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
+                 qn=%qn rda=%rdalo size=%size_16 &vmladav
+@vmladav_nosz    .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
+                 qn=%qn rda=%rdalo size=0 &vmladav
 
-VMLSLDAV         1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
+{
+  VMLADAV_S      1110 1110 1111  ... . ... . 1110 . 0 . 0 ... 0 @vmladav
+  VMLALDAV_S     1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+}
+{
+  VMLADAV_U      1111 1110 1111  ... . ... . 1110 . 0 . 0 ... 0 @vmladav
+  VMLALDAV_U     1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+}
+
+{
+  VMLSDAV        1110 1110 1111  ... . ... . 1110 . 0 . 0 ... 1 @vmladav
+  VMLSLDAV       1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
+}
+
+{
+  VMLSDAV        1111 1110 1111  ... 0 ... . 1110 . 0 . 0 ... 1 @vmladav_nosz
+  VRMLSLDAVH     1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
+}
+
+VMLADAV_S        1110 1110 1111  ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
+VMLADAV_U        1111 1110 1111  ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
 
 {
   VMAXV_S        1110 1110 1110  .. 10 ....  1111 0 0 . 0 ... 0 @vmaxv
   VMINV_S        1110 1110 1110  .. 10 ....  1111 1 0 . 0 ... 0 @vmaxv
   VMAXAV         1110 1110 1110  .. 00 ....  1111 0 0 . 0 ... 0 @vmaxv
   VMINAV         1110 1110 1110  .. 00 ....  1111 1 0 . 0 ... 0 @vmaxv
+  VMLADAV_S      1110 1110 1111  ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
   VRMLALDAVH_S   1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
 }
 
 {
   VMAXV_U        1111 1110 1110  .. 10 ....  1111 0 0 . 0 ... 0 @vmaxv
   VMINV_U        1111 1110 1110  .. 10 ....  1111 1 0 . 0 ... 0 @vmaxv
+  VMLADAV_U      1111 1110 1111  ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
   VRMLALDAVH_U   1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
 }
 
-VRMLSLDAVH       1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
-
 # Scalar operations
 
 VADD_scalar      1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 725fe64a348..8b70362f012 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1201,6 +1201,47 @@ DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
 DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
 
+/*
+ * Multiply add dual accumulate ops
+ */
+#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
+    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,         \
+                                    void *vm, uint32_t a)               \
+    {                                                                   \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned e;                                                     \
+        TYPE *n = vn, *m = vm;                                          \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
+            if (mask & 1) {                                             \
+                if (e & 1) {                                            \
+                    a ODDACC                                            \
+                        n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)];     \
+                } else {                                                \
+                    a EVENACC                                           \
+                        n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)];     \
+                }                                                       \
+            }                                                           \
+        }                                                               \
+        mve_advance_vpt(env);                                           \
+        return a;                                                       \
+    }
+
+#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC)           \
+    DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC)   \
+    DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC)  \
+    DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)
+
+#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC)           \
+    DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC)  \
+    DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
+    DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)
+
+DO_DAV_S(vmladavs, false, +=, +=)
+DO_DAV_U(vmladavu, false, +=, +=)
+DO_DAV_S(vmlsdav, false, +=, -=)
+DO_DAV_S(vmladavsx, true, +=, +=)
+DO_DAV_S(vmlsdavx, true, +=, -=)
+
 /*
  * Rounding multiply add long dual accumulate high. In the pseudocode
  * this is implemented with a 72-bit internal accumulator value of which
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 22b178296f4..67b9c07447a 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -46,6 +46,7 @@ typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TC
 typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenVABAVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenDualAccOpFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 
 /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
 static inline long mve_qreg_offset(unsigned reg)
@@ -766,6 +767,69 @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
     return do_long_dual_acc(s, a, fns[a->x]);
 }
 
+static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn)
+{
+    TCGv_ptr qn, qm;
+    TCGv_i32 rda;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qn) ||
+        !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    qn = mve_qreg_ptr(a->qn);
+    qm = mve_qreg_ptr(a->qm);
+
+    /*
+     * This insn is subject to beat-wise execution. Partial execution
+     * of an A=0 (no-accumulate) insn which does not execute the first
+     * beat must start with the current rda value, not 0.
+     */
+    if (a->a || mve_skip_first_beat(s)) {
+        rda = load_reg(s, a->rda);
+    } else {
+        rda = tcg_const_i32(0);
+    }
+
+    fn(rda, cpu_env, qn, qm, rda);
+    store_reg(s, a->rda, rda);
+    tcg_temp_free_ptr(qn);
+    tcg_temp_free_ptr(qm);
+
+    mve_update_eci(s);
+    return true;
+}
+
+#define DO_DUAL_ACC(INSN, FN)                                           \
+    static bool trans_##INSN(DisasContext *s, arg_vmladav *a)           \
+    {                                                                   \
+        static MVEGenDualAccOpFn * const fns[4][2] = {                  \
+            { gen_helper_mve_##FN##b, gen_helper_mve_##FN##xb },        \
+            { gen_helper_mve_##FN##h, gen_helper_mve_##FN##xh },        \
+            { gen_helper_mve_##FN##w, gen_helper_mve_##FN##xw },        \
+            { NULL, NULL },                                             \
+        };                                                              \
+        return do_dual_acc(s, a, fns[a->size][a->x]);                   \
+    }
+
+DO_DUAL_ACC(VMLADAV_S, vmladavs)
+DO_DUAL_ACC(VMLSDAV, vmlsdav)
+
+static bool trans_VMLADAV_U(DisasContext *s, arg_vmladav *a)
+{
+    static MVEGenDualAccOpFn * const fns[4][2] = {
+        { gen_helper_mve_vmladavub, NULL },
+        { gen_helper_mve_vmladavuh, NULL },
+        { gen_helper_mve_vmladavuw, NULL },
+        { NULL, NULL },
+    };
+    return do_dual_acc(s, a, fns[a->size][a->x]);
+}
+
 static void gen_vpst(DisasContext *s, uint32_t mask)
 {
     /*
-- 
2.20.1



Thread overview: 81+ messages
2021-07-13 13:36 [PATCH for-6.2 00/34] target/arm: Third slice of MVE implementation Peter Maydell
2021-07-13 13:36 ` [PATCH for-6.2 01/34] target/arm: Note that we handle VMOVL as a special case of VSHLL Peter Maydell
2021-07-14 17:02   ` Richard Henderson
2021-07-13 13:36 ` [PATCH for-6.2 02/34] target/arm: Print MVE VPR in CPU dumps Peter Maydell
2021-07-15  1:34   ` Richard Henderson
2021-07-13 13:36 ` [PATCH for-6.2 03/34] target/arm: Fix MVE VSLI by 0 and VSRI by <dt> Peter Maydell
2021-07-16 16:27   ` Richard Henderson
2021-07-13 13:36 ` [PATCH for-6.2 04/34] target/arm: Fix signed VADDV Peter Maydell
2021-07-15  1:37   ` Richard Henderson
2021-07-13 13:36 ` [PATCH for-6.2 05/34] target/arm: Fix mask handling for MVE narrowing operations Peter Maydell
2021-07-16 16:29   ` Richard Henderson
2021-07-13 13:36 ` [PATCH for-6.2 06/34] target/arm: Fix 48-bit saturating shifts Peter Maydell
2021-07-16 16:34   ` Richard Henderson
2021-07-16 16:39     ` Peter Maydell
2021-07-13 13:36 ` [PATCH for-6.2 07/34] target/arm: Fix calculation of LTP mask when LR is 0 Peter Maydell
2021-07-16 16:35   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 08/34] target/arm: Fix VPT advance when ECI is non-zero Peter Maydell
2021-07-16 16:44   ` Richard Henderson
2021-07-16 16:58   ` Richard Henderson
2021-07-19 14:16     ` Peter Maydell
2021-07-13 13:37 ` [PATCH for-6.2 09/34] target/arm: Factor out mve_eci_mask() Peter Maydell
2021-07-16 16:48   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 10/34] target/arm: Fix VLDRB/H/W for predicated elements Peter Maydell
2021-07-16 16:58   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 11/34] target/arm: Implement MVE VMULL (polynomial) Peter Maydell
2021-07-16 17:14   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 12/34] target/arm: Implement MVE incrementing/decrementing dup insns Peter Maydell
2021-07-16 19:57   ` Richard Henderson
2021-07-19 14:25     ` Peter Maydell
2021-07-13 13:37 ` [PATCH for-6.2 13/34] target/arm: Factor out gen_vpst() Peter Maydell
2021-07-16 21:04   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 14/34] target/arm: Implement MVE integer vector comparisons Peter Maydell
2021-07-16 21:55   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 15/34] target/arm: Implement MVE integer vector-vs-scalar comparisons Peter Maydell
2021-07-16 21:58   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 16/34] target/arm: Implement MVE VPSEL Peter Maydell
2021-07-16 22:03   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 17/34] target/arm: Implement MVE VMLAS Peter Maydell
2021-07-16 22:11   ` Richard Henderson
2021-07-17 10:06     ` Peter Maydell
2021-07-17 20:40       ` Richard Henderson
2021-07-20 10:13         ` Peter Maydell
2021-07-13 13:37 ` [PATCH for-6.2 18/34] target/arm: Implement MVE shift-by-scalar Peter Maydell
2021-07-16 22:15   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 19/34] target/arm: Move 'x' and 'a' bit definitions into vmlaldav formats Peter Maydell
2021-07-16 22:16   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 20/34] target/arm: Implement MVE integer min/max across vector Peter Maydell
2021-07-17 20:46   ` Richard Henderson
2021-07-19 15:28     ` Peter Maydell
2021-07-20 18:21       ` Peter Maydell
2021-07-13 13:37 ` [PATCH for-6.2 21/34] target/arm: Implement MVE VABAV Peter Maydell
2021-07-17 20:50   ` Richard Henderson
2021-07-17 22:13     ` Peter Maydell
2021-07-17 22:18       ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 22/34] target/arm: Implement MVE narrowing moves Peter Maydell
2021-07-21 21:45   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 23/34] target/arm: Rename MVEGenDualAccOpFn to MVEGenLongDualAccOpFn Peter Maydell
2021-07-21 21:45   ` Richard Henderson
2021-07-13 13:37 ` Peter Maydell [this message]
2021-07-21 22:01   ` [PATCH for-6.2 24/34] target/arm: Implement MVE VMLADAV and VMLSLDAV Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 25/34] target/arm: Implement MVE VMLA Peter Maydell
2021-07-21 22:03   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 26/34] target/arm: Implement MVE saturating doubling multiply accumulates Peter Maydell
2021-07-21 22:07   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 27/34] target/arm: Implement MVE VQABS, VQNEG Peter Maydell
2021-07-21 22:09   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 28/34] target/arm: Implement MVE VMAXA, VMINA Peter Maydell
2021-07-21 22:12   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 29/34] target/arm: Implement MVE VMOV to/from 2 general-purpose registers Peter Maydell
2021-07-21 22:20   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 30/34] target/arm: Implement MVE VPNOT Peter Maydell
2021-07-21 22:24   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 31/34] target/arm: Implement MVE VCTP Peter Maydell
2021-07-21 22:33   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 32/34] target/arm: Implement MVE scatter-gather insns Peter Maydell
2021-07-22  0:36   ` Richard Henderson
2021-07-22  8:42     ` Peter Maydell
2021-07-13 13:37 ` [PATCH for-6.2 33/34] target/arm: Implement MVE scatter-gather immediate forms Peter Maydell
2021-07-22  0:49   ` Richard Henderson
2021-07-13 13:37 ` [PATCH for-6.2 34/34] target/arm: Implement MVE interleaving loads/stores Peter Maydell
2021-07-22 17:52   ` Richard Henderson
