From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-arm@nongnu.org, qemu-devel@nongnu.org
Subject: [PATCH for-6.2 26/34] target/arm: Implement MVE saturating doubling multiply accumulates
Date: Tue, 13 Jul 2021 14:37:18 +0100
Message-ID: <20210713133726.26842-27-peter.maydell@linaro.org>
In-Reply-To: <20210713133726.26842-1-peter.maydell@linaro.org>

Implement the MVE saturating doubling multiply accumulate insns
VQDMLAH, VQRDMLAH, VQDMLASH and VQRDMLASH.  These perform a multiply,
double, add the accumulator shifted left by the element size, round
(for the VQRD* variants only), saturate to twice the element size,
and then take the high half of the result.  The *MLAH insns do
vector * scalar + vector, and the *MLASH insns do
vector * vector + scalar.
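
Per element, the *MLAH operation is
  Qda[e] = high_half(sat(2 * Qn[e] * Rm + (Qda[e] << esize) [+ rnd]))
and the *MLASH operation is
  Qda[e] = high_half(sat(2 * Qda[e] * Qn[e] + (Rm << esize) [+ rnd]))
where the rounding constant rnd = 1 << (esize - 1) is added only by
the VQRD* variants.  As a concrete example, 16-bit VQRDMLAH with
Qn[e] = Rm = 0x4000 and Qda[e] = 1 computes
  2 * 0x4000 * 0x4000 + (1 << 16) + (1 << 15) = 0x20018000
which is within the 32-bit signed range, so nothing saturates and the
element result is the high half, 0x2001.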

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-mve.h    | 16 +++++++
 target/arm/mve.decode      |  5 ++
 target/arm/mve_helper.c    | 95 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  4 ++
 4 files changed, 120 insertions(+)

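Reviewer note, below the '---' cut so it stays out of the commit:
the subtle part of this patch is the ordering of the three saturating
adds in do_vqdmlah_w().  Here is a standalone sketch (a hypothetical
demo, not part of the patch; it assumes only the GCC/Clang
__builtin_add_overflow() builtin, which is what QEMU's
sadd64_overflow() wraps) of a case where doubling before adding the
rounding constant would report a false saturation:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-detecting signed 64-bit add, like QEMU's sadd64_overflow() */
static bool sadd64(int64_t a, int64_t b, int64_t *r)
{
    return __builtin_add_overflow(a, b, r);
}

int main(void)
{
    /*
     * Choose a * b = -(1 << 30) and c = INT32_MIN, so that the exact
     * architectural value 2*(a*b) + ((int64_t)c << 32) + (1 << 31)
     * is exactly INT64_MIN: in range, so it must not saturate.
     */
    int64_t m1 = -((int64_t)1 << 30);  /* a * b */
    int64_t m2 = -((int64_t)1 << 62);  /* (int64_t)c << 31 */
    int64_t rc = (int64_t)1 << 30;     /* half the rounding constant */
    int64_t r;

    /* Patch's order: m1 + m2, then half the rounding const, then double */
    bool sat = sadd64(m1, m2, &r) || sadd64(r, rc, &r) ||
               sadd64(r, r, &r);
    printf("patch order: sat=%d r=0x%016" PRIx64 "\n", sat, (uint64_t)r);

    /* Naive order: double first, then add the full rounding constant */
    bool sat2 = sadd64(m1, m2, &r) || sadd64(r, r, &r) ||
                sadd64(r, 2 * rc, &r);
    printf("naive order: sat=%d\n", sat2);
    return 0;
}

This prints sat=0 and r=0x8000000000000000 (the correct, unsaturated
INT64_MIN) for the patch's ordering, but sat=1 for the naive one: the
doubling wraps below INT64_MIN even though adding the rounding
constant would bring the exact value back into range.
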
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 50b34c601e1..e61c5d56f41 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -387,6 +387,22 @@ DEF_HELPER_FLAGS_4(mve_vmlasub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vmlasuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vmlasuw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(mve_vqdmlahb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmlahh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmlahw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmlahb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmlahh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmlahw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqdmlashb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmlashh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmlashw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmlashb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmlashh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmlashw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 2e2df61c860..99cea8d39b6 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -418,6 +418,11 @@ VMLA_U           1111 1110 0 . .. ... 1 ... 0 1110 . 100 .... @2scalar
 VMLAS_S          1110 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
 VMLAS_U          1111 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
 
+VQRDMLAH         1110 1110 0 . .. ... 0 ... 0 1110 . 100 .... @2scalar
+VQRDMLASH        1110 1110 0 . .. ... 0 ... 1 1110 . 100 .... @2scalar
+VQDMLAH          1110 1110 0 . .. ... 0 ... 0 1110 . 110 .... @2scalar
+VQDMLASH         1110 1110 0 . .. ... 0 ... 1 1110 . 110 .... @2scalar
+
 # Vector add across vector
 {
   VADDV          111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 91c0add8da7..1013060baeb 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -971,6 +971,28 @@ DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
         mve_advance_vpt(env);                                           \
     }
 
+#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN)                      \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
+                                uint32_t rm)                            \
+    {                                                                   \
+        TYPE *d = vd, *n = vn;                                          \
+        TYPE m = rm;                                                    \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned e;                                                     \
+        bool qc = false;                                                \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
+            bool sat = false;                                           \
+            mergemask(&d[H##ESIZE(e)],                                  \
+                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat),      \
+                      mask);                                            \
+            qc |= sat & mask & 1;                                       \
+        }                                                               \
+        if (qc) {                                                       \
+            env->vfp.qc[0] = qc;                                        \
+        }                                                               \
+        mve_advance_vpt(env);                                           \
+    }
+
 /* provide unsigned 2-op scalar helpers for all sizes */
 #define DO_2OP_SCALAR_U(OP, FN)                 \
     DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
@@ -1019,6 +1041,79 @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
 DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
 DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
 
+static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
+{
+    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
+    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
+}
+
+static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
+                            int round, bool *sat)
+{
+    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
+    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
+}
+
+static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
+                            int round, bool *sat)
+{
+    /*
+     * Architecturally we should do the entire add, double, round
+     * and then check for saturation. We do three saturating adds,
+     * but we need to be careful about the order. If the first
+     * m1 + m2 saturates then it's impossible for the *2+rc to
+     * bring it back into the non-saturated range. However, if
+     * m1 + m2 is negative then it's possible that doing the doubling
+     * would take the intermediate result below INT64_MIN and the
+     * addition of the rounding constant then brings it back in range.
+     * So we add half the rounding constant and half the "c << esize"
+     * before doubling rather than adding the rounding constant after
+     * the doubling.
+     */
+    int64_t m1 = (int64_t)a * b;
+    int64_t m2 = (int64_t)c << 31;
+    int64_t r;
+    if (sadd64_overflow(m1, m2, &r) ||
+        sadd64_overflow(r, (round << 30), &r) ||
+        sadd64_overflow(r, r, &r)) {
+        *sat = true;
+        return r < 0 ? INT32_MAX : INT32_MIN;
+    }
+    return r >> 32;
+}
+
+/*
+ * The *MLAH insns are vector * scalar + vector;
+ * the *MLASH insns are vector * vector + scalar
+ */
+#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
+#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
+#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
+#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
+#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
+#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)
+
+#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
+#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
+#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
+#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
+#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
+#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)
+
+DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
+DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
+DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)
+
+DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
+DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
+DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)
+
 /* Vector by scalar plus vector */
 #define DO_VMLA(D, N, M) ((N) * (M) + (D))
 
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 650f3b95edf..f8b34c9ef36 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -624,6 +624,10 @@ DO_2OP_SCALAR(VMLA_S, vmlas)
 DO_2OP_SCALAR(VMLA_U, vmlau)
 DO_2OP_SCALAR(VMLAS_S, vmlass)
 DO_2OP_SCALAR(VMLAS_U, vmlasu)
+DO_2OP_SCALAR(VQDMLAH, vqdmlah)
+DO_2OP_SCALAR(VQRDMLAH, vqrdmlah)
+DO_2OP_SCALAR(VQDMLASH, vqdmlash)
+DO_2OP_SCALAR(VQRDMLASH, vqrdmlash)
 
 static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
 {
-- 
2.20.1