From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-arm@nongnu.org, qemu-devel@nongnu.org
Subject: [PATCH 06/18] target/arm: Implement MVE logical immediate insns
Date: Mon, 28 Jun 2021 14:58:23 +0100
Message-ID: <20210628135835.6690-7-peter.maydell@linaro.org>
In-Reply-To: <20210628135835.6690-1-peter.maydell@linaro.org>

Implement the MVE logical-immediate insns (VMOV, VMVN,
VORR and VBIC). These have essentially the same encoding
as their Neon equivalents, and we implement the decode
in the same way.
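
For illustration, the kinds of instructions added here look like this
in assembly (the registers and immediates below are arbitrary examples,
not taken from this patch):

    vmov.i32  q0, #0x12      @ each 32-bit element := 0x00000012
    vmvn.i32  q0, #0x12      @ each 32-bit element := ~0x00000012 = 0xffffffed
    vorr.i32  q0, #0xff00    @ each element of Qda |= 0x0000ff00
    vbic.i32  q0, #0xff00    @ each element of Qda &= ~0x0000ff00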

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-mve.h    |  4 +++
 target/arm/mve.decode      | 17 +++++++++++++
 target/arm/mve_helper.c    | 24 ++++++++++++++++++
 target/arm/translate-mve.c | 50 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 95 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 4bbb9b3ae2c..5248dbe825a 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -355,3 +355,7 @@ DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index d9ece7be5da..caeb016c12f 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -26,10 +26,14 @@
 # VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
 %size_28 28:1 !function=plus_1
 
+# 1imm format immediate
+%imm_28_16_0 28:1 16:3 0:4
+
 &vldr_vstr rn qd imm p a w size l u
 &1op qd qm size
 &2op qd qm qn size
 &2scalar qd qn rm size
+&1imm qd imm cmode op
 
 @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
 # Note that both Rn and Qd are 3 bits only (no D bit)
@@ -41,6 +45,7 @@
 @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
 @2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
      size=%size_28
+@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0
 
 # The _rev suffix indicates that Vn and Vm are reversed. This is
 # the case for shifts. In the Arm ARM these insns are documented
@@ -258,3 +263,15 @@ VADDV            111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rd
 # Predicate operations
 %mask_22_13      22:1 13:3
 VPST             1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
+
+# Logical immediate operations (1 reg and modified-immediate)
+
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
+# not in a way we can conveniently represent in decodetree without
+# a lot of repetition:
+# VORR: op=0, (cmode & 1) && cmode < 12
+# VBIC: op=1, (cmode & 1) && cmode < 12
+# VMOV: everything else
+# So we have a single decode line and check the cmode/op in the
+# trans function.
+Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 85a552fe070..e6ced144673 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -323,6 +323,30 @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
 DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
 DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
 
+/*
+ * 1 operand immediates: Vda is destination and possibly also one source.
+ * All these insns work at 64-bit widths.
+ */
+#define DO_1OP_IMM(OP, FN)                                              \
+    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm)    \
+    {                                                                   \
+        uint64_t *da = vda;                                             \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned e;                                                     \
+        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
+            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask);            \
+        }                                                               \
+        mve_advance_vpt(env);                                           \
+    }
+
+#define DO_MOVI(N, I) (I)
+#define DO_ANDI(N, I) ((N) & (I))
+#define DO_ORRI(N, I) ((N) | (I))
+
+DO_1OP_IMM(vmovi, DO_MOVI)
+DO_1OP_IMM(vandi, DO_ANDI)
+DO_1OP_IMM(vorri, DO_ORRI)
+
 #define DO_2OP(OP, ESIZE, TYPE, FN)                                     \
     void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
                                 void *vd, void *vn, void *vm)           \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index e9a5442a724..f435a1cfd97 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -34,6 +34,7 @@ typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
 typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
 
 /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
 static inline long mve_qreg_offset(unsigned reg)
@@ -787,3 +788,52 @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
     mve_update_eci(s);
     return true;
 }
+
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
+{
+    TCGv_ptr qd;
+    uint64_t imm;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qd) ||
+        !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+    qd = mve_qreg_ptr(a->qd);
+    fn(cpu_env, qd, tcg_constant_i64(imm));
+    tcg_temp_free_ptr(qd);
+    mve_update_eci(s);
+    return true;
+}
+
+static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
+{
+    /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
+    MVEGenOneOpImmFn *fn;
+
+    if ((a->cmode & 1) && a->cmode < 12) {
+        if (a->op) {
+            /*
+             * For op=1, the immediate will be inverted by asimd_imm_const(),
+             * so the VBIC becomes a logical AND operation.
+             */
+            fn = gen_helper_mve_vandi;
+        } else {
+            fn = gen_helper_mve_vorri;
+        }
+    } else {
+        /* There is one unallocated cmode/op combination in this space */
+        if (a->cmode == 15 && a->op == 1) {
+            return false;
+        }
+        /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
+        fn = gen_helper_mve_vmovi;
+    }
+    return do_1imm(s, a, fn);
+}
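
For reference, since the DO_1OP_IMM macro above is fairly dense, this
is roughly what one instantiation, DO_1OP_IMM(vorri, DO_ORRI), expands
to; it is only a longhand sketch of the generated helper, not extra
code in the patch:

    void HELPER(mve_vorri)(CPUARMState *env, void *vda, uint64_t imm)
    {
        uint64_t *da = vda;
        /* one predicate bit per byte of the 128-bit vector */
        uint16_t mask = mve_element_mask(env);
        unsigned e;

        /* two 64-bit lanes per Q register; consume 8 mask bits per lane */
        for (e = 0; e < 16 / 8; e++, mask >>= 8) {
            mergemask(&da[H8(e)], da[H8(e)] | imm, mask);
        }
        mve_advance_vpt(env);
    }

A single 64-bit OR per lane is enough regardless of the .i16/.i32
element size because asimd_imm_const() has already replicated (and, for
op=1, inverted) the 8-bit encoded immediate across all 64 bits; for
example, cmode=0b1110, op=0, imm8=0xAB expands to 0xABABABABABABABAB.
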
-- 
2.20.1



Thread overview: 42+ messages
2021-06-28 13:58 [PATCH 00/18] target/arm: Second slice of MVE implementation Peter Maydell
2021-06-28 13:58 ` [PATCH 01/18] target/arm: Fix MVE widening/narrowing VLDR/VSTR offset calculation Peter Maydell
2021-06-28 15:12   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 02/18] target/arm: Fix bugs in MVE VRMLALDAVH, VRMLSLDAVH Peter Maydell
2021-06-28 15:17   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 03/18] target/arm: Make asimd_imm_const() public Peter Maydell
2021-06-28 15:19   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 04/18] target/arm: Use asimd_imm_const for A64 decode Peter Maydell
2021-06-28 15:36   ` Richard Henderson
2021-06-28 16:04     ` Peter Maydell
2021-06-28 13:58 ` [PATCH 05/18] target/arm: Use dup_const() instead of bitfield_replicate() Peter Maydell
2021-06-28 15:23   ` Richard Henderson
2021-06-28 13:58 ` Peter Maydell [this message]
2021-06-28 15:37   ` [PATCH 06/18] target/arm: Implement MVE logical immediate insns Richard Henderson
2021-06-28 13:58 ` [PATCH 07/18] target/arm: Implement MVE vector shift left by " Peter Maydell
2021-06-28 16:10   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 08/18] target/arm: Implement MVE vector shift right " Peter Maydell
2021-06-28 16:09   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 09/18] target/arm: Implement MVE VSHLL Peter Maydell
2021-06-28 16:18   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 10/18] target/arm: Implement MVE VSRI, VSLI Peter Maydell
2021-06-28 16:26   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 11/18] target/arm: Implement MVE VSHRN, VRSHRN Peter Maydell
2021-06-28 16:30   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 12/18] target/arm: Implement MVE saturating narrowing shifts Peter Maydell
2021-06-28 16:38   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 13/18] target/arm: Implement MVE VSHLC Peter Maydell
2021-06-28 16:39   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 14/18] target/arm: Implement MVE VADDLV Peter Maydell
2021-06-28 16:47   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 15/18] target/arm: Implement MVE long shifts by immediate Peter Maydell
2021-06-28 16:54   ` Richard Henderson
2021-06-28 17:45     ` Richard Henderson
2021-06-29 15:56       ` Peter Maydell
2021-06-29 16:13         ` Richard Henderson
2021-06-28 13:58 ` [PATCH 16/18] target/arm: Implement MVE long shifts by register Peter Maydell
2021-06-28 17:07   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 17/18] target/arm: Implement MVE shifts by immediate Peter Maydell
2021-06-28 17:38   ` Richard Henderson
2021-06-28 13:58 ` [PATCH 18/18] target/arm: Implement MVE shifts by register Peter Maydell
2021-06-28 17:41   ` Richard Henderson
2021-06-28 14:18 ` [PATCH 00/18] target/arm: Second slice of MVE implementation no-reply
