From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, qemu-arm@nongnu.org,
	alex.bennee@linaro.org, Stephen Long <steplong@quicinc.com>
Subject: [PATCH v4 42/78] target/arm: Implement SVE2 HISTCNT, HISTSEG
Date: Tue,  9 Mar 2021 08:20:05 -0800
Message-ID: <20210309162041.23124-43-richard.henderson@linaro.org>
In-Reply-To: <20210309162041.23124-1-richard.henderson@linaro.org>

From: Stephen Long <steplong@quicinc.com>

Signed-off-by: Stephen Long <steplong@quicinc.com>
Message-Id: <20200416173109.8856-1-steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Fix overlap between output and input vectors.
v4: Fix histseg counting (zhiwei).
---
 target/arm/helper-sve.h    |   7 ++
 target/arm/sve.decode      |   6 ++
 target/arm/sve_helper.c    | 131 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c |  19 ++++++
 4 files changed, 163 insertions(+)
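
As a quick reference while reading the helpers below, here is a minimal
scalar model of the semantics they implement.  This is illustration only
and not part of the patch: it assumes 32-bit elements and an all-true
predicate for HISTCNT, and a vector length that is a multiple of 16 bytes
for HISTSEG.

    #include <stdint.h>
    #include <stdio.h>

    /* HISTCNT (.s): for each element i, count the elements m[0..i]
     * equal to n[i].  The real helper also applies the governing
     * predicate and zeroes inactive elements; that is omitted here. */
    static void histcnt_s_ref(uint32_t *d, const uint32_t *n,
                              const uint32_t *m, int elts)
    {
        for (int i = 0; i < elts; i++) {
            uint32_t count = 0;
            for (int j = 0; j <= i; j++) {
                count += (n[i] == m[j]);
            }
            d[i] = count;
        }
    }

    /* HISTSEG: for each byte of n, count the equal bytes of m within
     * the same 16-byte segment; there is no governing predicate. */
    static void histseg_ref(uint8_t *d, const uint8_t *n,
                            const uint8_t *m, int bytes)
    {
        for (int seg = 0; seg < bytes; seg += 16) {
            for (int i = 0; i < 16; i++) {
                uint8_t count = 0;
                for (int j = 0; j < 16; j++) {
                    count += (n[seg + i] == m[seg + j]);
                }
                d[seg + i] = count;
            }
        }
    }

    int main(void)
    {
        uint32_t n[4] = { 1, 2, 1, 3 };
        uint32_t m[4] = { 1, 1, 2, 1 };
        uint32_t d[4];
        uint8_t nb[16] = { 0 }, mb[16] = { 0 }, db[16];

        histcnt_s_ref(d, n, m, 4);
        /* Prints 1 0 2 0: e.g. d[2] = 2 because m[0] and m[1] equal n[2]. */
        printf("%u %u %u %u\n", d[0], d[1], d[2], d[3]);

        nb[0] = 0xaa;
        mb[3] = 0xaa;
        mb[9] = 0xaa;
        histseg_ref(db, nb, mb, 16);
        /* Prints 2 14: two 0xaa bytes in m match nb[0], and the
         * fourteen zero bytes of m match nb[1]. */
        printf("%u %u\n", db[0], db[1]);
        return 0;
    }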

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 98e6b57e38..507a2fea8e 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2551,6 +2551,13 @@ DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_b, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_h, TCG_CALL_NO_RWG,
                    i32, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(sve2_histcnt_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_histcnt_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_histseg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 388bf92acf..8f501a083c 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -146,6 +146,7 @@
                 &rprrr_esz rn=%reg_movprfx
 @rdn_pg_rm_ra   ........ esz:2 . ra:5  ... pg:3 rm:5 rd:5 \
                 &rprrr_esz rn=%reg_movprfx
+@rd_pg_rn_rm   ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5       &rprr_esz
 
 # One register operand, with governing predicate, vector element size
 @rd_pg_rn       ........ esz:2 ... ... ... pg:3 rn:5 rd:5       &rpr_esz
@@ -1336,6 +1337,11 @@ RSUBHNT         01000101 .. 1 ..... 011 111 ..... .....  @rd_rn_rm
 MATCH           01000101 .. 1 ..... 100 ... ..... 0 .... @pd_pg_rn_rm
 NMATCH          01000101 .. 1 ..... 100 ... ..... 1 .... @pd_pg_rn_rm
 
+### SVE2 Histogram Computation
+
+HISTCNT         01000101 .. 1 ..... 110 ... ..... .....  @rd_pg_rn_rm
+HISTSEG         01000101 .. 1 ..... 101 000 ..... .....  @rd_rn_rm
+
 ## SVE2 floating-point pairwise operations
 
 FADDP           01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 860ab564cd..88a5e64277 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -7094,3 +7094,134 @@ DO_PPZZ_MATCH(sve2_nmatch_ppzz_b, MO_8, true)
 DO_PPZZ_MATCH(sve2_nmatch_ppzz_h, MO_16, true)
 
 #undef DO_PPZZ_MATCH
+
+void HELPER(sve2_histcnt_s)(void *vd, void *vn, void *vm, void *vg,
+                            uint32_t desc)
+{
+    ARMVectorReg scratch;
+    intptr_t i, j;
+    intptr_t opr_sz = simd_oprsz(desc);
+    uint32_t *d = vd, *n = vn, *m = vm;
+    uint8_t *pg = vg;
+
+    if (d == n) {
+        n = memcpy(&scratch, n, opr_sz);
+        if (d == m) {
+            m = n;
+        }
+    } else if (d == m) {
+        m = memcpy(&scratch, m, opr_sz);
+    }
+
+    for (i = 0; i < opr_sz; i += 4) {
+        uint64_t count = 0;
+        uint8_t pred;
+
+        pred = pg[H1(i >> 3)] >> (i & 7);
+        if (pred & 1) {
+            uint32_t nn = n[H4(i >> 2)];
+
+            for (j = 0; j <= i; j += 4) {
+                pred = pg[H1(j >> 3)] >> (j & 7);
+                if ((pred & 1) && nn == m[H4(j >> 2)]) {
+                    ++count;
+                }
+            }
+        }
+        d[H4(i >> 2)] = count;
+    }
+}
+
+void HELPER(sve2_histcnt_d)(void *vd, void *vn, void *vm, void *vg,
+                            uint32_t desc)
+{
+    ARMVectorReg scratch;
+    intptr_t i, j;
+    intptr_t opr_sz = simd_oprsz(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+    uint8_t *pg = vg;
+
+    if (d == n) {
+        n = memcpy(&scratch, n, opr_sz);
+        if (d == m) {
+            m = n;
+        }
+    } else if (d == m) {
+        m = memcpy(&scratch, m, opr_sz);
+    }
+
+    for (i = 0; i < opr_sz / 8; ++i) {
+        uint64_t count = 0;
+        if (pg[H1(i)] & 1) {
+            uint64_t nn = n[i];
+            for (j = 0; j <= i; ++j) {
+                if ((pg[H1(j)] & 1) && nn == m[j]) {
+                    ++count;
+                }
+            }
+        }
+        d[i] = count;
+    }
+}
+
+/*
+ * Returns the number of bytes in m0 and m1 that match n.
+ * Unlike do_match2, we don't just need true/false; we need an exact count.
+ * This requires two extra logical operations.
+ */
+static inline uint64_t do_histseg_cnt(uint8_t n, uint64_t m0, uint64_t m1)
+{
+    const uint64_t mask = dup_const(MO_8, 0x7f);
+    uint64_t cmp0, cmp1;
+
+    cmp1 = dup_const(MO_8, n);
+    cmp0 = cmp1 ^ m0;
+    cmp1 = cmp1 ^ m1;
+
+    /*
+     * 1: clear msb of each byte to avoid carry to next byte (& mask)
+     * 2: carry in to msb if byte != 0 (+ mask)
+     * 3: set msb if cmp has msb set (| cmp)
+     * 4: set ~msb to ignore them (| mask)
+     * We now have 0xff for byte != 0 or 0x7f for byte == 0.
+     * 5: invert, resulting in 0x80 if and only if byte == 0.
+     */
+    cmp0 = ~(((cmp0 & mask) + mask) | cmp0 | mask);
+    cmp1 = ~(((cmp1 & mask) + mask) | cmp1 | mask);
+
+    /*
+     * Combine the two compares in a way that the bits do
+     * not overlap, and so preserves the count of set bits.
+     * If the host has an efficient instruction for ctpop,
+     * then ctpop(x) + ctpop(y) has the same number of
+     * operations as ctpop(x | (y >> 1)).  If the host does
+     * not have an efficient ctpop, then we only want to
+     * use it once.
+     */
+    return ctpop64(cmp0 | (cmp1 >> 1));
+}
+
+void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j;
+    intptr_t opr_sz = simd_oprsz(desc);
+
+    for (i = 0; i < opr_sz; i += 16) {
+        uint64_t n0 = *(uint64_t *)(vn + i);
+        uint64_t m0 = *(uint64_t *)(vm + i);
+        uint64_t n1 = *(uint64_t *)(vn + i + 8);
+        uint64_t m1 = *(uint64_t *)(vm + i + 8);
+        uint64_t out0 = 0;
+        uint64_t out1 = 0;
+
+        for (j = 0; j < 64; j += 8) {
+            uint64_t cnt0 = do_histseg_cnt(n0 >> j, m0, m1);
+            uint64_t cnt1 = do_histseg_cnt(n1 >> j, m0, m1);
+            out0 |= cnt0 << j;
+            out1 |= cnt1 << j;
+        }
+
+        *(uint64_t *)(vd + i) = out0;
+        *(uint64_t *)(vd + i + 8) = out1;
+    }
+}
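
The zero-byte trick in do_histseg_cnt can be sanity-checked in isolation.
The following standalone snippet is illustration only and not part of the
patch; it uses __builtin_popcountll where the helper uses QEMU's ctpop64.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns a value with the msb of each byte set iff the
     * corresponding byte of cmp is zero, per steps 1-5 in the comment
     * above.  No carry crosses a byte boundary in the addition because
     * bit 7 of both addends is clear in every byte. */
    static uint64_t zero_byte_msb(uint64_t cmp)
    {
        const uint64_t mask = 0x7f7f7f7f7f7f7f7fULL;
        return ~(((cmp & mask) + mask) | cmp | mask);
    }

    int main(void)
    {
        uint64_t cmp = 0x00ff120000340a00ULL;   /* four zero bytes */
        uint64_t r = zero_byte_msb(cmp);

        /* Prints 8000008080000080 4: one msb marker per zero byte, so
         * a single popcount yields the match count. */
        printf("%016" PRIx64 " %d\n", r, __builtin_popcountll(r));
        return 0;
    }

Shifting the second compare result right by one before OR'ing, as the
helper does, moves its markers to bit 6 of each byte, so the two sets of
markers never collide and one ctpop64 counts both.
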
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 08622432bd..acda88cb2c 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -7505,6 +7505,25 @@ static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a)                  \
 DO_SVE2_PPZZ_MATCH(MATCH, match)
 DO_SVE2_PPZZ_MATCH(NMATCH, nmatch)
 
+static bool trans_HISTCNT(DisasContext *s, arg_rprr_esz *a)
+{
+    static gen_helper_gvec_4 * const fns[2] = {
+        gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
+    };
+    if (a->esz < 2) {
+        return false;
+    }
+    return do_sve2_zpzz_ool(s, a, fns[a->esz - 2]);
+}
+
+static bool trans_HISTSEG(DisasContext *s, arg_rrr_esz *a)
+{
+    if (a->esz != 0) {
+        return false;
+    }
+    return do_sve2_zzz_ool(s, a, gen_helper_sve2_histseg);
+}
+
 static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
                             gen_helper_gvec_4_ptr *fn)
 {
-- 
2.25.1


