From: LIU Zhiwei <zhiwei_liu@c-sky.com>
To: richard.henderson@linaro.org, alistair23@gmail.com,
	chihmin.chao@sifive.com, palmer@dabbelt.com
Cc: guoren@linux.alibaba.com, qemu-riscv@nongnu.org,
	qemu-devel@nongnu.org, wxy194768@alibaba-inc.com,
	wenmeng_zhang@c-sky.com, LIU Zhiwei <zhiwei_liu@c-sky.com>
Subject: [PATCH v6 09/61] target/riscv: add vector amo operations
Date: Tue, 17 Mar 2020 23:06:01 +0800
Message-ID: <20200317150653.9008-10-zhiwei_liu@c-sky.com>
In-Reply-To: <20200317150653.9008-1-zhiwei_liu@c-sky.com>

Vector AMOs operate as if the aq and rl bits were zero on each element,
with regard to ordering relative to other instructions in the same hart.
Vector AMOs provide no ordering guarantee between element operations
within the same vector AMO instruction.
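
Roughly, and ignoring masking details, each active element i behaves as
in the sketch below (an illustrative outline of the intended semantics,
not the exact helper code; the names are descriptive only, and vd and
vs3 refer to the same vector register operand):

    addr = base + offset[i];           /* vs2 holds byte offsets   */
    old  = load(addr);                 /* 32- or 64-bit per width  */
    store(addr, op(old, vs3[i]));      /* op(old, x) is x for swap */
    if (wd) {
        vd[i] = old;                   /* return old memory value  */
    }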

Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
---
 target/riscv/helper.h                   |  29 +++++
 target/riscv/insn32-64.decode           |  11 ++
 target/riscv/insn32.decode              |  13 +++
 target/riscv/insn_trans/trans_rvv.inc.c | 134 ++++++++++++++++++++++
 target/riscv/internals.h                |   1 +
 target/riscv/vector_helper.c            | 143 ++++++++++++++++++++++++
 6 files changed, 331 insertions(+)

diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 72ba4d9bdb..70a4b05f75 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -240,3 +240,32 @@ DEF_HELPER_5(vlhuff_v_w, void, ptr, ptr, tl, env, i32)
 DEF_HELPER_5(vlhuff_v_d, void, ptr, ptr, tl, env, i32)
 DEF_HELPER_5(vlwuff_v_w, void, ptr, ptr, tl, env, i32)
 DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32)
+#ifdef TARGET_RISCV64
+DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddd_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxord_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandd_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorw_v_d,   void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoord_v_d,   void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomind_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxd_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32)
+#endif
+DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorw_v_w,   void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn32-64.decode b/target/riscv/insn32-64.decode
index 380bf791bc..86153d93fa 100644
--- a/target/riscv/insn32-64.decode
+++ b/target/riscv/insn32-64.decode
@@ -57,6 +57,17 @@ amomax_d   10100 . . ..... ..... 011 ..... 0101111 @atom_st
 amominu_d  11000 . . ..... ..... 011 ..... 0101111 @atom_st
 amomaxu_d  11100 . . ..... ..... 011 ..... 0101111 @atom_st
 
+# *** Vector AMO operations (in addition to Zvamo) ***
+vamoswapd_v     00001 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoaddd_v      00000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoxord_v      00100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoandd_v      01100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoord_v       01000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamomind_v      10000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamomaxd_v      10100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamominud_v     11000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamomaxud_v     11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+
 # *** RV64F Standard Extension (in addition to RV32F) ***
 fcvt_l_s   1100000  00010 ..... ... ..... 1010011 @r2_rm
 fcvt_lu_s  1100000  00011 ..... ... ..... 1010011 @r2_rm
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index b76c09c8c0..1330703720 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -44,6 +44,7 @@
 &u    imm rd
 &shift     shamt rs1 rd
 &atomic    aq rl rs2 rs1 rd
+&rwdvm     vm wd rd rs1 rs2
 &r2nfvm    vm rd rs1 nf
 &rnfvm     vm rd rs1 rs2 nf
 
@@ -67,6 +68,7 @@
 @r2      .......   ..... ..... ... ..... ....... %rs1 %rd
 @r2_nfvm ... ... vm:1 ..... ..... ... ..... ....... &r2nfvm %nf %rs1 %rd
 @r_nfvm  ... ... vm:1 ..... ..... ... ..... ....... &rnfvm %nf %rs2 %rs1 %rd
+@r_wdvm  ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd
 @r2_zimm . zimm:11  ..... ... ..... ....... %rs1 %rd
 
 @hfence_gvma ....... ..... .....   ... ..... ....... %rs2 %rs1
@@ -261,6 +263,17 @@ vsxh_v     ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm
 vsxw_v     ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm
 vsxe_v     ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm
 
+# *** Vector AMO operations are encoded under the standard AMO major opcode ***
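+# The width field (bits 14:12, 110 here) selects 32-bit memory elements;
+# the wd bit selects whether the original memory value is written to vd.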
+vamoswapw_v     00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoaddw_v      00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoxorw_v      00100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoandw_v      01100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoorw_v       01000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamominw_v      10000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamomaxw_v      10100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamominuw_v     11000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamomaxuw_v     11100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+
 # *** new major opcode OP-V ***
 vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
 vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c
index ce0fafde92..a8722ed9d2 100644
--- a/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/target/riscv/insn_trans/trans_rvv.inc.c
@@ -606,3 +606,137 @@ GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check)
 GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check)
 GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check)
 GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check)
+
+/*
+ *** Vector AMO operations (Zvamo)
+ */
+typedef void gen_helper_amo(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
+        TCGv_env, TCGv_i32);
+
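+/*
+ * Common translation for vector AMO instructions: pass pointers to vd
+ * (which also provides the source data, vs3), the mask register v0 and
+ * the index vector vs2, plus the scalar base address from rs1 and a
+ * descriptor word, to the out-of-line helper.
+ */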
+static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+        uint32_t data, gen_helper_amo *fn, DisasContext *s)
+{
+    TCGv_ptr dest, mask, index;
+    TCGv base;
+    TCGv_i32 desc;
+
+    dest = tcg_temp_new_ptr();
+    mask = tcg_temp_new_ptr();
+    index = tcg_temp_new_ptr();
+    base = tcg_temp_new();
+    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
+
+    gen_get_gpr(base, rs1);
+    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
+    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+    fn(dest, mask, base, index, cpu_env, desc);
+
+    tcg_temp_free_ptr(dest);
+    tcg_temp_free_ptr(mask);
+    tcg_temp_free_ptr(index);
+    tcg_temp_free(base);
+    tcg_temp_free_i32(desc);
+    return true;
+}
+
+static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
+{
+    uint32_t data = 0;
+    gen_helper_amo *fn;
+    static gen_helper_amo *const fnsw[9] = {
+        /* no atomic operation */
+        gen_helper_vamoswapw_v_w,
+        gen_helper_vamoaddw_v_w,
+        gen_helper_vamoxorw_v_w,
+        gen_helper_vamoandw_v_w,
+        gen_helper_vamoorw_v_w,
+        gen_helper_vamominw_v_w,
+        gen_helper_vamomaxw_v_w,
+        gen_helper_vamominuw_v_w,
+        gen_helper_vamomaxuw_v_w
+    };
+#ifdef TARGET_RISCV64
+    static gen_helper_amo *const fnsd[18] = {
+        gen_helper_vamoswapw_v_d,
+        gen_helper_vamoaddw_v_d,
+        gen_helper_vamoxorw_v_d,
+        gen_helper_vamoandw_v_d,
+        gen_helper_vamoorw_v_d,
+        gen_helper_vamominw_v_d,
+        gen_helper_vamomaxw_v_d,
+        gen_helper_vamominuw_v_d,
+        gen_helper_vamomaxuw_v_d,
+        gen_helper_vamoswapd_v_d,
+        gen_helper_vamoaddd_v_d,
+        gen_helper_vamoxord_v_d,
+        gen_helper_vamoandd_v_d,
+        gen_helper_vamoord_v_d,
+        gen_helper_vamomind_v_d,
+        gen_helper_vamomaxd_v_d,
+        gen_helper_vamominud_v_d,
+        gen_helper_vamomaxud_v_d
+    };
+#endif
+
+    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+        gen_helper_exit_atomic(cpu_env);
+        s->base.is_jmp = DISAS_NORETURN;
+        return true;
+    } else {
+        if (s->sew == 3) {
+#ifdef TARGET_RISCV64
+            fn = fnsd[seq];
+#else
+            /* Check done in amo_check(). */
+            g_assert_not_reached();
+#endif
+        } else {
+            fn = fnsw[seq];
+        }
+    }
+
+    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+    data = FIELD_DP32(data, VDATA, VM, a->vm);
+    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    data = FIELD_DP32(data, VDATA, WD, a->wd);
+    return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+}
+
+/*
+ * Two rules are checked here:
+ *
+ * 1. SEW must be at least as wide as the AMO memory element size.
+ *
+ * 2. If SEW is greater than XLEN, an illegal instruction exception is raised.
+ */
+static bool amo_check(DisasContext *s, arg_rwdvm *a)
+{
+    return (!s->vill && has_ext(s, RVA) &&
+            (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) &&
+            vext_check_reg(s, a->rd, false) &&
+            vext_check_reg(s, a->rs2, false) &&
+            ((1 << s->sew) <= sizeof(target_ulong)) &&
+            ((1 << s->sew) >= 4));
+}
+
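+/*
+ * The second argument is the index into the fnsw[]/fnsd[] tables in
+ * amo_op(); entries 9..17 (the 64-bit memory element forms) exist only
+ * on TARGET_RISCV64, while fnsw[] covers 0..8.
+ */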
+GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check)
+#ifdef TARGET_RISCV64
+GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check)
+#endif
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 614e41437d..6a27d7c716 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -26,4 +26,5 @@ FIELD(VDATA, MLEN, 0, 8)
 FIELD(VDATA, VM, 8, 1)
 FIELD(VDATA, LMUL, 9, 2)
 FIELD(VDATA, NF, 11, 4)
+FIELD(VDATA, WD, 11, 1)
 #endif
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index f72831a523..45da43ade9 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -95,6 +95,11 @@ static inline uint32_t vext_lmul(uint32_t desc)
     return FIELD_EX32(simd_data(desc), VDATA, LMUL);
 }
 
+static uint32_t vext_wd(uint32_t desc)
+{
+    return FIELD_EX32(simd_data(desc), VDATA, WD);
+}
+
 /*
  * Get vector group length in bytes. Its range is [64, 2048].
  *
@@ -684,3 +689,141 @@ GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, MO_LEUW, ldhu_w, clearl)
 GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, MO_LEUW, ldhu_d, clearq)
 GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, MO_LEUL, ldwu_w, clearl)
 GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, MO_LEUL, ldwu_d, clearq)
+
+/*
+ *** Vector AMO Operations (Zvamo)
+ */
+typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr,
+        uint32_t wd, uint32_t idx, CPURISCVState *env, uintptr_t retaddr);
+
+/* no atomic operation for vector AMO instructions */
+#define DO_SWAP(N, M) (M)
+#define DO_AND(N, M)  (N & M)
+#define DO_XOR(N, M)  (N ^ M)
+#define DO_OR(N, M)   (N | M)
+#define DO_ADD(N, M)  (N + M)
+
+#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \
+static void vext_##NAME##_noatomic_op(void *vs3,                \
+            target_ulong addr, uint32_t wd, uint32_t idx,       \
+                CPURISCVState *env, uintptr_t retaddr)          \
+{                                                               \
+    typedef int##ESZ##_t ETYPE;                                 \
+    typedef int##MSZ##_t MTYPE;                                 \
+    typedef uint##MSZ##_t UMTYPE __attribute__((unused));       \
+    ETYPE *pe3 = (ETYPE *)vs3 + H(idx);                         \
+    MTYPE a = cpu_ld##SUF##_data(env, addr), b = *pe3;          \
+    cpu_st##SUF##_data(env, addr, DO_OP(a, b));                 \
+    if (wd) {                                                   \
+        *pe3 = a; /* return the original memory value to vd */  \
+    }                                                           \
+}
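+
+/*
+ * For example (illustrative only), GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w,
+ * 32, 32, H4, DO_ADD, l) expands to a helper that loads a 32-bit value
+ * from memory, stores (old value + vs3[idx]) back to memory and, when wd
+ * is set, writes the original memory value into element idx of vd.
+ */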
+
+/* Signed min/max */
+#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
+#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))
+
+/* Unsigned min/max */
+#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M)
+#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M)
+
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w,  32, 32, H4, DO_ADD,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w,  32, 32, H4, DO_XOR,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w,  32, 32, H4, DO_AND,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w,   32, 32, H4, DO_OR,   l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w,  32, 32, H4, DO_MIN,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w,  32, 32, H4, DO_MAX,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l)
+#ifdef TARGET_RISCV64
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d,  64, 32, H8, DO_ADD,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d,  64, 64, H8, DO_ADD,  q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d,  64, 32, H8, DO_XOR,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d,  64, 64, H8, DO_XOR,  q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d,  64, 32, H8, DO_AND,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d,  64, 64, H8, DO_AND,  q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d,   64, 32, H8, DO_OR,   l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d,   64, 64, H8, DO_OR,   q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d,  64, 32, H8, DO_MIN,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d,  64, 64, H8, DO_MIN,  q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d,  64, 32, H8, DO_MAX,  l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d,  64, 64, H8, DO_MAX,  q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q)
+#endif
+
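+/*
+ * All element accesses are performed non-atomically.  The first loop
+ * probes every active element address for both load and store access,
+ * so that access faults are raised before memory or vd is modified;
+ * the second loop then performs the per-element AMO; finally the tail
+ * elements are cleared.
+ */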
+static inline void vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+        void *vs2, CPURISCVState *env, uint32_t desc,
+        vext_get_index_addr get_index_addr,
+        vext_amo_noatomic_fn *noatomic_op,
+        clear_fn *clear_elem,
+        uint32_t esz, uint32_t msz, uintptr_t ra)
+{
+    uint32_t i;
+    target_long addr;
+    uint32_t wd = vext_wd(desc);
+    uint32_t vm = vext_vm(desc);
+    uint32_t mlen = vext_mlen(desc);
+    uint32_t vlmax = vext_maxsz(desc) / esz;
+
+    for (i = 0; i < env->vl; i++) {
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {
+            continue;
+        }
+        probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD);
+        probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE);
+    }
+    for (i = 0; i < env->vl; i++) {
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {
+            continue;
+        }
+        addr = get_index_addr(base, i, vs2);
+        noatomic_op(vs3, addr, wd, i, env, ra);
+    }
+    clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz);
+}
+
+#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN)    \
+void HELPER(NAME)(void *vs3, void *v0, target_ulong base,       \
+        void *vs2, CPURISCVState *env, uint32_t desc)           \
+{                                                               \
+    vext_amo_noatomic(vs3, v0, base, vs2, env, desc,            \
+        INDEX_FN, vext_##NAME##_noatomic_op, CLEAR_FN,          \
+        sizeof(ETYPE), sizeof(MTYPE), GETPC());                 \
+}
+
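+/*
+ * MTYPE is the memory element type and ETYPE the vector element type;
+ * for the *w_v_d forms below, the 32-bit memory value is sign- or
+ * zero-extended (according to MTYPE) into a 64-bit vector element.
+ */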
+#ifdef TARGET_RISCV64
+GEN_VEXT_AMO(vamoswapw_v_d, int32_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoswapd_v_d, int64_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoaddw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoaddd_v_d,  int64_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoxorw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoxord_v_d,  int64_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoandw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoandd_v_d,  int64_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoorw_v_d,   int32_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamoord_v_d,   int64_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamominw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamomind_v_d,  int64_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamomaxw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamomaxd_v_d,  int64_t,  int64_t,  idx_d, clearq)
+GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq)
+#endif
+GEN_VEXT_AMO(vamoswapw_v_w, int32_t,  int32_t,  idx_w, clearl)
+GEN_VEXT_AMO(vamoaddw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+GEN_VEXT_AMO(vamoxorw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+GEN_VEXT_AMO(vamoandw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+GEN_VEXT_AMO(vamoorw_v_w,   int32_t,  int32_t,  idx_w, clearl)
+GEN_VEXT_AMO(vamominw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+GEN_VEXT_AMO(vamomaxw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)
-- 
2.23.0



