From: liuzhiwei <zhiwei_liu@c-sky.com>
To: Alistair.Francis@wdc.com, palmer@sifive.com,
sagark@eecs.berkeley.edu, kbastian@mail.uni-paderborn.de,
riku.voipio@iki.fi, laurent@vivier.eu, wenmeng_zhang@c-sky.com
Cc: qemu-riscv@nongnu.org, qemu-devel@nongnu.org,
wxy194768@alibaba-inc.com, LIU Zhiwei <zhiwei_liu@c-sky.com>
Subject: [Qemu-devel] [PATCH v2 10/17] RISC-V: add vector extension integer instructions part3, cmp/min/max
Date: Wed, 11 Sep 2019 14:25:34 +0800 [thread overview]
Message-ID: <1568183141-67641-11-git-send-email-zhiwei_liu@c-sky.com> (raw)
In-Reply-To: <1568183141-67641-1-git-send-email-zhiwei_liu@c-sky.com>
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
Vector integer comparison instructions (vmseq/vmsne/vmsltu/vmslt/
vmsleu/vmsle/vmsgtu/vmsgt) and vector integer min/max instructions
(vminu/vmin/vmaxu/vmax) are added.

Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
---
target/riscv/helper.h | 29 +
target/riscv/insn32.decode | 29 +
target/riscv/insn_trans/trans_rvv.inc.c | 29 +
target/riscv/vector_helper.c | 2280 +++++++++++++++++++++++++++++++
4 files changed, 2367 insertions(+)
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 28863e2..7354b12 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -200,5 +200,34 @@ DEF_HELPER_5(vector_vnsra_vv, void, env, i32, i32, i32, i32)
DEF_HELPER_5(vector_vnsra_vx, void, env, i32, i32, i32, i32)
DEF_HELPER_5(vector_vnsra_vi, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vminu_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vminu_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmin_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmin_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmaxu_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmaxu_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmax_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmax_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmseq_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmseq_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmseq_vi, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsne_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsne_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsne_vi, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsltu_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsltu_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmslt_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmslt_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsleu_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsleu_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsleu_vi, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsle_vv, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsle_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsle_vi, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsgtu_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsgtu_vi, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsgt_vx, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vector_vmsgt_vi, void, env, i32, i32, i32, i32)
+
DEF_HELPER_4(vector_vsetvli, void, env, i32, i32, i32)
DEF_HELPER_4(vector_vsetvl, void, env, i32, i32, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 19710f5..1ff0b08 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -340,5 +340,34 @@ vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm
vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm
vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm
+vmseq_vv 011000 . ..... ..... 000 ..... 1010111 @r_vm
+vmseq_vx 011000 . ..... ..... 100 ..... 1010111 @r_vm
+vmseq_vi 011000 . ..... ..... 011 ..... 1010111 @r_vm
+vmsne_vv 011001 . ..... ..... 000 ..... 1010111 @r_vm
+vmsne_vx 011001 . ..... ..... 100 ..... 1010111 @r_vm
+vmsne_vi 011001 . ..... ..... 011 ..... 1010111 @r_vm
+vmsltu_vv 011010 . ..... ..... 000 ..... 1010111 @r_vm
+vmsltu_vx 011010 . ..... ..... 100 ..... 1010111 @r_vm
+vmslt_vv 011011 . ..... ..... 000 ..... 1010111 @r_vm
+vmslt_vx 011011 . ..... ..... 100 ..... 1010111 @r_vm
+vmsleu_vv 011100 . ..... ..... 000 ..... 1010111 @r_vm
+vmsleu_vx 011100 . ..... ..... 100 ..... 1010111 @r_vm
+vmsleu_vi 011100 . ..... ..... 011 ..... 1010111 @r_vm
+vmsle_vv 011101 . ..... ..... 000 ..... 1010111 @r_vm
+vmsle_vx 011101 . ..... ..... 100 ..... 1010111 @r_vm
+vmsle_vi 011101 . ..... ..... 011 ..... 1010111 @r_vm
+vmsgtu_vx 011110 . ..... ..... 100 ..... 1010111 @r_vm
+vmsgtu_vi 011110 . ..... ..... 011 ..... 1010111 @r_vm
+vmsgt_vx 011111 . ..... ..... 100 ..... 1010111 @r_vm
+vmsgt_vi 011111 . ..... ..... 011 ..... 1010111 @r_vm
+vminu_vv 000100 . ..... ..... 000 ..... 1010111 @r_vm
+vminu_vx 000100 . ..... ..... 100 ..... 1010111 @r_vm
+vmin_vv 000101 . ..... ..... 000 ..... 1010111 @r_vm
+vmin_vx 000101 . ..... ..... 100 ..... 1010111 @r_vm
+vmaxu_vv 000110 . ..... ..... 000 ..... 1010111 @r_vm
+vmaxu_vx 000110 . ..... ..... 100 ..... 1010111 @r_vm
+vmax_vv 000111 . ..... ..... 000 ..... 1010111 @r_vm
+vmax_vx 000111 . ..... ..... 100 ..... 1010111 @r_vm
+
vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c
index 6af29d0..cd5ab07 100644
--- a/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/target/riscv/insn_trans/trans_rvv.inc.c
@@ -229,5 +229,34 @@ GEN_VECTOR_R_VM(vnsra_vv)
GEN_VECTOR_R_VM(vnsra_vx)
GEN_VECTOR_R_VM(vnsra_vi)
+GEN_VECTOR_R_VM(vmseq_vv)
+GEN_VECTOR_R_VM(vmseq_vx)
+GEN_VECTOR_R_VM(vmseq_vi)
+GEN_VECTOR_R_VM(vmsne_vv)
+GEN_VECTOR_R_VM(vmsne_vx)
+GEN_VECTOR_R_VM(vmsne_vi)
+GEN_VECTOR_R_VM(vmsltu_vv)
+GEN_VECTOR_R_VM(vmsltu_vx)
+GEN_VECTOR_R_VM(vmslt_vv)
+GEN_VECTOR_R_VM(vmslt_vx)
+GEN_VECTOR_R_VM(vmsleu_vv)
+GEN_VECTOR_R_VM(vmsleu_vx)
+GEN_VECTOR_R_VM(vmsleu_vi)
+GEN_VECTOR_R_VM(vmsle_vv)
+GEN_VECTOR_R_VM(vmsle_vx)
+GEN_VECTOR_R_VM(vmsle_vi)
+GEN_VECTOR_R_VM(vmsgtu_vx)
+GEN_VECTOR_R_VM(vmsgtu_vi)
+GEN_VECTOR_R_VM(vmsgt_vx)
+GEN_VECTOR_R_VM(vmsgt_vi)
+GEN_VECTOR_R_VM(vminu_vv)
+GEN_VECTOR_R_VM(vminu_vx)
+GEN_VECTOR_R_VM(vmin_vv)
+GEN_VECTOR_R_VM(vmin_vx)
+GEN_VECTOR_R_VM(vmaxu_vv)
+GEN_VECTOR_R_VM(vmaxu_vx)
+GEN_VECTOR_R_VM(vmax_vv)
+GEN_VECTOR_R_VM(vmax_vx)
+
GEN_VECTOR_R2_ZIMM(vsetvli)
GEN_VECTOR_R(vsetvl)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 298a10a..fbf2145 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -8608,3 +8608,2283 @@ void VECTOR_HELPER(vnsra_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
env->vfp.vstart = 0;
}
+void VECTOR_HELPER(vmseq_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src1, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs1, false);
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src1 = rs1 + (i / (VLEN / width));
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src1].u8[j] ==
+ env->vfp.vreg[src2].u8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src1].u16[j] ==
+ env->vfp.vreg[src2].u16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src1].u32[j] ==
+ env->vfp.vreg[src2].u32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src1].u64[j] ==
+ env->vfp.vreg[src2].u64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+void VECTOR_HELPER(vmseq_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint8_t)env->gpr[rs1] == env->vfp.vreg[src2].u8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint16_t)env->gpr[rs1] == env->vfp.vreg[src2].u16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint32_t)env->gpr[rs1] == env->vfp.vreg[src2].u32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint64_t)extend_gpr(env->gpr[rs1]) ==
+ env->vfp.vreg[src2].u64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+void VECTOR_HELPER(vmseq_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint8_t)sign_extend(rs1, 5)
+ == env->vfp.vreg[src2].u8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint16_t)sign_extend(rs1, 5)
+ == env->vfp.vreg[src2].u16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint32_t)sign_extend(rs1, 5)
+ == env->vfp.vreg[src2].u32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint64_t)sign_extend(rs1, 5) ==
+ env->vfp.vreg[src2].u64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+
+void VECTOR_HELPER(vmsne_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src1, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs1, false);
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src1 = rs1 + (i / (VLEN / width));
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src1].u8[j] !=
+ env->vfp.vreg[src2].u8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src1].u16[j] !=
+ env->vfp.vreg[src2].u16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src1].u32[j] !=
+ env->vfp.vreg[src2].u32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src1].u64[j] !=
+ env->vfp.vreg[src2].u64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+void VECTOR_HELPER(vmsne_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint8_t)env->gpr[rs1] != env->vfp.vreg[src2].u8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint16_t)env->gpr[rs1] != env->vfp.vreg[src2].u16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint32_t)env->gpr[rs1] != env->vfp.vreg[src2].u32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint64_t)extend_gpr(env->gpr[rs1]) !=
+ env->vfp.vreg[src2].u64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+void VECTOR_HELPER(vmsne_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint8_t)sign_extend(rs1, 5)
+ != env->vfp.vreg[src2].u8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint16_t)sign_extend(rs1, 5)
+ != env->vfp.vreg[src2].u16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint32_t)sign_extend(rs1, 5)
+ != env->vfp.vreg[src2].u32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if ((uint64_t)sign_extend(rs1, 5) !=
+ env->vfp.vreg[src2].u64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+
+void VECTOR_HELPER(vmsltu_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src1, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs1, false);
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src1 = rs1 + (i / (VLEN / width));
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u8[j] <
+ env->vfp.vreg[src1].u8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u16[j] <
+ env->vfp.vreg[src1].u16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u32[j] <
+ env->vfp.vreg[src1].u32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u64[j] <
+ env->vfp.vreg[src1].u64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+void VECTOR_HELPER(vmsltu_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u8[j] < (uint8_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u16[j] < (uint16_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u32[j] < (uint32_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u64[j] <
+ (uint64_t)extend_gpr(env->gpr[rs1])) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+
+void VECTOR_HELPER(vmslt_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src1, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs1, false);
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src1 = rs1 + (i / (VLEN / width));
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].s8[j] <
+ env->vfp.vreg[src1].s8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].s16[j] <
+ env->vfp.vreg[src1].s16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].s32[j] <
+ env->vfp.vreg[src1].s32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].s64[j] <
+ env->vfp.vreg[src1].s64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+void VECTOR_HELPER(vmslt_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].s8[j] < (int8_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].s16[j] < (int16_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].s32[j] < (int32_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].s64[j] <
+ (int64_t)extend_gpr(env->gpr[rs1])) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+
+void VECTOR_HELPER(vmsleu_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src1, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs1, false);
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src1 = rs1 + (i / (VLEN / width));
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u8[j] <=
+ env->vfp.vreg[src1].u8[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u16[j] <=
+ env->vfp.vreg[src1].u16[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u32[j] <=
+ env->vfp.vreg[src1].u32[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u64[j] <=
+ env->vfp.vreg[src1].u64[j]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+void VECTOR_HELPER(vmsleu_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+ uint32_t rs2, uint32_t rd)
+{
+ int i, j, vl;
+ uint32_t lmul, width, src2, vlmax;
+
+ vl = env->vfp.vl;
+ lmul = vector_get_lmul(env);
+ width = vector_get_width(env);
+ vlmax = vector_get_vlmax(env);
+
+ if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+
+ vector_lmul_check_reg(env, lmul, rs2, false);
+
+ for (i = 0; i < vlmax; i++) {
+ src2 = rs2 + (i / (VLEN / width));
+ j = i % (VLEN / width);
+ if (i < env->vfp.vstart) {
+ continue;
+ } else if (i < vl) {
+ switch (width) {
+ case 8:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u8[j] <= (uint8_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 16:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u16[j] <= (uint16_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 32:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u32[j] <= (uint32_t)env->gpr[rs1]) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ case 64:
+ if (vector_elem_mask(env, vm, width, lmul, i)) {
+ if (env->vfp.vreg[src2].u64[j] <=
+ (uint64_t)extend_gpr(env->gpr[rs1])) {
+ vector_mask_result(env, rd, width, lmul, i, 1);
+ } else {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ }
+ }
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ break;
+ }
+ } else {
+ if (width <= 64) {
+ vector_mask_result(env, rd, width, lmul, i, 0);
+ } else {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ return;
+ }
+ }
+ }
+ env->vfp.vstart = 0;
+}
+void VECTOR_HELPER(vmsleu_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, vlmax;
+    int64_t simm;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+    /* simm5 is sign-extended, then treated as unsigned (cf. vmseq_vi) */
+    simm = sign_extend(rs1, 5);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs2, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u8[j] <= (uint8_t)simm) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u16[j] <= (uint16_t)simm) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u32[j] <= (uint32_t)simm) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u64[j] <= (uint64_t)simm) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            if (width <= 64) {
+                vector_mask_result(env, rd, width, lmul, i, 0);
+            } else {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                return;
+            }
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vmsle.vv: mask[i] = (vs2[i] <= vs1[i]), signed element compare.
+ * Elements below vstart are skipped; elements past vl (up to vlmax)
+ * have their mask result cleared to 0.
+ */
+void VECTOR_HELPER(vmsle_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src1, src2, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+
+    vector_lmul_check_reg(env, lmul, rs1, false);
+    vector_lmul_check_reg(env, lmul, rs2, false);
+
+    for (i = 0; i < vlmax; i++) {
+        /* select the register of the group and the element within it */
+        src1 = rs1 + (i / (VLEN / width));
+        src2 = rs2 + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s8[j] <=
+                        env->vfp.vreg[src1].s8[j]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s16[j] <=
+                        env->vfp.vreg[src1].s16[j]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s32[j] <=
+                        env->vfp.vreg[src1].s32[j]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s64[j] <=
+                        env->vfp.vreg[src1].s64[j]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            if (width <= 64) {
+                /* tail element: mask result is cleared */
+                vector_mask_result(env, rd, width, lmul, i, 0);
+            } else {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                return;
+            }
+        }
+    }
+    env->vfp.vstart = 0;
+}
+/*
+ * vmsle.vx: mask[i] = (vs2[i] <= x[rs1]), signed element compare.
+ * For SEW == 64 the scalar is widened with extend_gpr(); elements past
+ * vl (up to vlmax) have their mask result cleared to 0.
+ */
+void VECTOR_HELPER(vmsle_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+
+    vector_lmul_check_reg(env, lmul, rs2, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s8[j] <= (int8_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s16[j] <= (int16_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s32[j] <= (int32_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s64[j] <=
+                        (int64_t)extend_gpr(env->gpr[rs1])) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            if (width <= 64) {
+                /* tail element: mask result is cleared */
+                vector_mask_result(env, rd, width, lmul, i, 0);
+            } else {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                return;
+            }
+        }
+    }
+    env->vfp.vstart = 0;
+}
+/*
+ * vmsle.vi: mask[i] = (vs2[i] <= simm5), signed element compare.
+ * rs1 carries the raw 5-bit immediate; it is sign-extended to the
+ * element width before the compare. Elements past vl (up to vlmax)
+ * have their mask result cleared to 0.
+ */
+void VECTOR_HELPER(vmsle_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+
+    vector_lmul_check_reg(env, lmul, rs2, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s8[j] <=
+                        (int8_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s16[j] <=
+                        (int16_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s32[j] <=
+                        (int32_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s64[j] <=
+                        sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            if (width <= 64) {
+                /* tail element: mask result is cleared */
+                vector_mask_result(env, rd, width, lmul, i, 0);
+            } else {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                return;
+            }
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vmsgtu.vx: mask[i] = (vs2[i] > x[rs1]), unsigned element compare.
+ * For SEW == 64 the scalar is widened with extend_gpr(); elements past
+ * vl (up to vlmax) have their mask result cleared to 0.
+ */
+void VECTOR_HELPER(vmsgtu_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+
+    vector_lmul_check_reg(env, lmul, rs2, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u8[j] > (uint8_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u16[j] > (uint16_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u32[j] > (uint32_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u64[j] >
+                        (uint64_t)extend_gpr(env->gpr[rs1])) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            if (width <= 64) {
+                /* tail element: mask result is cleared */
+                vector_mask_result(env, rd, width, lmul, i, 0);
+            } else {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                return;
+            }
+        }
+    }
+
+    env->vfp.vstart = 0;
+}
+/*
+ * vmsgtu.vi: mask[i] = (vs2[i] > imm), unsigned element compare.
+ *
+ * Per the RVV spec, the 5-bit immediate of the integer compare
+ * instructions is sign-extended to SEW even for the *unsigned*
+ * compares (vmsleu/vmsgtu) and then treated as an unsigned value,
+ * giving an effective immediate range of 0..15 and (2^SEW-16)..(2^SEW-1).
+ * Zero-extending rs1 here would mis-handle immediates -16..-1.
+ * Elements past vl (up to vlmax) have their mask result cleared to 0.
+ */
+void VECTOR_HELPER(vmsgtu_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+
+    vector_lmul_check_reg(env, lmul, rs2, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    /* sign-extend the immediate, then compare unsigned */
+                    if (env->vfp.vreg[src2].u8[j] >
+                        (uint8_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u16[j] >
+                        (uint16_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u32[j] >
+                        (uint32_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].u64[j] >
+                        (uint64_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            if (width <= 64) {
+                /* tail element: mask result is cleared */
+                vector_mask_result(env, rd, width, lmul, i, 0);
+            } else {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                return;
+            }
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vmsgt.vx: mask[i] = (vs2[i] > x[rs1]), signed element compare.
+ * For SEW == 64 the scalar is widened with extend_gpr(); elements past
+ * vl (up to vlmax) have their mask result cleared to 0.
+ */
+void VECTOR_HELPER(vmsgt_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, vlmax;
+
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+
+    vector_lmul_check_reg(env, lmul, rs2, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s8[j] > (int8_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s16[j] > (int16_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s32[j] > (int32_t)env->gpr[rs1]) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s64[j] >
+                        (int64_t)extend_gpr(env->gpr[rs1])) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            if (width <= 64) {
+                /* tail element: mask result is cleared */
+                vector_mask_result(env, rd, width, lmul, i, 0);
+            } else {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                return;
+            }
+        }
+    }
+    env->vfp.vstart = 0;
+}
+/*
+ * vmsgt.vi: mask[i] = (vs2[i] > simm5), signed element compare.
+ * rs1 carries the raw 5-bit immediate; it is sign-extended to the
+ * element width before the compare. Elements past vl (up to vlmax)
+ * have their mask result cleared to 0.
+ */
+void VECTOR_HELPER(vmsgt_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, vlmax;
+
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+
+    vector_lmul_check_reg(env, lmul, rs2, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s8[j] >
+                        (int8_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s16[j] >
+                        (int16_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s32[j] >
+                        (int32_t)sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src2].s64[j] >
+                        sign_extend(rs1, 5)) {
+                        vector_mask_result(env, rd, width, lmul, i, 1);
+                    } else {
+                        vector_mask_result(env, rd, width, lmul, i, 0);
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            if (width <= 64) {
+                /* tail element: mask result is cleared */
+                vector_mask_result(env, rd, width, lmul, i, 0);
+            } else {
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                return;
+            }
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vminu.vv: vd[i] = min(vs1[i], vs2[i]), unsigned.
+ * Inactive elements are left untouched; tail elements (vl..vlmax-1)
+ * are handled by vector_tail_common().
+ */
+void VECTOR_HELPER(vminu_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src1, src2, dest, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs1, false);
+    vector_lmul_check_reg(env, lmul, rs2, false);
+    vector_lmul_check_reg(env, lmul, rd, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src1 = rs1 + (i / (VLEN / width));
+        src2 = rs2 + (i / (VLEN / width));
+        dest = rd + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].u8[j] <=
+                        env->vfp.vreg[src2].u8[j]) {
+                        env->vfp.vreg[dest].u8[j] =
+                            env->vfp.vreg[src1].u8[j];
+                    } else {
+                        env->vfp.vreg[dest].u8[j] =
+                            env->vfp.vreg[src2].u8[j];
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].u16[j] <=
+                        env->vfp.vreg[src2].u16[j]) {
+                        env->vfp.vreg[dest].u16[j] =
+                            env->vfp.vreg[src1].u16[j];
+                    } else {
+                        env->vfp.vreg[dest].u16[j] =
+                            env->vfp.vreg[src2].u16[j];
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].u32[j] <=
+                        env->vfp.vreg[src2].u32[j]) {
+                        env->vfp.vreg[dest].u32[j] =
+                            env->vfp.vreg[src1].u32[j];
+                    } else {
+                        env->vfp.vreg[dest].u32[j] =
+                            env->vfp.vreg[src2].u32[j];
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].u64[j] <=
+                        env->vfp.vreg[src2].u64[j]) {
+                        env->vfp.vreg[dest].u64[j] =
+                            env->vfp.vreg[src1].u64[j];
+                    } else {
+                        env->vfp.vreg[dest].u64[j] =
+                            env->vfp.vreg[src2].u64[j];
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            /* tail element handling */
+            vector_tail_common(env, dest, j, width);
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vminu.vx: vd[i] = min(x[rs1], vs2[i]), unsigned.
+ * For SEW == 64 the scalar is widened with extend_gpr(); tail elements
+ * (vl..vlmax-1) are handled by vector_tail_common().
+ */
+void VECTOR_HELPER(vminu_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, dest, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs2, false);
+    vector_lmul_check_reg(env, lmul, rd, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        dest = rd + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((uint8_t)env->gpr[rs1] <=
+                        env->vfp.vreg[src2].u8[j]) {
+                        env->vfp.vreg[dest].u8[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].u8[j] =
+                            env->vfp.vreg[src2].u8[j];
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((uint16_t)env->gpr[rs1] <=
+                        env->vfp.vreg[src2].u16[j]) {
+                        env->vfp.vreg[dest].u16[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].u16[j] =
+                            env->vfp.vreg[src2].u16[j];
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((uint32_t)env->gpr[rs1] <=
+                        env->vfp.vreg[src2].u32[j]) {
+                        env->vfp.vreg[dest].u32[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].u32[j] =
+                            env->vfp.vreg[src2].u32[j];
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((uint64_t)extend_gpr(env->gpr[rs1]) <=
+                        env->vfp.vreg[src2].u64[j]) {
+                        env->vfp.vreg[dest].u64[j] =
+                            (uint64_t)extend_gpr(env->gpr[rs1]);
+                    } else {
+                        env->vfp.vreg[dest].u64[j] =
+                            env->vfp.vreg[src2].u64[j];
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            /* tail element handling */
+            vector_tail_common(env, dest, j, width);
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vmin.vv: vd[i] = min(vs1[i], vs2[i]), signed.
+ * Inactive elements are left untouched; tail elements (vl..vlmax-1)
+ * are handled by vector_tail_common().
+ */
+void VECTOR_HELPER(vmin_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src1, src2, dest, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs1, false);
+    vector_lmul_check_reg(env, lmul, rs2, false);
+    vector_lmul_check_reg(env, lmul, rd, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src1 = rs1 + (i / (VLEN / width));
+        src2 = rs2 + (i / (VLEN / width));
+        dest = rd + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].s8[j] <=
+                        env->vfp.vreg[src2].s8[j]) {
+                        env->vfp.vreg[dest].s8[j] =
+                            env->vfp.vreg[src1].s8[j];
+                    } else {
+                        env->vfp.vreg[dest].s8[j] =
+                            env->vfp.vreg[src2].s8[j];
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].s16[j] <=
+                        env->vfp.vreg[src2].s16[j]) {
+                        env->vfp.vreg[dest].s16[j] =
+                            env->vfp.vreg[src1].s16[j];
+                    } else {
+                        env->vfp.vreg[dest].s16[j] =
+                            env->vfp.vreg[src2].s16[j];
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].s32[j] <=
+                        env->vfp.vreg[src2].s32[j]) {
+                        env->vfp.vreg[dest].s32[j] =
+                            env->vfp.vreg[src1].s32[j];
+                    } else {
+                        env->vfp.vreg[dest].s32[j] =
+                            env->vfp.vreg[src2].s32[j];
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].s64[j] <=
+                        env->vfp.vreg[src2].s64[j]) {
+                        env->vfp.vreg[dest].s64[j] =
+                            env->vfp.vreg[src1].s64[j];
+                    } else {
+                        env->vfp.vreg[dest].s64[j] =
+                            env->vfp.vreg[src2].s64[j];
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            /* tail element handling */
+            vector_tail_common(env, dest, j, width);
+        }
+    }
+    env->vfp.vstart = 0;
+}
+/*
+ * vmin.vx: vd[i] = min(x[rs1], vs2[i]), signed.
+ * For SEW == 64 the scalar is widened with extend_gpr(); tail elements
+ * (vl..vlmax-1) are handled by vector_tail_common().
+ */
+void VECTOR_HELPER(vmin_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, dest, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs2, false);
+    vector_lmul_check_reg(env, lmul, rd, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        dest = rd + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((int8_t)env->gpr[rs1] <=
+                        env->vfp.vreg[src2].s8[j]) {
+                        env->vfp.vreg[dest].s8[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].s8[j] =
+                            env->vfp.vreg[src2].s8[j];
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((int16_t)env->gpr[rs1] <=
+                        env->vfp.vreg[src2].s16[j]) {
+                        env->vfp.vreg[dest].s16[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].s16[j] =
+                            env->vfp.vreg[src2].s16[j];
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((int32_t)env->gpr[rs1] <=
+                        env->vfp.vreg[src2].s32[j]) {
+                        env->vfp.vreg[dest].s32[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].s32[j] =
+                            env->vfp.vreg[src2].s32[j];
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((int64_t)extend_gpr(env->gpr[rs1]) <=
+                        env->vfp.vreg[src2].s64[j]) {
+                        env->vfp.vreg[dest].s64[j] =
+                            (int64_t)extend_gpr(env->gpr[rs1]);
+                    } else {
+                        env->vfp.vreg[dest].s64[j] =
+                            env->vfp.vreg[src2].s64[j];
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            /* tail element handling */
+            vector_tail_common(env, dest, j, width);
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vmaxu.vv: vd[i] = max(vs1[i], vs2[i]), unsigned.
+ * Inactive elements are left untouched; tail elements (vl..vlmax-1)
+ * are handled by vector_tail_common().
+ */
+void VECTOR_HELPER(vmaxu_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src1, src2, dest, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs1, false);
+    vector_lmul_check_reg(env, lmul, rs2, false);
+    vector_lmul_check_reg(env, lmul, rd, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src1 = rs1 + (i / (VLEN / width));
+        src2 = rs2 + (i / (VLEN / width));
+        dest = rd + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].u8[j] >=
+                        env->vfp.vreg[src2].u8[j]) {
+                        env->vfp.vreg[dest].u8[j] =
+                            env->vfp.vreg[src1].u8[j];
+                    } else {
+                        env->vfp.vreg[dest].u8[j] =
+                            env->vfp.vreg[src2].u8[j];
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].u16[j] >=
+                        env->vfp.vreg[src2].u16[j]) {
+                        env->vfp.vreg[dest].u16[j] =
+                            env->vfp.vreg[src1].u16[j];
+                    } else {
+                        env->vfp.vreg[dest].u16[j] =
+                            env->vfp.vreg[src2].u16[j];
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].u32[j] >=
+                        env->vfp.vreg[src2].u32[j]) {
+                        env->vfp.vreg[dest].u32[j] =
+                            env->vfp.vreg[src1].u32[j];
+                    } else {
+                        env->vfp.vreg[dest].u32[j] =
+                            env->vfp.vreg[src2].u32[j];
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].u64[j] >=
+                        env->vfp.vreg[src2].u64[j]) {
+                        env->vfp.vreg[dest].u64[j] =
+                            env->vfp.vreg[src1].u64[j];
+                    } else {
+                        env->vfp.vreg[dest].u64[j] =
+                            env->vfp.vreg[src2].u64[j];
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            /* tail element handling */
+            vector_tail_common(env, dest, j, width);
+        }
+    }
+
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vmaxu.vx: vd[i] = max(x[rs1], vs2[i]), unsigned.
+ * For SEW == 64 the scalar is widened with extend_gpr(); tail elements
+ * (vl..vlmax-1) are handled by vector_tail_common().
+ */
+void VECTOR_HELPER(vmaxu_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, dest, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs2, false);
+    vector_lmul_check_reg(env, lmul, rd, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        dest = rd + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((uint8_t)env->gpr[rs1] >=
+                        env->vfp.vreg[src2].u8[j]) {
+                        env->vfp.vreg[dest].u8[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].u8[j] =
+                            env->vfp.vreg[src2].u8[j];
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((uint16_t)env->gpr[rs1] >=
+                        env->vfp.vreg[src2].u16[j]) {
+                        env->vfp.vreg[dest].u16[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].u16[j] =
+                            env->vfp.vreg[src2].u16[j];
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((uint32_t)env->gpr[rs1] >=
+                        env->vfp.vreg[src2].u32[j]) {
+                        env->vfp.vreg[dest].u32[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].u32[j] =
+                            env->vfp.vreg[src2].u32[j];
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((uint64_t)extend_gpr(env->gpr[rs1]) >=
+                        env->vfp.vreg[src2].u64[j]) {
+                        env->vfp.vreg[dest].u64[j] =
+                            (uint64_t)extend_gpr(env->gpr[rs1]);
+                    } else {
+                        env->vfp.vreg[dest].u64[j] =
+                            env->vfp.vreg[src2].u64[j];
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            /* tail element handling */
+            vector_tail_common(env, dest, j, width);
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
+/*
+ * vmax.vv: vd[i] = max(vs1[i], vs2[i]), signed.
+ * Inactive elements are left untouched; tail elements (vl..vlmax-1)
+ * are handled by vector_tail_common().
+ */
+void VECTOR_HELPER(vmax_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src1, src2, dest, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs1, false);
+    vector_lmul_check_reg(env, lmul, rs2, false);
+    vector_lmul_check_reg(env, lmul, rd, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src1 = rs1 + (i / (VLEN / width));
+        src2 = rs2 + (i / (VLEN / width));
+        dest = rd + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].s8[j] >=
+                        env->vfp.vreg[src2].s8[j]) {
+                        env->vfp.vreg[dest].s8[j] =
+                            env->vfp.vreg[src1].s8[j];
+                    } else {
+                        env->vfp.vreg[dest].s8[j] =
+                            env->vfp.vreg[src2].s8[j];
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].s16[j] >=
+                        env->vfp.vreg[src2].s16[j]) {
+                        env->vfp.vreg[dest].s16[j] =
+                            env->vfp.vreg[src1].s16[j];
+                    } else {
+                        env->vfp.vreg[dest].s16[j] =
+                            env->vfp.vreg[src2].s16[j];
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].s32[j] >=
+                        env->vfp.vreg[src2].s32[j]) {
+                        env->vfp.vreg[dest].s32[j] =
+                            env->vfp.vreg[src1].s32[j];
+                    } else {
+                        env->vfp.vreg[dest].s32[j] =
+                            env->vfp.vreg[src2].s32[j];
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if (env->vfp.vreg[src1].s64[j] >=
+                        env->vfp.vreg[src2].s64[j]) {
+                        env->vfp.vreg[dest].s64[j] =
+                            env->vfp.vreg[src1].s64[j];
+                    } else {
+                        env->vfp.vreg[dest].s64[j] =
+                            env->vfp.vreg[src2].s64[j];
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            /* tail element handling */
+            vector_tail_common(env, dest, j, width);
+        }
+    }
+    env->vfp.vstart = 0;
+}
+/*
+ * vmax.vx: vd[i] = max(x[rs1], vs2[i]), signed.
+ * For SEW == 64 the scalar is widened with extend_gpr(); tail elements
+ * (vl..vlmax-1) are handled by vector_tail_common().
+ */
+void VECTOR_HELPER(vmax_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1,
+    uint32_t rs2, uint32_t rd)
+{
+    int i, j, vl;
+    uint32_t lmul, width, src2, dest, vlmax;
+
+    vl = env->vfp.vl;
+    lmul = vector_get_lmul(env);
+    width = vector_get_width(env);
+    vlmax = vector_get_vlmax(env);
+
+    if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+        return;
+    }
+    vector_lmul_check_reg(env, lmul, rs2, false);
+    vector_lmul_check_reg(env, lmul, rd, false);
+
+    for (i = 0; i < vlmax; i++) {
+        src2 = rs2 + (i / (VLEN / width));
+        dest = rd + (i / (VLEN / width));
+        j = i % (VLEN / width);
+        if (i < env->vfp.vstart) {
+            continue;
+        } else if (i < vl) {
+            switch (width) {
+            case 8:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((int8_t)env->gpr[rs1] >=
+                        env->vfp.vreg[src2].s8[j]) {
+                        env->vfp.vreg[dest].s8[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].s8[j] =
+                            env->vfp.vreg[src2].s8[j];
+                    }
+                }
+                break;
+            case 16:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((int16_t)env->gpr[rs1] >=
+                        env->vfp.vreg[src2].s16[j]) {
+                        env->vfp.vreg[dest].s16[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].s16[j] =
+                            env->vfp.vreg[src2].s16[j];
+                    }
+                }
+                break;
+            case 32:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((int32_t)env->gpr[rs1] >=
+                        env->vfp.vreg[src2].s32[j]) {
+                        env->vfp.vreg[dest].s32[j] =
+                            env->gpr[rs1];
+                    } else {
+                        env->vfp.vreg[dest].s32[j] =
+                            env->vfp.vreg[src2].s32[j];
+                    }
+                }
+                break;
+            case 64:
+                if (vector_elem_mask(env, vm, width, lmul, i)) {
+                    if ((int64_t)extend_gpr(env->gpr[rs1]) >=
+                        env->vfp.vreg[src2].s64[j]) {
+                        env->vfp.vreg[dest].s64[j] =
+                            (int64_t)extend_gpr(env->gpr[rs1]);
+                    } else {
+                        env->vfp.vreg[dest].s64[j] =
+                            env->vfp.vreg[src2].s64[j];
+                    }
+                }
+                break;
+            default:
+                riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+                break;
+            }
+        } else {
+            /* tail element handling */
+            vector_tail_common(env, dest, j, width);
+        }
+    }
+    env->vfp.vstart = 0;
+}
+
--
2.7.4
next prev parent reply other threads:[~2019-09-11 6:49 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-09-11 6:25 [Qemu-devel] [PATCH v2 00/17] RISC-V: support vector extension liuzhiwei
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 01/17] RISC-V: add vfp field in CPURISCVState liuzhiwei
2019-09-11 14:51 ` Chih-Min Chao
2019-09-11 22:39 ` Richard Henderson
2019-09-12 14:53 ` Chih-Min Chao
2019-09-12 15:06 ` Richard Henderson
2019-09-17 8:09 ` liuzhiwei
2019-09-11 22:32 ` Richard Henderson
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 02/17] RISC-V: turn on vector extension from command line by cfg.ext_v Property liuzhiwei
2019-09-11 15:00 ` Chih-Min Chao
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 03/17] RISC-V: support vector extension csr liuzhiwei
2019-09-11 15:25 ` [Qemu-devel] [Qemu-riscv] " Chih-Min Chao
2019-09-11 22:43 ` [Qemu-devel] " Richard Henderson
2019-09-14 13:58 ` Palmer Dabbelt
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 04/17] RISC-V: add vector extension configure instruction liuzhiwei
2019-09-11 16:04 ` [Qemu-devel] [Qemu-riscv] " Chih-Min Chao
2019-09-11 23:09 ` [Qemu-devel] " Richard Henderson
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 05/17] RISC-V: add vector extension load and store instructions liuzhiwei
2019-09-12 14:23 ` Richard Henderson
2020-01-08 1:32 ` LIU Zhiwei
2020-01-08 2:08 ` Richard Henderson
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 06/17] RISC-V: add vector extension fault-only-first implementation liuzhiwei
2019-09-12 14:32 ` Richard Henderson
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 07/17] RISC-V: add vector extension atomic instructions liuzhiwei
2019-09-12 14:57 ` Richard Henderson
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 08/17] RISC-V: add vector extension integer instructions part1, add/sub/adc/sbc liuzhiwei
2019-09-12 15:27 ` Richard Henderson
2019-09-12 15:35 ` Richard Henderson
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 09/17] RISC-V: add vector extension integer instructions part2, bit/shift liuzhiwei
2019-09-12 16:41 ` Richard Henderson
2019-09-11 6:25 ` liuzhiwei [this message]
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 11/17] RISC-V: add vector extension integer instructions part4, mul/div/merge liuzhiwei
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 12/17] RISC-V: add vector extension fixed point instructions liuzhiwei
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 13/17] RISC-V: add vector extension float instruction part1, add/sub/mul/div liuzhiwei
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 14/17] RISC-V: add vector extension float instructions part2, sqrt/cmp/cvt/others liuzhiwei
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 15/17] RISC-V: add vector extension reduction instructions liuzhiwei
2019-09-12 16:54 ` Richard Henderson
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 16/17] RISC-V: add vector extension mask instructions liuzhiwei
2019-09-12 17:07 ` Richard Henderson
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 17/17] RISC-V: add vector extension premutation instructions liuzhiwei
2019-09-12 17:13 ` Richard Henderson
2019-09-11 7:00 ` [Qemu-devel] [PATCH v2 00/17] RISC-V: support vector extension Aleksandar Markovic
2019-09-14 12:59 ` Palmer Dabbelt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1568183141-67641-11-git-send-email-zhiwei_liu@c-sky.com \
--to=zhiwei_liu@c-sky.com \
--cc=Alistair.Francis@wdc.com \
--cc=kbastian@mail.uni-paderborn.de \
--cc=laurent@vivier.eu \
--cc=palmer@sifive.com \
--cc=qemu-devel@nongnu.org \
--cc=qemu-riscv@nongnu.org \
--cc=riku.voipio@iki.fi \
--cc=sagark@eecs.berkeley.edu \
--cc=wenmeng_zhang@c-sky.com \
--cc=wxy194768@alibaba-inc.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).