From: liuzhiwei <zhiwei_liu@c-sky.com> To: Alistair.Francis@wdc.com, palmer@sifive.com, sagark@eecs.berkeley.edu, kbastian@mail.uni-paderborn.de, riku.voipio@iki.fi, laurent@vivier.eu, wenmeng_zhang@c-sky.com Cc: qemu-riscv@nongnu.org, qemu-devel@nongnu.org, wxy194768@alibaba-inc.com, LIU Zhiwei <zhiwei_liu@c-sky.com> Subject: [Qemu-devel] [PATCH v2 09/17] RISC-V: add vector extension integer instructions part2, bit/shift Date: Wed, 11 Sep 2019 14:25:33 +0800 [thread overview] Message-ID: <1568183141-67641-10-git-send-email-zhiwei_liu@c-sky.com> (raw) In-Reply-To: <1568183141-67641-1-git-send-email-zhiwei_liu@c-sky.com> From: LIU Zhiwei <zhiwei_liu@c-sky.com> Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com> --- target/riscv/helper.h | 25 + target/riscv/insn32.decode | 25 + target/riscv/insn_trans/trans_rvv.inc.c | 25 + target/riscv/vector_helper.c | 1477 +++++++++++++++++++++++++++++++ 4 files changed, 1552 insertions(+) diff --git a/target/riscv/helper.h b/target/riscv/helper.h index 31e20dc..28863e2 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -175,5 +175,30 @@ DEF_HELPER_5(vector_vwsubu_wx, void, env, i32, i32, i32, i32) DEF_HELPER_5(vector_vwsub_wv, void, env, i32, i32, i32, i32) DEF_HELPER_5(vector_vwsub_wx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vand_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vand_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vand_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vor_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vor_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vor_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vxor_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vxor_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vxor_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsll_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsll_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsll_vi, void, env, i32, 
i32, i32, i32) +DEF_HELPER_5(vector_vsrl_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsrl_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsrl_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsra_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsra_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsra_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsrl_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsrl_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsrl_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsra_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsra_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsra_vi, void, env, i32, i32, i32, i32) + DEF_HELPER_4(vector_vsetvli, void, env, i32, i32, i32) DEF_HELPER_4(vector_vsetvl, void, env, i32, i32, i32) diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index fc7e498..19710f5 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -315,5 +315,30 @@ vsbc_vxm 010010 1 ..... ..... 100 ..... 1010111 @r vmsbc_vvm 010011 1 ..... ..... 000 ..... 1010111 @r vmsbc_vxm 010011 1 ..... ..... 100 ..... 1010111 @r +vand_vv 001001 . ..... ..... 000 ..... 1010111 @r_vm +vand_vx 001001 . ..... ..... 100 ..... 1010111 @r_vm +vand_vi 001001 . ..... ..... 011 ..... 1010111 @r_vm +vor_vv 001010 . ..... ..... 000 ..... 1010111 @r_vm +vor_vx 001010 . ..... ..... 100 ..... 1010111 @r_vm +vor_vi 001010 . ..... ..... 011 ..... 1010111 @r_vm +vxor_vv 001011 . ..... ..... 000 ..... 1010111 @r_vm +vxor_vx 001011 . ..... ..... 100 ..... 1010111 @r_vm +vxor_vi 001011 . ..... ..... 011 ..... 1010111 @r_vm +vsll_vv 100101 . ..... ..... 000 ..... 1010111 @r_vm +vsll_vx 100101 . ..... ..... 100 ..... 1010111 @r_vm +vsll_vi 100101 . ..... ..... 011 ..... 1010111 @r_vm +vsrl_vv 101000 . ..... ..... 000 ..... 1010111 @r_vm +vsrl_vx 101000 . ..... ..... 100 ..... 1010111 @r_vm +vsrl_vi 101000 . ..... ..... 011 ..... 
1010111 @r_vm +vsra_vv 101001 . ..... ..... 000 ..... 1010111 @r_vm +vsra_vx 101001 . ..... ..... 100 ..... 1010111 @r_vm +vsra_vi 101001 . ..... ..... 011 ..... 1010111 @r_vm +vnsrl_vv 101100 . ..... ..... 000 ..... 1010111 @r_vm +vnsrl_vx 101100 . ..... ..... 100 ..... 1010111 @r_vm +vnsrl_vi 101100 . ..... ..... 011 ..... 1010111 @r_vm +vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm +vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm +vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm + vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm vsetvl 1000000 ..... ..... 111 ..... 1010111 @r diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c index a1c1960..6af29d0 100644 --- a/target/riscv/insn_trans/trans_rvv.inc.c +++ b/target/riscv/insn_trans/trans_rvv.inc.c @@ -204,5 +204,30 @@ GEN_VECTOR_R_VM(vwsubu_wx) GEN_VECTOR_R_VM(vwsub_wv) GEN_VECTOR_R_VM(vwsub_wx) +GEN_VECTOR_R_VM(vand_vv) +GEN_VECTOR_R_VM(vand_vx) +GEN_VECTOR_R_VM(vand_vi) +GEN_VECTOR_R_VM(vor_vv) +GEN_VECTOR_R_VM(vor_vx) +GEN_VECTOR_R_VM(vor_vi) +GEN_VECTOR_R_VM(vxor_vv) +GEN_VECTOR_R_VM(vxor_vx) +GEN_VECTOR_R_VM(vxor_vi) +GEN_VECTOR_R_VM(vsll_vv) +GEN_VECTOR_R_VM(vsll_vx) +GEN_VECTOR_R_VM(vsll_vi) +GEN_VECTOR_R_VM(vsrl_vv) +GEN_VECTOR_R_VM(vsrl_vx) +GEN_VECTOR_R_VM(vsrl_vi) +GEN_VECTOR_R_VM(vsra_vv) +GEN_VECTOR_R_VM(vsra_vx) +GEN_VECTOR_R_VM(vsra_vi) +GEN_VECTOR_R_VM(vnsrl_vv) +GEN_VECTOR_R_VM(vnsrl_vx) +GEN_VECTOR_R_VM(vnsrl_vi) +GEN_VECTOR_R_VM(vnsra_vv) +GEN_VECTOR_R_VM(vnsra_vx) +GEN_VECTOR_R_VM(vnsra_vi) + GEN_VECTOR_R2_ZIMM(vsetvli) GEN_VECTOR_R(vsetvl) diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 95336c9..298a10a 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -268,6 +268,25 @@ static void vector_tail_widen(CPURISCVState *env, int vreg, int index, } } +static void vector_tail_narrow(CPURISCVState *env, int vreg, int index, + int width) +{ + switch (width) { + case 8: + 
env->vfp.vreg[vreg].u8[index] = 0; + break; + case 16: + env->vfp.vreg[vreg].u16[index] = 0; + break; + case 32: + env->vfp.vreg[vreg].u32[index] = 0; + break; + default: + helper_raise_exception(env, RISCV_EXCP_ILLEGAL_INST); + return; + } +} + static inline int vector_get_carry(CPURISCVState *env, int width, int lmul, int index) { @@ -7131,3 +7150,1461 @@ void VECTOR_HELPER(vwsub_wx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, } env->vfp.vstart = 0; } + +void VECTOR_HELPER(vand_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src1].u8[j] + & env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src1].u16[j] + & env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src1].u32[j] + & env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src1].u64[j] + & env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, 
RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vand_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->gpr[rs1] + & env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->gpr[rs1] + & env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->gpr[rs1] + & env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = + (uint64_t)extend_gpr(env->gpr[rs1]) + & env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vand_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if 
(vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = sign_extend(rs1, 5) + & env->vfp.vreg[src2].s8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = sign_extend(rs1, 5) + & env->vfp.vreg[src2].s16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = sign_extend(rs1, 5) + & env->vfp.vreg[src2].s32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = sign_extend(rs1, 5) + & env->vfp.vreg[src2].s64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vor_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j 
= i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src1].u8[j] + | env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src1].u16[j] + | env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src1].u32[j] + | env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src1].u64[j] + | env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vor_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->gpr[rs1] + | env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->gpr[rs1] + | env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + 
if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->gpr[rs1] + | env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = + (uint64_t)extend_gpr(env->gpr[rs1]) + | env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vor_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = sign_extend(rs1, 5) + | env->vfp.vreg[src2].s8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = sign_extend(rs1, 5) + | env->vfp.vreg[src2].s16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = sign_extend(rs1, 5) + | env->vfp.vreg[src2].s32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = sign_extend(rs1, 5) + | env->vfp.vreg[src2].s64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, 
j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vxor_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src1].u8[j] + ^ env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src1].u16[j] + ^ env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src1].u32[j] + ^ env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src1].u64[j] + ^ env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vxor_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = 
vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->gpr[rs1] + ^ env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->gpr[rs1] + ^ env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->gpr[rs1] + ^ env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = + (uint64_t)extend_gpr(env->gpr[rs1]) + ^ env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vxor_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + 
continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = sign_extend(rs1, 5) + ^ env->vfp.vreg[src2].s8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = sign_extend(rs1, 5) + ^ env->vfp.vreg[src2].s16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = sign_extend(rs1, 5) + ^ env->vfp.vreg[src2].s32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = sign_extend(rs1, 5) + ^ env->vfp.vreg[src2].s64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsll_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + << (env->vfp.vreg[src1].u8[j] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + << 
(env->vfp.vreg[src1].u16[j] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + << (env->vfp.vreg[src1].u32[j] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + << (env->vfp.vreg[src1].u64[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsll_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + << (env->gpr[rs1] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + << (env->gpr[rs1] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + << (env->gpr[rs1] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + << ((uint64_t)extend_gpr(env->gpr[rs1]) & 0x3f); + } + break; + 
default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsll_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + << (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + << (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + << (rs1); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + << (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsrl_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if 
(vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + >> (env->vfp.vreg[src1].u8[j] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + >> (env->vfp.vreg[src1].u16[j] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + >> (env->vfp.vreg[src1].u32[j] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + >> (env->vfp.vreg[src1].u64[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vsrl_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / 
(VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + >> (env->gpr[rs1] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + >> (env->gpr[rs1] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + >> (env->gpr[rs1] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + >> ((uint64_t)extend_gpr(env->gpr[rs1]) & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vsrl_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + >> (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = 
env->vfp.vreg[src2].u16[j] + >> (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + >> (rs1); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + >> (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsra_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s8[j] + >> (env->vfp.vreg[src1].s8[j] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s16[j] + >> (env->vfp.vreg[src1].s16[j] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s32[j] + >> (env->vfp.vreg[src1].s32[j] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = env->vfp.vreg[src2].s64[j] 
+ >> (env->vfp.vreg[src1].s64[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsra_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s8[j] + >> (env->gpr[rs1] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s16[j] + >> (env->gpr[rs1] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s32[j] + >> (env->gpr[rs1] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = env->vfp.vreg[src2].s64[j] + >> ((uint64_t)extend_gpr(env->gpr[rs1]) & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsra_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl 
= env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s8[j] + >> (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s16[j] + >> (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s32[j] + >> (rs1); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = env->vfp.vreg[src2].s64[j] + >> (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vnsrl_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i 
< vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u16[k] + >> (env->vfp.vreg[src1].u8[j] & 0xf); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u32[k] + >> (env->vfp.vreg[src1].u16[j] & 0x1f); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u64[k] + >> (env->vfp.vreg[src1].u32[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vnsrl_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u16[k] + >> (env->gpr[rs1] & 0xf); + } + break; + case 16: + if 
(vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u32[k] + >> (env->gpr[rs1] & 0x1f); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u64[k] + >> (env->gpr[rs1] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vnsrl_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u16[k] + >> (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u32[k] + >> (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u64[k] + >> (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vnsra_vv)(CPURISCVState *env, 
uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s16[k] + >> (env->vfp.vreg[src1].s8[j] & 0xf); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s32[k] + >> (env->vfp.vreg[src1].s16[j] & 0x1f); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s64[k] + >> (env->vfp.vreg[src1].s32[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vnsra_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 
2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s16[k] + >> (env->gpr[rs1] & 0xf); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s32[k] + >> (env->gpr[rs1] & 0x1f); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s64[k] + >> (env->gpr[rs1] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vnsra_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, 
lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s16[k] + >> (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s32[k] + >> (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s64[k] + >> (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + -- 2.7.4
WARNING: multiple messages have this Message-ID (diff)
From: liuzhiwei <zhiwei_liu@c-sky.com> To: Alistair.Francis@wdc.com, palmer@sifive.com, sagark@eecs.berkeley.edu, kbastian@mail.uni-paderborn.de, riku.voipio@iki.fi, laurent@vivier.eu, wenmeng_zhang@c-sky.com Cc: wxy194768@alibaba-inc.com, qemu-devel@nongnu.org, qemu-riscv@nongnu.org, LIU Zhiwei <zhiwei_liu@c-sky.com> Subject: [Qemu-riscv] [PATCH v2 09/17] RISC-V: add vector extension integer instructions part2, bit/shift Date: Wed, 11 Sep 2019 14:25:33 +0800 [thread overview] Message-ID: <1568183141-67641-10-git-send-email-zhiwei_liu@c-sky.com> (raw) In-Reply-To: <1568183141-67641-1-git-send-email-zhiwei_liu@c-sky.com> From: LIU Zhiwei <zhiwei_liu@c-sky.com> Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com> --- target/riscv/helper.h | 25 + target/riscv/insn32.decode | 25 + target/riscv/insn_trans/trans_rvv.inc.c | 25 + target/riscv/vector_helper.c | 1477 +++++++++++++++++++++++++++++++ 4 files changed, 1552 insertions(+) diff --git a/target/riscv/helper.h b/target/riscv/helper.h index 31e20dc..28863e2 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -175,5 +175,30 @@ DEF_HELPER_5(vector_vwsubu_wx, void, env, i32, i32, i32, i32) DEF_HELPER_5(vector_vwsub_wv, void, env, i32, i32, i32, i32) DEF_HELPER_5(vector_vwsub_wx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vand_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vand_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vand_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vor_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vor_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vor_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vxor_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vxor_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vxor_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsll_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsll_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsll_vi, void, env, i32, 
i32, i32, i32) +DEF_HELPER_5(vector_vsrl_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsrl_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsrl_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsra_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsra_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vsra_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsrl_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsrl_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsrl_vi, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsra_vv, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsra_vx, void, env, i32, i32, i32, i32) +DEF_HELPER_5(vector_vnsra_vi, void, env, i32, i32, i32, i32) + DEF_HELPER_4(vector_vsetvli, void, env, i32, i32, i32) DEF_HELPER_4(vector_vsetvl, void, env, i32, i32, i32) diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index fc7e498..19710f5 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -315,5 +315,30 @@ vsbc_vxm 010010 1 ..... ..... 100 ..... 1010111 @r vmsbc_vvm 010011 1 ..... ..... 000 ..... 1010111 @r vmsbc_vxm 010011 1 ..... ..... 100 ..... 1010111 @r +vand_vv 001001 . ..... ..... 000 ..... 1010111 @r_vm +vand_vx 001001 . ..... ..... 100 ..... 1010111 @r_vm +vand_vi 001001 . ..... ..... 011 ..... 1010111 @r_vm +vor_vv 001010 . ..... ..... 000 ..... 1010111 @r_vm +vor_vx 001010 . ..... ..... 100 ..... 1010111 @r_vm +vor_vi 001010 . ..... ..... 011 ..... 1010111 @r_vm +vxor_vv 001011 . ..... ..... 000 ..... 1010111 @r_vm +vxor_vx 001011 . ..... ..... 100 ..... 1010111 @r_vm +vxor_vi 001011 . ..... ..... 011 ..... 1010111 @r_vm +vsll_vv 100101 . ..... ..... 000 ..... 1010111 @r_vm +vsll_vx 100101 . ..... ..... 100 ..... 1010111 @r_vm +vsll_vi 100101 . ..... ..... 011 ..... 1010111 @r_vm +vsrl_vv 101000 . ..... ..... 000 ..... 1010111 @r_vm +vsrl_vx 101000 . ..... ..... 100 ..... 1010111 @r_vm +vsrl_vi 101000 . ..... ..... 011 ..... 
1010111 @r_vm +vsra_vv 101001 . ..... ..... 000 ..... 1010111 @r_vm +vsra_vx 101001 . ..... ..... 100 ..... 1010111 @r_vm +vsra_vi 101001 . ..... ..... 011 ..... 1010111 @r_vm +vnsrl_vv 101100 . ..... ..... 000 ..... 1010111 @r_vm +vnsrl_vx 101100 . ..... ..... 100 ..... 1010111 @r_vm +vnsrl_vi 101100 . ..... ..... 011 ..... 1010111 @r_vm +vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm +vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm +vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm + vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm vsetvl 1000000 ..... ..... 111 ..... 1010111 @r diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c index a1c1960..6af29d0 100644 --- a/target/riscv/insn_trans/trans_rvv.inc.c +++ b/target/riscv/insn_trans/trans_rvv.inc.c @@ -204,5 +204,30 @@ GEN_VECTOR_R_VM(vwsubu_wx) GEN_VECTOR_R_VM(vwsub_wv) GEN_VECTOR_R_VM(vwsub_wx) +GEN_VECTOR_R_VM(vand_vv) +GEN_VECTOR_R_VM(vand_vx) +GEN_VECTOR_R_VM(vand_vi) +GEN_VECTOR_R_VM(vor_vv) +GEN_VECTOR_R_VM(vor_vx) +GEN_VECTOR_R_VM(vor_vi) +GEN_VECTOR_R_VM(vxor_vv) +GEN_VECTOR_R_VM(vxor_vx) +GEN_VECTOR_R_VM(vxor_vi) +GEN_VECTOR_R_VM(vsll_vv) +GEN_VECTOR_R_VM(vsll_vx) +GEN_VECTOR_R_VM(vsll_vi) +GEN_VECTOR_R_VM(vsrl_vv) +GEN_VECTOR_R_VM(vsrl_vx) +GEN_VECTOR_R_VM(vsrl_vi) +GEN_VECTOR_R_VM(vsra_vv) +GEN_VECTOR_R_VM(vsra_vx) +GEN_VECTOR_R_VM(vsra_vi) +GEN_VECTOR_R_VM(vnsrl_vv) +GEN_VECTOR_R_VM(vnsrl_vx) +GEN_VECTOR_R_VM(vnsrl_vi) +GEN_VECTOR_R_VM(vnsra_vv) +GEN_VECTOR_R_VM(vnsra_vx) +GEN_VECTOR_R_VM(vnsra_vi) + GEN_VECTOR_R2_ZIMM(vsetvli) GEN_VECTOR_R(vsetvl) diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 95336c9..298a10a 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -268,6 +268,25 @@ static void vector_tail_widen(CPURISCVState *env, int vreg, int index, } } +static void vector_tail_narrow(CPURISCVState *env, int vreg, int index, + int width) +{ + switch (width) { + case 8: + 
env->vfp.vreg[vreg].u8[index] = 0; + break; + case 16: + env->vfp.vreg[vreg].u16[index] = 0; + break; + case 32: + env->vfp.vreg[vreg].u32[index] = 0; + break; + default: + helper_raise_exception(env, RISCV_EXCP_ILLEGAL_INST); + return; + } +} + static inline int vector_get_carry(CPURISCVState *env, int width, int lmul, int index) { @@ -7131,3 +7150,1461 @@ void VECTOR_HELPER(vwsub_wx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, } env->vfp.vstart = 0; } + +void VECTOR_HELPER(vand_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src1].u8[j] + & env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src1].u16[j] + & env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src1].u32[j] + & env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src1].u64[j] + & env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, 
RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vand_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->gpr[rs1] + & env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->gpr[rs1] + & env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->gpr[rs1] + & env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = + (uint64_t)extend_gpr(env->gpr[rs1]) + & env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vand_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if 
(vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = sign_extend(rs1, 5) + & env->vfp.vreg[src2].s8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = sign_extend(rs1, 5) + & env->vfp.vreg[src2].s16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = sign_extend(rs1, 5) + & env->vfp.vreg[src2].s32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = sign_extend(rs1, 5) + & env->vfp.vreg[src2].s64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vor_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j 
= i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src1].u8[j] + | env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src1].u16[j] + | env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src1].u32[j] + | env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src1].u64[j] + | env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vor_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->gpr[rs1] + | env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->gpr[rs1] + | env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + 
if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->gpr[rs1] + | env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = + (uint64_t)extend_gpr(env->gpr[rs1]) + | env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vor_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = sign_extend(rs1, 5) + | env->vfp.vreg[src2].s8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = sign_extend(rs1, 5) + | env->vfp.vreg[src2].s16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = sign_extend(rs1, 5) + | env->vfp.vreg[src2].s32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = sign_extend(rs1, 5) + | env->vfp.vreg[src2].s64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, 
j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vxor_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src1].u8[j] + ^ env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src1].u16[j] + ^ env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src1].u32[j] + ^ env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src1].u64[j] + ^ env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vxor_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = 
vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->gpr[rs1] + ^ env->vfp.vreg[src2].u8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->gpr[rs1] + ^ env->vfp.vreg[src2].u16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->gpr[rs1] + ^ env->vfp.vreg[src2].u32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = + (uint64_t)extend_gpr(env->gpr[rs1]) + ^ env->vfp.vreg[src2].u64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vxor_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + 
continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = sign_extend(rs1, 5) + ^ env->vfp.vreg[src2].s8[j]; + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = sign_extend(rs1, 5) + ^ env->vfp.vreg[src2].s16[j]; + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = sign_extend(rs1, 5) + ^ env->vfp.vreg[src2].s32[j]; + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = sign_extend(rs1, 5) + ^ env->vfp.vreg[src2].s64[j]; + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsll_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + << (env->vfp.vreg[src1].u8[j] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + << 
(env->vfp.vreg[src1].u16[j] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + << (env->vfp.vreg[src1].u32[j] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + << (env->vfp.vreg[src1].u64[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsll_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + << (env->gpr[rs1] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + << (env->gpr[rs1] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + << (env->gpr[rs1] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + << ((uint64_t)extend_gpr(env->gpr[rs1]) & 0x3f); + } + break; + 
default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsll_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + << (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + << (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + << (rs1); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + << (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsrl_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if 
(vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + >> (env->vfp.vreg[src1].u8[j] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + >> (env->vfp.vreg[src1].u16[j] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + >> (env->vfp.vreg[src1].u32[j] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + >> (env->vfp.vreg[src1].u64[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vsrl_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / 
(VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + >> (env->gpr[rs1] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u16[j] + >> (env->gpr[rs1] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + >> (env->gpr[rs1] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + >> ((uint64_t)extend_gpr(env->gpr[rs1]) & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vsrl_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u8[j] + >> (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = 
env->vfp.vreg[src2].u16[j] + >> (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u32[j] + >> (rs1); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u64[j] = env->vfp.vreg[src2].u64[j] + >> (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsra_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s8[j] + >> (env->vfp.vreg[src1].s8[j] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s16[j] + >> (env->vfp.vreg[src1].s16[j] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s32[j] + >> (env->vfp.vreg[src1].s32[j] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = env->vfp.vreg[src2].s64[j] 
+ >> (env->vfp.vreg[src1].s64[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsra_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s8[j] + >> (env->gpr[rs1] & 0x7); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s16[j] + >> (env->gpr[rs1] & 0xf); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s32[j] + >> (env->gpr[rs1] & 0x1f); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = env->vfp.vreg[src2].s64[j] + >> ((uint64_t)extend_gpr(env->gpr[rs1]) & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vsra_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl 
= env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || vector_overlap_vm_common(lmul, vm, rd)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, false); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / width)); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s8[j] + >> (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s16[j] + >> (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s32[j] + >> (rs1); + } + break; + case 64: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s64[j] = env->vfp.vreg[src2].s64[j] + >> (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_common(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vnsrl_vv)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i 
< vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u16[k] + >> (env->vfp.vreg[src1].u8[j] & 0xf); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u32[k] + >> (env->vfp.vreg[src1].u16[j] & 0x1f); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u64[k] + >> (env->vfp.vreg[src1].u32[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vnsrl_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u16[k] + >> (env->gpr[rs1] & 0xf); + } + break; + case 16: + if 
(vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u32[k] + >> (env->gpr[rs1] & 0x1f); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u64[k] + >> (env->gpr[rs1] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vnsrl_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u8[j] = env->vfp.vreg[src2].u16[k] + >> (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u16[j] = env->vfp.vreg[src2].u32[k] + >> (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].u32[j] = env->vfp.vreg[src2].u64[k] + >> (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + +void VECTOR_HELPER(vnsra_vv)(CPURISCVState *env, 
uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src1, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs1, false); + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src1 = rs1 + (i / (VLEN / width)); + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s16[k] + >> (env->vfp.vreg[src1].s8[j] & 0xf); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s32[k] + >> (env->vfp.vreg[src1].s16[j] & 0x1f); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s64[k] + >> (env->vfp.vreg[src1].s32[j] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vnsra_vx)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 
2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s16[k] + >> (env->gpr[rs1] & 0xf); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s32[k] + >> (env->gpr[rs1] & 0x1f); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s64[k] + >> (env->gpr[rs1] & 0x3f); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} +void VECTOR_HELPER(vnsra_vi)(CPURISCVState *env, uint32_t vm, uint32_t rs1, + uint32_t rs2, uint32_t rd) +{ + int i, j, k, vl; + uint32_t lmul, width, src2, dest, vlmax; + + vl = env->vfp.vl; + lmul = vector_get_lmul(env); + width = vector_get_width(env); + vlmax = vector_get_vlmax(env); + + if (vector_vtype_ill(env) || + vector_overlap_vm_common(lmul, vm, rd) || + vector_overlap_dstgp_srcgp(rd, lmul, rs2, 2 * lmul)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + return; + } + vector_lmul_check_reg(env, lmul, rs2, true); + vector_lmul_check_reg(env, lmul, rd, false); + + for (i = 0; i < vlmax; i++) { + src2 = rs2 + (i / (VLEN / (2 * width))); + dest = rd + (i / (VLEN / width)); + j = i % (VLEN / width); + k = i % (VLEN / (2 * width)); + if (i < env->vfp.vstart) { + continue; + } else if (i < vl) { + switch (width) { + case 8: + if (vector_elem_mask(env, vm, width, 
lmul, i)) { + env->vfp.vreg[dest].s8[j] = env->vfp.vreg[src2].s16[k] + >> (rs1); + } + break; + case 16: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s16[j] = env->vfp.vreg[src2].s32[k] + >> (rs1); + } + break; + case 32: + if (vector_elem_mask(env, vm, width, lmul, i)) { + env->vfp.vreg[dest].s32[j] = env->vfp.vreg[src2].s64[k] + >> (rs1); + } + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + break; + } + } else { + vector_tail_narrow(env, dest, j, width); + } + } + env->vfp.vstart = 0; +} + -- 2.7.4
next prev parent reply other threads:[~2019-09-11 6:43 UTC|newest] Thread overview: 86+ messages / expand[flat|nested] mbox.gz Atom feed top 2019-09-11 6:25 [Qemu-devel] [PATCH v2 00/17] RISC-V: support vector extension liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 01/17] RISC-V: add vfp field in CPURISCVState liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 14:51 ` [Qemu-devel] " Chih-Min Chao 2019-09-11 14:51 ` [Qemu-riscv] " Chih-Min Chao 2019-09-11 22:39 ` Richard Henderson 2019-09-11 22:39 ` [Qemu-riscv] " Richard Henderson 2019-09-12 14:53 ` Chih-Min Chao 2019-09-12 14:53 ` [Qemu-riscv] " Chih-Min Chao 2019-09-12 15:06 ` Richard Henderson 2019-09-12 15:06 ` [Qemu-riscv] " Richard Henderson 2019-09-17 8:09 ` liuzhiwei 2019-09-17 8:09 ` [Qemu-riscv] " liuzhiwei 2019-09-11 22:32 ` Richard Henderson 2019-09-11 22:32 ` [Qemu-riscv] " Richard Henderson 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 02/17] RISC-V: turn on vector extension from command line by cfg.ext_v Property liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 15:00 ` [Qemu-devel] " Chih-Min Chao 2019-09-11 15:00 ` [Qemu-riscv] " Chih-Min Chao 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 03/17] RISC-V: support vector extension csr liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 15:25 ` [Qemu-devel] " Chih-Min Chao 2019-09-11 15:25 ` Chih-Min Chao 2019-09-11 22:43 ` [Qemu-devel] " Richard Henderson 2019-09-11 22:43 ` [Qemu-riscv] " Richard Henderson 2019-09-14 13:58 ` Palmer Dabbelt 2019-09-14 13:58 ` [Qemu-riscv] " Palmer Dabbelt 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 04/17] RISC-V: add vector extension configure instruction liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 16:04 ` [Qemu-devel] " Chih-Min Chao 2019-09-11 16:04 ` Chih-Min Chao 2019-09-11 23:09 ` [Qemu-devel] " Richard Henderson 2019-09-11 23:09 ` [Qemu-riscv] " Richard Henderson 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 05/17] RISC-V: 
add vector extension load and store instructions liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-12 14:23 ` [Qemu-devel] " Richard Henderson 2019-09-12 14:23 ` [Qemu-riscv] " Richard Henderson 2020-01-08 1:32 ` LIU Zhiwei 2020-01-08 1:32 ` LIU Zhiwei 2020-01-08 2:08 ` Richard Henderson 2020-01-08 2:08 ` Richard Henderson 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 06/17] RISC-V: add vector extension fault-only-first implementation liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-12 14:32 ` [Qemu-devel] " Richard Henderson 2019-09-12 14:32 ` [Qemu-riscv] " Richard Henderson 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 07/17] RISC-V: add vector extension atomic instructions liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-12 14:57 ` [Qemu-devel] " Richard Henderson 2019-09-12 14:57 ` [Qemu-riscv] " Richard Henderson 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 08/17] RISC-V: add vector extension integer instructions part1, add/sub/adc/sbc liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-12 15:27 ` [Qemu-devel] " Richard Henderson 2019-09-12 15:27 ` [Qemu-riscv] " Richard Henderson 2019-09-12 15:35 ` Richard Henderson 2019-09-12 15:35 ` [Qemu-riscv] " Richard Henderson 2019-09-11 6:25 ` liuzhiwei [this message] 2019-09-11 6:25 ` [Qemu-riscv] [PATCH v2 09/17] RISC-V: add vector extension integer instructions part2, bit/shift liuzhiwei 2019-09-12 16:41 ` [Qemu-devel] " Richard Henderson 2019-09-12 16:41 ` [Qemu-riscv] " Richard Henderson 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 10/17] RISC-V: add vector extension integer instructions part3, cmp/min/max liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 11/17] RISC-V: add vector extension integer instructions part4, mul/div/merge liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 12/17] RISC-V: add vector extension fixed point instructions liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 
2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 13/17] RISC-V: add vector extension float instruction part1, add/sub/mul/div liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 14/17] RISC-V: add vector extension float instructions part2, sqrt/cmp/cvt/others liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 15/17] RISC-V: add vector extension reduction instructions liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-12 16:54 ` [Qemu-devel] " Richard Henderson 2019-09-12 16:54 ` [Qemu-riscv] " Richard Henderson 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 16/17] RISC-V: add vector extension mask instructions liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-12 17:07 ` [Qemu-devel] " Richard Henderson 2019-09-12 17:07 ` [Qemu-riscv] " Richard Henderson 2019-09-11 6:25 ` [Qemu-devel] [PATCH v2 17/17] RISC-V: add vector extension premutation instructions liuzhiwei 2019-09-11 6:25 ` [Qemu-riscv] " liuzhiwei 2019-09-12 17:13 ` [Qemu-devel] " Richard Henderson 2019-09-12 17:13 ` [Qemu-riscv] " Richard Henderson 2019-09-11 7:00 ` [Qemu-devel] [PATCH v2 00/17] RISC-V: support vector extension Aleksandar Markovic 2019-09-11 7:00 ` [Qemu-riscv] " Aleksandar Markovic 2019-09-14 12:59 ` Palmer Dabbelt 2019-09-14 12:59 ` [Qemu-riscv] " Palmer Dabbelt
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=1568183141-67641-10-git-send-email-zhiwei_liu@c-sky.com \ --to=zhiwei_liu@c-sky.com \ --cc=Alistair.Francis@wdc.com \ --cc=kbastian@mail.uni-paderborn.de \ --cc=laurent@vivier.eu \ --cc=palmer@sifive.com \ --cc=qemu-devel@nongnu.org \ --cc=qemu-riscv@nongnu.org \ --cc=riku.voipio@iki.fi \ --cc=sagark@eecs.berkeley.edu \ --cc=wenmeng_zhang@c-sky.com \ --cc=wxy194768@alibaba-inc.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.