* [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code
@ 2022-08-17 7:48 Yang Liu
2022-08-17 7:48 ` [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered Yang Liu
` (2 more replies)
0 siblings, 3 replies; 7+ messages in thread
From: Yang Liu @ 2022-08-17 7:48 UTC (permalink / raw)
To: palmer, alistair.francis, bin.meng, qemu-riscv, qemu-devel
Cc: wangjunqiang, lazyparser, liweiwei, Yang Liu
Remove duplicate code by wrapping vfwredsum_vs's OP function.
Signed-off-by: Yang Liu <liuyang22@iscas.ac.cn>
---
target/riscv/vector_helper.c | 56 +++++++-----------------------------
1 file changed, 10 insertions(+), 46 deletions(-)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index a96fc49c71..fd83c0b20b 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -4655,58 +4655,22 @@ GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minimum_number)
GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minimum_number)
GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minimum_number)
-/* Vector Widening Floating-Point Reduction Instructions */
-/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
-void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
- void *vs2, CPURISCVState *env, uint32_t desc)
+/* Vector Widening Floating-Point Add Instructions */
+static uint32_t fwadd16(uint32_t a, uint16_t b, float_status *s)
{
- uint32_t vm = vext_vm(desc);
- uint32_t vl = env->vl;
- uint32_t esz = sizeof(uint32_t);
- uint32_t vlenb = simd_maxsz(desc);
- uint32_t vta = vext_vta(desc);
- uint32_t i;
- uint32_t s1 = *((uint32_t *)vs1 + H4(0));
-
- for (i = env->vstart; i < vl; i++) {
- uint16_t s2 = *((uint16_t *)vs2 + H2(i));
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
- s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
- &env->fp_status);
- }
- *((uint32_t *)vd + H4(0)) = s1;
- env->vstart = 0;
- /* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, esz, vlenb);
+ return float32_add(a, float16_to_float32(b, true, s), s);
}
-void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
- void *vs2, CPURISCVState *env, uint32_t desc)
+static uint64_t fwadd32(uint64_t a, uint32_t b, float_status *s)
{
- uint32_t vm = vext_vm(desc);
- uint32_t vl = env->vl;
- uint32_t esz = sizeof(uint64_t);
- uint32_t vlenb = simd_maxsz(desc);
- uint32_t vta = vext_vta(desc);
- uint32_t i;
- uint64_t s1 = *((uint64_t *)vs1);
-
- for (i = env->vstart; i < vl; i++) {
- uint32_t s2 = *((uint32_t *)vs2 + H4(i));
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
- s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
- &env->fp_status);
- }
- *((uint64_t *)vd) = s1;
- env->vstart = 0;
- /* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, esz, vlenb);
+ return float64_add(a, float32_to_float64(b, s), s);
}
+/* Vector Widening Floating-Point Reduction Instructions */
+/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
+GEN_VEXT_FRED(vfwredsum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
+GEN_VEXT_FRED(vfwredsum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
+
/*
*** Vector Mask Operations
*/
--
2.30.1 (Apple Git-130)
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered
2022-08-17 7:48 [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code Yang Liu
@ 2022-08-17 7:48 ` Yang Liu
2022-09-23 4:57 ` Alistair Francis
` (2 more replies)
2022-09-23 4:50 ` [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code Alistair Francis
2022-09-25 7:25 ` Frank Chang
2 siblings, 3 replies; 7+ messages in thread
From: Yang Liu @ 2022-08-17 7:48 UTC (permalink / raw)
To: palmer, alistair.francis, bin.meng, qemu-riscv, qemu-devel
Cc: wangjunqiang, lazyparser, liweiwei, Yang Liu
Starting with RVV1.0, the original vf[w]redsum_vs instruction was renamed
to vf[w]redusum_vs. The distinction between ordered and unordered is also
more consistent with other instructions, although there is no difference
in implementation between the two for QEMU.
Signed-off-by: Yang Liu <liuyang22@iscas.ac.cn>
---
target/riscv/helper.h | 15 ++++++++++-----
target/riscv/insn32.decode | 6 ++++--
target/riscv/insn_trans/trans_rvv.c.inc | 6 ++++--
target/riscv/vector_helper.c | 19 +++++++++++++------
4 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 4ef3b2251d..a03014fe67 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1009,9 +1009,12 @@ DEF_HELPER_6(vwredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vfredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vfredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vfredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredusum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredosum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
@@ -1019,8 +1022,10 @@ DEF_HELPER_6(vfredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 4033565393..2873a7ae04 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -659,11 +659,13 @@ vredmax_vs 000111 . ..... ..... 010 ..... 1010111 @r_vm
vwredsumu_vs 110000 . ..... ..... 000 ..... 1010111 @r_vm
vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm
# Vector ordered and unordered reduction sum
-vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm
+vfredusum_vs 000001 . ..... ..... 001 ..... 1010111 @r_vm
+vfredosum_vs 000011 . ..... ..... 001 ..... 1010111 @r_vm
vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm
vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
# Vector widening ordered and unordered float reduction sum
-vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm
+vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm
+vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm
vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 6c091824b6..9c9de17f8a 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3112,7 +3112,8 @@ static bool freduction_check(DisasContext *s, arg_rmrr *a)
require_zve64f(s);
}
-GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
+GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
+GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
@@ -3124,7 +3125,8 @@ static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
(s->sew != MO_8);
}
-GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)
/*
*** Vector Mask Operations
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index fd83c0b20b..d87f79ad82 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -4641,9 +4641,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
}
/* Unordered sum */
-GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
-GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
-GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
+GEN_VEXT_FRED(vfredusum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
+GEN_VEXT_FRED(vfredusum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
+GEN_VEXT_FRED(vfredusum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
+
+/* Ordered sum */
+GEN_VEXT_FRED(vfredosum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
+GEN_VEXT_FRED(vfredosum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
+GEN_VEXT_FRED(vfredosum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
/* Maximum value */
GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maximum_number)
@@ -4667,9 +4672,11 @@ static uint64_t fwadd32(uint64_t a, uint32_t b, float_status *s)
}
/* Vector Widening Floating-Point Reduction Instructions */
-/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
-GEN_VEXT_FRED(vfwredsum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
-GEN_VEXT_FRED(vfwredsum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
+/* Ordered/unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
+GEN_VEXT_FRED(vfwredusum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
+GEN_VEXT_FRED(vfwredusum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
+GEN_VEXT_FRED(vfwredosum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
+GEN_VEXT_FRED(vfwredosum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
/*
*** Vector Mask Operations
--
2.30.1 (Apple Git-130)
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code
2022-08-17 7:48 [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code Yang Liu
2022-08-17 7:48 ` [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered Yang Liu
@ 2022-09-23 4:50 ` Alistair Francis
2022-09-25 7:25 ` Frank Chang
2 siblings, 0 replies; 7+ messages in thread
From: Alistair Francis @ 2022-09-23 4:50 UTC (permalink / raw)
To: Yang Liu
Cc: Palmer Dabbelt, Alistair Francis, Bin Meng, open list:RISC-V,
qemu-devel@nongnu.org Developers, wangjunqiang,
Wei Wu (吴伟),
liweiwei
On Thu, Aug 18, 2022 at 1:57 AM Yang Liu <liuyang22@iscas.ac.cn> wrote:
>
> Remove duplicate code by wrapping vfwredsum_vs's OP function.
>
> Signed-off-by: Yang Liu <liuyang22@iscas.ac.cn>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/vector_helper.c | 56 +++++++-----------------------------
> 1 file changed, 10 insertions(+), 46 deletions(-)
>
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index a96fc49c71..fd83c0b20b 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -4655,58 +4655,22 @@ GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minimum_number)
> GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minimum_number)
> GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minimum_number)
>
> -/* Vector Widening Floating-Point Reduction Instructions */
> -/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> -void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
> - void *vs2, CPURISCVState *env, uint32_t desc)
> +/* Vector Widening Floating-Point Add Instructions */
> +static uint32_t fwadd16(uint32_t a, uint16_t b, float_status *s)
> {
> - uint32_t vm = vext_vm(desc);
> - uint32_t vl = env->vl;
> - uint32_t esz = sizeof(uint32_t);
> - uint32_t vlenb = simd_maxsz(desc);
> - uint32_t vta = vext_vta(desc);
> - uint32_t i;
> - uint32_t s1 = *((uint32_t *)vs1 + H4(0));
> -
> - for (i = env->vstart; i < vl; i++) {
> - uint16_t s2 = *((uint16_t *)vs2 + H2(i));
> - if (!vm && !vext_elem_mask(v0, i)) {
> - continue;
> - }
> - s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
> - &env->fp_status);
> - }
> - *((uint32_t *)vd + H4(0)) = s1;
> - env->vstart = 0;
> - /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, esz, vlenb);
> + return float32_add(a, float16_to_float32(b, true, s), s);
> }
>
> -void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
> - void *vs2, CPURISCVState *env, uint32_t desc)
> +static uint64_t fwadd32(uint64_t a, uint32_t b, float_status *s)
> {
> - uint32_t vm = vext_vm(desc);
> - uint32_t vl = env->vl;
> - uint32_t esz = sizeof(uint64_t);
> - uint32_t vlenb = simd_maxsz(desc);
> - uint32_t vta = vext_vta(desc);
> - uint32_t i;
> - uint64_t s1 = *((uint64_t *)vs1);
> -
> - for (i = env->vstart; i < vl; i++) {
> - uint32_t s2 = *((uint32_t *)vs2 + H4(i));
> - if (!vm && !vext_elem_mask(v0, i)) {
> - continue;
> - }
> - s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
> - &env->fp_status);
> - }
> - *((uint64_t *)vd) = s1;
> - env->vstart = 0;
> - /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, esz, vlenb);
> + return float64_add(a, float32_to_float64(b, s), s);
> }
>
> +/* Vector Widening Floating-Point Reduction Instructions */
> +/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> +GEN_VEXT_FRED(vfwredsum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> +GEN_VEXT_FRED(vfwredsum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
> +
> /*
> *** Vector Mask Operations
> */
> --
> 2.30.1 (Apple Git-130)
>
>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered
2022-08-17 7:48 ` [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered Yang Liu
@ 2022-09-23 4:57 ` Alistair Francis
2022-09-25 7:35 ` Frank Chang
2022-09-25 21:17 ` Alistair Francis
2 siblings, 0 replies; 7+ messages in thread
From: Alistair Francis @ 2022-09-23 4:57 UTC (permalink / raw)
To: Yang Liu
Cc: Palmer Dabbelt, Alistair Francis, Bin Meng, open list:RISC-V,
qemu-devel@nongnu.org Developers, wangjunqiang,
Wei Wu (吴伟),
liweiwei
On Thu, Aug 18, 2022 at 1:43 AM Yang Liu <liuyang22@iscas.ac.cn> wrote:
>
> Starting with RVV1.0, the original vf[w]redsum_vs instruction was renamed
> to vf[w]redusum_vs. The distinction between ordered and unordered is also
> more consistent with other instructions, although there is no difference
> in implementation between the two for QEMU.
>
> Signed-off-by: Yang Liu <liuyang22@iscas.ac.cn>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/helper.h | 15 ++++++++++-----
> target/riscv/insn32.decode | 6 ++++--
> target/riscv/insn_trans/trans_rvv.c.inc | 6 ++++--
> target/riscv/vector_helper.c | 19 +++++++++++++------
> 4 files changed, 31 insertions(+), 15 deletions(-)
>
> diff --git a/target/riscv/helper.h b/target/riscv/helper.h
> index 4ef3b2251d..a03014fe67 100644
> --- a/target/riscv/helper.h
> +++ b/target/riscv/helper.h
> @@ -1009,9 +1009,12 @@ DEF_HELPER_6(vwredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
>
> -DEF_HELPER_6(vfredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> @@ -1019,8 +1022,10 @@ DEF_HELPER_6(vfredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
>
> -DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
>
> DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
> diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
> index 4033565393..2873a7ae04 100644
> --- a/target/riscv/insn32.decode
> +++ b/target/riscv/insn32.decode
> @@ -659,11 +659,13 @@ vredmax_vs 000111 . ..... ..... 010 ..... 1010111 @r_vm
> vwredsumu_vs 110000 . ..... ..... 000 ..... 1010111 @r_vm
> vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm
> # Vector ordered and unordered reduction sum
> -vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm
> +vfredusum_vs 000001 . ..... ..... 001 ..... 1010111 @r_vm
> +vfredosum_vs 000011 . ..... ..... 001 ..... 1010111 @r_vm
> vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm
> vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
> # Vector widening ordered and unordered float reduction sum
> -vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm
> vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
> vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
> vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
> diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
> index 6c091824b6..9c9de17f8a 100644
> --- a/target/riscv/insn_trans/trans_rvv.c.inc
> +++ b/target/riscv/insn_trans/trans_rvv.c.inc
> @@ -3112,7 +3112,8 @@ static bool freduction_check(DisasContext *s, arg_rmrr *a)
> require_zve64f(s);
> }
>
> -GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
> +GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
> +GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
> GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
> GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
>
> @@ -3124,7 +3125,8 @@ static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
> (s->sew != MO_8);
> }
>
> -GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)
>
> /*
> *** Vector Mask Operations
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index fd83c0b20b..d87f79ad82 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -4641,9 +4641,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
> }
>
> /* Unordered sum */
> -GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> -GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> -GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
> +GEN_VEXT_FRED(vfredusum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> +GEN_VEXT_FRED(vfredusum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> +GEN_VEXT_FRED(vfredusum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
> +
> +/* Ordered sum */
> +GEN_VEXT_FRED(vfredosum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> +GEN_VEXT_FRED(vfredosum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> +GEN_VEXT_FRED(vfredosum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
>
> /* Maximum value */
> GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maximum_number)
> @@ -4667,9 +4672,11 @@ static uint64_t fwadd32(uint64_t a, uint32_t b, float_status *s)
> }
>
> /* Vector Widening Floating-Point Reduction Instructions */
> -/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> -GEN_VEXT_FRED(vfwredsum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> -GEN_VEXT_FRED(vfwredsum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
> +/* Ordered/unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> +GEN_VEXT_FRED(vfwredusum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> +GEN_VEXT_FRED(vfwredusum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
> +GEN_VEXT_FRED(vfwredosum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> +GEN_VEXT_FRED(vfwredosum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
>
> /*
> *** Vector Mask Operations
> --
> 2.30.1 (Apple Git-130)
>
>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code
2022-08-17 7:48 [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code Yang Liu
2022-08-17 7:48 ` [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered Yang Liu
2022-09-23 4:50 ` [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code Alistair Francis
@ 2022-09-25 7:25 ` Frank Chang
2 siblings, 0 replies; 7+ messages in thread
From: Frank Chang @ 2022-09-25 7:25 UTC (permalink / raw)
To: Yang Liu
Cc: palmer, alistair.francis, bin.meng, qemu-riscv, qemu-devel,
wangjunqiang, lazyparser, liweiwei
[-- Attachment #1: Type: text/plain, Size: 3361 bytes --]
Reviewed-by: Frank Chang <frank.chang@sifive.com>
On Wed, Aug 17, 2022 at 11:32 PM Yang Liu <liuyang22@iscas.ac.cn> wrote:
> Remove duplicate code by wrapping vfwredsum_vs's OP function.
>
> Signed-off-by: Yang Liu <liuyang22@iscas.ac.cn>
> ---
> target/riscv/vector_helper.c | 56 +++++++-----------------------------
> 1 file changed, 10 insertions(+), 46 deletions(-)
>
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index a96fc49c71..fd83c0b20b 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -4655,58 +4655,22 @@ GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t,
> H2, H2, float16_minimum_number)
> GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4,
> float32_minimum_number)
> GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8,
> float64_minimum_number)
>
> -/* Vector Widening Floating-Point Reduction Instructions */
> -/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> -void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
> - void *vs2, CPURISCVState *env, uint32_t desc)
> +/* Vector Widening Floating-Point Add Instructions */
> +static uint32_t fwadd16(uint32_t a, uint16_t b, float_status *s)
> {
> - uint32_t vm = vext_vm(desc);
> - uint32_t vl = env->vl;
> - uint32_t esz = sizeof(uint32_t);
> - uint32_t vlenb = simd_maxsz(desc);
> - uint32_t vta = vext_vta(desc);
> - uint32_t i;
> - uint32_t s1 = *((uint32_t *)vs1 + H4(0));
> -
> - for (i = env->vstart; i < vl; i++) {
> - uint16_t s2 = *((uint16_t *)vs2 + H2(i));
> - if (!vm && !vext_elem_mask(v0, i)) {
> - continue;
> - }
> - s1 = float32_add(s1, float16_to_float32(s2, true,
> &env->fp_status),
> - &env->fp_status);
> - }
> - *((uint32_t *)vd + H4(0)) = s1;
> - env->vstart = 0;
> - /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, esz, vlenb);
> + return float32_add(a, float16_to_float32(b, true, s), s);
> }
>
> -void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
> - void *vs2, CPURISCVState *env, uint32_t desc)
> +static uint64_t fwadd32(uint64_t a, uint32_t b, float_status *s)
> {
> - uint32_t vm = vext_vm(desc);
> - uint32_t vl = env->vl;
> - uint32_t esz = sizeof(uint64_t);
> - uint32_t vlenb = simd_maxsz(desc);
> - uint32_t vta = vext_vta(desc);
> - uint32_t i;
> - uint64_t s1 = *((uint64_t *)vs1);
> -
> - for (i = env->vstart; i < vl; i++) {
> - uint32_t s2 = *((uint32_t *)vs2 + H4(i));
> - if (!vm && !vext_elem_mask(v0, i)) {
> - continue;
> - }
> - s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
> - &env->fp_status);
> - }
> - *((uint64_t *)vd) = s1;
> - env->vstart = 0;
> - /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, esz, vlenb);
> + return float64_add(a, float32_to_float64(b, s), s);
> }
>
> +/* Vector Widening Floating-Point Reduction Instructions */
> +/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> +GEN_VEXT_FRED(vfwredsum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> +GEN_VEXT_FRED(vfwredsum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
> +
> /*
> *** Vector Mask Operations
> */
> --
> 2.30.1 (Apple Git-130)
>
>
>
[-- Attachment #2: Type: text/html, Size: 4246 bytes --]
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered
2022-08-17 7:48 ` [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered Yang Liu
2022-09-23 4:57 ` Alistair Francis
@ 2022-09-25 7:35 ` Frank Chang
2022-09-25 21:17 ` Alistair Francis
2 siblings, 0 replies; 7+ messages in thread
From: Frank Chang @ 2022-09-25 7:35 UTC (permalink / raw)
To: Yang Liu
Cc: palmer, alistair.francis, bin.meng, qemu-riscv, qemu-devel,
wangjunqiang, lazyparser, liweiwei
[-- Attachment #1: Type: text/plain, Size: 6778 bytes --]
Reviewed-by: Frank Chang <frank.chang@sifive.com>
On Wed, Aug 17, 2022 at 11:45 PM Yang Liu <liuyang22@iscas.ac.cn> wrote:
> Starting with RVV1.0, the original vf[w]redsum_vs instruction was renamed
> to vf[w]redusum_vs. The distinction between ordered and unordered is also
> more consistent with other instructions, although there is no difference
> in implementation between the two for QEMU.
>
> Signed-off-by: Yang Liu <liuyang22@iscas.ac.cn>
> ---
> target/riscv/helper.h | 15 ++++++++++-----
> target/riscv/insn32.decode | 6 ++++--
> target/riscv/insn_trans/trans_rvv.c.inc | 6 ++++--
> target/riscv/vector_helper.c | 19 +++++++++++++------
> 4 files changed, 31 insertions(+), 15 deletions(-)
>
> diff --git a/target/riscv/helper.h b/target/riscv/helper.h
> index 4ef3b2251d..a03014fe67 100644
> --- a/target/riscv/helper.h
> +++ b/target/riscv/helper.h
> @@ -1009,9 +1009,12 @@ DEF_HELPER_6(vwredsum_vs_b, void, ptr, ptr, ptr,
> ptr, env, i32)
> DEF_HELPER_6(vwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
>
> -DEF_HELPER_6(vfredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> @@ -1019,8 +1022,10 @@ DEF_HELPER_6(vfredmin_vs_h, void, ptr, ptr, ptr,
> ptr, env, i32)
> DEF_HELPER_6(vfredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
>
> -DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
>
> DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
> diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
> index 4033565393..2873a7ae04 100644
> --- a/target/riscv/insn32.decode
> +++ b/target/riscv/insn32.decode
> @@ -659,11 +659,13 @@ vredmax_vs 000111 . ..... ..... 010 .....
> 1010111 @r_vm
> vwredsumu_vs 110000 . ..... ..... 000 ..... 1010111 @r_vm
> vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm
> # Vector ordered and unordered reduction sum
> -vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm
> +vfredusum_vs 000001 . ..... ..... 001 ..... 1010111 @r_vm
> +vfredosum_vs 000011 . ..... ..... 001 ..... 1010111 @r_vm
> vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm
> vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
> # Vector widening ordered and unordered float reduction sum
> -vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm
> vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
> vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
> vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
> diff --git a/target/riscv/insn_trans/trans_rvv.c.inc
> b/target/riscv/insn_trans/trans_rvv.c.inc
> index 6c091824b6..9c9de17f8a 100644
> --- a/target/riscv/insn_trans/trans_rvv.c.inc
> +++ b/target/riscv/insn_trans/trans_rvv.c.inc
> @@ -3112,7 +3112,8 @@ static bool freduction_check(DisasContext *s,
> arg_rmrr *a)
> require_zve64f(s);
> }
>
> -GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
> +GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
> +GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
> GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
> GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
>
> @@ -3124,7 +3125,8 @@ static bool freduction_widen_check(DisasContext *s,
> arg_rmrr *a)
> (s->sew != MO_8);
> }
>
> -GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)
>
> /*
> *** Vector Mask Operations
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index fd83c0b20b..d87f79ad82 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -4641,9 +4641,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,
> \
> }
>
> /* Unordered sum */
> -GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> -GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> -GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
> +GEN_VEXT_FRED(vfredusum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> +GEN_VEXT_FRED(vfredusum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> +GEN_VEXT_FRED(vfredusum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
> +
> +/* Ordered sum */
> +GEN_VEXT_FRED(vfredosum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> +GEN_VEXT_FRED(vfredosum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> +GEN_VEXT_FRED(vfredosum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
>
> /* Maximum value */
> GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2,
> float16_maximum_number)
> @@ -4667,9 +4672,11 @@ static uint64_t fwadd32(uint64_t a, uint32_t b,
> float_status *s)
> }
>
> /* Vector Widening Floating-Point Reduction Instructions */
> -/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> -GEN_VEXT_FRED(vfwredsum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> -GEN_VEXT_FRED(vfwredsum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
> +/* Ordered/unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> +GEN_VEXT_FRED(vfwredusum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> +GEN_VEXT_FRED(vfwredusum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
> +GEN_VEXT_FRED(vfwredosum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> +GEN_VEXT_FRED(vfwredosum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
>
> /*
> *** Vector Mask Operations
> --
> 2.30.1 (Apple Git-130)
>
>
>
[-- Attachment #2: Type: text/html, Size: 7688 bytes --]
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered
2022-08-17 7:48 ` [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered Yang Liu
2022-09-23 4:57 ` Alistair Francis
2022-09-25 7:35 ` Frank Chang
@ 2022-09-25 21:17 ` Alistair Francis
2 siblings, 0 replies; 7+ messages in thread
From: Alistair Francis @ 2022-09-25 21:17 UTC (permalink / raw)
To: Yang Liu
Cc: Palmer Dabbelt, Alistair Francis, Bin Meng, open list:RISC-V,
qemu-devel@nongnu.org Developers, wangjunqiang,
Wei Wu (吴伟),
liweiwei
On Thu, Aug 18, 2022 at 1:43 AM Yang Liu <liuyang22@iscas.ac.cn> wrote:
>
> Starting with RVV 1.0, the original vf[w]redsum_vs instruction was renamed
> to vf[w]redusum_vs. Distinguishing between the ordered and unordered
> variants is also more consistent with other instructions, although the two
> are implemented identically in QEMU.
>
> Signed-off-by: Yang Liu <liuyang22@iscas.ac.cn>
Thanks!
Applied to riscv-to-apply.next
Alistair
> ---
> target/riscv/helper.h | 15 ++++++++++-----
> target/riscv/insn32.decode | 6 ++++--
> target/riscv/insn_trans/trans_rvv.c.inc | 6 ++++--
> target/riscv/vector_helper.c | 19 +++++++++++++------
> 4 files changed, 31 insertions(+), 15 deletions(-)
>
> diff --git a/target/riscv/helper.h b/target/riscv/helper.h
> index 4ef3b2251d..a03014fe67 100644
> --- a/target/riscv/helper.h
> +++ b/target/riscv/helper.h
> @@ -1009,9 +1009,12 @@ DEF_HELPER_6(vwredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
>
> -DEF_HELPER_6(vfredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredusum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfredosum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> @@ -1019,8 +1022,10 @@ DEF_HELPER_6(vfredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
>
> -DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> -DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredusum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredusum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredosum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vfwredosum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
>
> DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
> DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
> diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
> index 4033565393..2873a7ae04 100644
> --- a/target/riscv/insn32.decode
> +++ b/target/riscv/insn32.decode
> @@ -659,11 +659,13 @@ vredmax_vs 000111 . ..... ..... 010 ..... 1010111 @r_vm
> vwredsumu_vs 110000 . ..... ..... 000 ..... 1010111 @r_vm
> vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm
> # Vector ordered and unordered reduction sum
> -vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm
> +vfredusum_vs 000001 . ..... ..... 001 ..... 1010111 @r_vm
> +vfredosum_vs 000011 . ..... ..... 001 ..... 1010111 @r_vm
> vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm
> vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
> # Vector widening ordered and unordered float reduction sum
> -vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm
> +vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm
> vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
> vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
> vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
> diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
> index 6c091824b6..9c9de17f8a 100644
> --- a/target/riscv/insn_trans/trans_rvv.c.inc
> +++ b/target/riscv/insn_trans/trans_rvv.c.inc
> @@ -3112,7 +3112,8 @@ static bool freduction_check(DisasContext *s, arg_rmrr *a)
> require_zve64f(s);
> }
>
> -GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
> +GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
> +GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
> GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
> GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
>
> @@ -3124,7 +3125,8 @@ static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
> (s->sew != MO_8);
> }
>
> -GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
> +GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)
>
> /*
> *** Vector Mask Operations
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index fd83c0b20b..d87f79ad82 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -4641,9 +4641,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
> }
>
> /* Unordered sum */
> -GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> -GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> -GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
> +GEN_VEXT_FRED(vfredusum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> +GEN_VEXT_FRED(vfredusum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> +GEN_VEXT_FRED(vfredusum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
> +
> +/* Ordered sum */
> +GEN_VEXT_FRED(vfredosum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
> +GEN_VEXT_FRED(vfredosum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
> +GEN_VEXT_FRED(vfredosum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
>
> /* Maximum value */
> GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maximum_number)
> @@ -4667,9 +4672,11 @@ static uint64_t fwadd32(uint64_t a, uint32_t b, float_status *s)
> }
>
> /* Vector Widening Floating-Point Reduction Instructions */
> -/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> -GEN_VEXT_FRED(vfwredsum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> -GEN_VEXT_FRED(vfwredsum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
> +/* Ordered/unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
> +GEN_VEXT_FRED(vfwredusum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> +GEN_VEXT_FRED(vfwredusum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
> +GEN_VEXT_FRED(vfwredosum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
> +GEN_VEXT_FRED(vfwredosum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
>
> /*
> *** Vector Mask Operations
> --
> 2.30.1 (Apple Git-130)
>
>
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2022-09-25 21:19 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-08-17 7:48 [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code Yang Liu
2022-08-17 7:48 ` [PATCH 2/2] target/riscv: rvv-1.0: vf[w]redsum distinguish between ordered/unordered Yang Liu
2022-09-23 4:57 ` Alistair Francis
2022-09-25 7:35 ` Frank Chang
2022-09-25 21:17 ` Alistair Francis
2022-09-23 4:50 ` [PATCH 1/2] target/riscv: rvv-1.0: Simplify vfwredsum code Alistair Francis
2022-09-25 7:25 ` Frank Chang
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.