All of lore.kernel.org
 help / color / mirror / Atom feed
From: Alistair Francis <alistair23@gmail.com>
To: LIU Zhiwei <zhiwei_liu@c-sky.com>
Cc: Richard Henderson <richard.henderson@linaro.org>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	"open list:RISC-V" <qemu-riscv@nongnu.org>,
	"qemu-devel@nongnu.org Developers" <qemu-devel@nongnu.org>
Subject: Re: [PATCH 02/38] target/riscv: Hoist vector functions
Date: Tue, 9 Mar 2021 09:10:28 -0500	[thread overview]
Message-ID: <CAKmqyKM5AAAi42p-mMot_dhrcs59QDAbVdgHpnvtZr-3YEKnaQ@mail.gmail.com> (raw)
In-Reply-To: <20210212150256.885-3-zhiwei_liu@c-sky.com>

On Fri, Feb 12, 2021 at 10:07 AM LIU Zhiwei <zhiwei_liu@c-sky.com> wrote:
>
> The saturation functions for add, subtract, and shift can
> be used in the packed extension. Therefore hoist them up.

A better title might be:

target/riscv: Make the vector helper functions public

Otherwise:

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

>
> The endianness processing macros are also hoisted.
>
> Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
> ---
>  target/riscv/internals.h     | 50 ++++++++++++++++++++++
>  target/riscv/vector_helper.c | 82 +++++++++++-------------------------
>  2 files changed, 74 insertions(+), 58 deletions(-)
>
> diff --git a/target/riscv/internals.h b/target/riscv/internals.h
> index b15ad394bb..698158e116 100644
> --- a/target/riscv/internals.h
> +++ b/target/riscv/internals.h
> @@ -58,4 +58,54 @@ static inline float32 check_nanbox_s(uint64_t f)
>      }
>  }
>
> +/*
> + * Note that vector data is stored in host-endian 64-bit chunks,
> + * so addressing units smaller than that needs a host-endian fixup.
> + */
> +#ifdef HOST_WORDS_BIGENDIAN
> +#define H1(x)   ((x) ^ 7)
> +#define H1_2(x) ((x) ^ 6)
> +#define H1_4(x) ((x) ^ 4)
> +#define H2(x)   ((x) ^ 3)
> +#define H4(x)   ((x) ^ 1)
> +#define H8(x)   ((x))
> +#else
> +#define H1(x)   (x)
> +#define H1_2(x) (x)
> +#define H1_4(x) (x)
> +#define H2(x)   (x)
> +#define H4(x)   (x)
> +#define H8(x)   (x)
> +#endif
> +
> +/* share functions about saturation */
> +int8_t sadd8(CPURISCVState *, int vxrm, int8_t, int8_t);
> +int16_t sadd16(CPURISCVState *, int vxrm, int16_t, int16_t);
> +int32_t sadd32(CPURISCVState *, int vxrm, int32_t, int32_t);
> +int64_t sadd64(CPURISCVState *, int vxrm, int64_t, int64_t);
> +
> +uint8_t saddu8(CPURISCVState *, int vxrm, uint8_t, uint8_t);
> +uint16_t saddu16(CPURISCVState *, int vxrm, uint16_t, uint16_t);
> +uint32_t saddu32(CPURISCVState *, int vxrm, uint32_t, uint32_t);
> +uint64_t saddu64(CPURISCVState *, int vxrm, uint64_t, uint64_t);
> +
> +int8_t ssub8(CPURISCVState *, int vxrm, int8_t, int8_t);
> +int16_t ssub16(CPURISCVState *, int vxrm, int16_t, int16_t);
> +int32_t ssub32(CPURISCVState *, int vxrm, int32_t, int32_t);
> +int64_t ssub64(CPURISCVState *, int vxrm, int64_t, int64_t);
> +
> +uint8_t ssubu8(CPURISCVState *, int vxrm, uint8_t, uint8_t);
> +uint16_t ssubu16(CPURISCVState *, int vxrm, uint16_t, uint16_t);
> +uint32_t ssubu32(CPURISCVState *, int vxrm, uint32_t, uint32_t);
> +uint64_t ssubu64(CPURISCVState *, int vxrm, uint64_t, uint64_t);
> +
> +/* share shift functions */
> +int8_t vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b);
> +int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b);
> +int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b);
> +int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
> +uint8_t vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b);
> +uint16_t vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b);
> +uint32_t vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b);
> +uint64_t vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b);
>  #endif
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index a156573d28..9371d70f6b 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -56,26 +56,6 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
>      return vl;
>  }
>
> -/*
> - * Note that vector data is stored in host-endian 64-bit chunks,
> - * so addressing units smaller than that needs a host-endian fixup.
> - */
> -#ifdef HOST_WORDS_BIGENDIAN
> -#define H1(x)   ((x) ^ 7)
> -#define H1_2(x) ((x) ^ 6)
> -#define H1_4(x) ((x) ^ 4)
> -#define H2(x)   ((x) ^ 3)
> -#define H4(x)   ((x) ^ 1)
> -#define H8(x)   ((x))
> -#else
> -#define H1(x)   (x)
> -#define H1_2(x) (x)
> -#define H1_4(x) (x)
> -#define H2(x)   (x)
> -#define H4(x)   (x)
> -#define H8(x)   (x)
> -#endif
> -
>  static inline uint32_t vext_nf(uint32_t desc)
>  {
>      return FIELD_EX32(simd_data(desc), VDATA, NF);
> @@ -2199,7 +2179,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,     \
>                   do_##NAME, CLEAR_FN);                          \
>  }
>
> -static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
> +uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>  {
>      uint8_t res = a + b;
>      if (res < a) {
> @@ -2209,8 +2189,7 @@ static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>      return res;
>  }
>
> -static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a,
> -                               uint16_t b)
> +uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
>  {
>      uint16_t res = a + b;
>      if (res < a) {
> @@ -2220,8 +2199,7 @@ static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a,
>      return res;
>  }
>
> -static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a,
> -                               uint32_t b)
> +uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
>  {
>      uint32_t res = a + b;
>      if (res < a) {
> @@ -2231,8 +2209,7 @@ static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a,
>      return res;
>  }
>
> -static inline uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a,
> -                               uint64_t b)
> +uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
>  {
>      uint64_t res = a + b;
>      if (res < a) {
> @@ -2328,7 +2305,7 @@ GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2, clearh)
>  GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4, clearl)
>  GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8, clearq)
>
> -static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
> +int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>  {
>      int8_t res = a + b;
>      if ((res ^ a) & (res ^ b) & INT8_MIN) {
> @@ -2338,7 +2315,7 @@ static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>      return res;
>  }
>
> -static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
> +int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>  {
>      int16_t res = a + b;
>      if ((res ^ a) & (res ^ b) & INT16_MIN) {
> @@ -2348,7 +2325,7 @@ static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>      return res;
>  }
>
> -static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
> +int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>  {
>      int32_t res = a + b;
>      if ((res ^ a) & (res ^ b) & INT32_MIN) {
> @@ -2358,7 +2335,7 @@ static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>      return res;
>  }
>
> -static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
> +int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
>  {
>      int64_t res = a + b;
>      if ((res ^ a) & (res ^ b) & INT64_MIN) {
> @@ -2386,7 +2363,7 @@ GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh)
>  GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl)
>  GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq)
>
> -static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
> +uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>  {
>      uint8_t res = a - b;
>      if (res > a) {
> @@ -2396,8 +2373,7 @@ static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>      return res;
>  }
>
> -static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a,
> -                               uint16_t b)
> +uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
>  {
>      uint16_t res = a - b;
>      if (res > a) {
> @@ -2407,8 +2383,7 @@ static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a,
>      return res;
>  }
>
> -static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a,
> -                               uint32_t b)
> +uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
>  {
>      uint32_t res = a - b;
>      if (res > a) {
> @@ -2418,8 +2393,7 @@ static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a,
>      return res;
>  }
>
> -static inline uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a,
> -                               uint64_t b)
> +uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
>  {
>      uint64_t res = a - b;
>      if (res > a) {
> @@ -2447,7 +2421,7 @@ GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh)
>  GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl)
>  GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq)
>
> -static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
> +int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>  {
>      int8_t res = a - b;
>      if ((res ^ a) & (a ^ b) & INT8_MIN) {
> @@ -2457,7 +2431,7 @@ static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>      return res;
>  }
>
> -static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
> +int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>  {
>      int16_t res = a - b;
>      if ((res ^ a) & (a ^ b) & INT16_MIN) {
> @@ -2467,7 +2441,7 @@ static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>      return res;
>  }
>
> -static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
> +int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>  {
>      int32_t res = a - b;
>      if ((res ^ a) & (a ^ b) & INT32_MIN) {
> @@ -2477,7 +2451,7 @@ static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>      return res;
>  }
>
> -static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
> +int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
>  {
>      int64_t res = a - b;
>      if ((res ^ a) & (a ^ b) & INT64_MIN) {
> @@ -2918,8 +2892,7 @@ GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl)
>  GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq)
>
>  /* Vector Single-Width Scaling Shift Instructions */
> -static inline uint8_t
> -vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
> +uint8_t vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>  {
>      uint8_t round, shift = b & 0x7;
>      uint8_t res;
> @@ -2928,8 +2901,7 @@ vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline uint16_t
> -vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
> +uint16_t vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
>  {
>      uint8_t round, shift = b & 0xf;
>      uint16_t res;
> @@ -2938,8 +2910,7 @@ vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline uint32_t
> -vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
> +uint32_t vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
>  {
>      uint8_t round, shift = b & 0x1f;
>      uint32_t res;
> @@ -2948,8 +2919,7 @@ vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline uint64_t
> -vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
> +uint64_t vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
>  {
>      uint8_t round, shift = b & 0x3f;
>      uint64_t res;
> @@ -2976,8 +2946,7 @@ GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh)
>  GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl)
>  GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq)
>
> -static inline int8_t
> -vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
> +int8_t vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>  {
>      uint8_t round, shift = b & 0x7;
>      int8_t res;
> @@ -2986,8 +2955,7 @@ vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline int16_t
> -vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
> +int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>  {
>      uint8_t round, shift = b & 0xf;
>      int16_t res;
> @@ -2996,8 +2964,7 @@ vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline int32_t
> -vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
> +int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>  {
>      uint8_t round, shift = b & 0x1f;
>      int32_t res;
> @@ -3006,8 +2973,7 @@ vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline int64_t
> -vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
> +int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
>  {
>      uint8_t round, shift = b & 0x3f;
>      int64_t res;
> --
> 2.17.1
>


WARNING: multiple messages have this Message-ID (diff)
From: Alistair Francis <alistair23@gmail.com>
To: LIU Zhiwei <zhiwei_liu@c-sky.com>
Cc: "qemu-devel@nongnu.org Developers" <qemu-devel@nongnu.org>,
	"open list:RISC-V" <qemu-riscv@nongnu.org>,
	 Richard Henderson <richard.henderson@linaro.org>,
	Palmer Dabbelt <palmer@dabbelt.com>
Subject: Re: [PATCH 02/38] target/riscv: Hoist vector functions
Date: Tue, 9 Mar 2021 09:10:28 -0500	[thread overview]
Message-ID: <CAKmqyKM5AAAi42p-mMot_dhrcs59QDAbVdgHpnvtZr-3YEKnaQ@mail.gmail.com> (raw)
In-Reply-To: <20210212150256.885-3-zhiwei_liu@c-sky.com>

On Fri, Feb 12, 2021 at 10:07 AM LIU Zhiwei <zhiwei_liu@c-sky.com> wrote:
>
> The saturation functions for add, subtract, and shift can
> be used in the packed extension. Therefore hoist them up.

A better title might be:

target/riscv: Make the vector helper functions public

Otherwise:

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

>
> The endianness processing macros are also hoisted.
>
> Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
> ---
>  target/riscv/internals.h     | 50 ++++++++++++++++++++++
>  target/riscv/vector_helper.c | 82 +++++++++++-------------------------
>  2 files changed, 74 insertions(+), 58 deletions(-)
>
> diff --git a/target/riscv/internals.h b/target/riscv/internals.h
> index b15ad394bb..698158e116 100644
> --- a/target/riscv/internals.h
> +++ b/target/riscv/internals.h
> @@ -58,4 +58,54 @@ static inline float32 check_nanbox_s(uint64_t f)
>      }
>  }
>
> +/*
> + * Note that vector data is stored in host-endian 64-bit chunks,
> + * so addressing units smaller than that needs a host-endian fixup.
> + */
> +#ifdef HOST_WORDS_BIGENDIAN
> +#define H1(x)   ((x) ^ 7)
> +#define H1_2(x) ((x) ^ 6)
> +#define H1_4(x) ((x) ^ 4)
> +#define H2(x)   ((x) ^ 3)
> +#define H4(x)   ((x) ^ 1)
> +#define H8(x)   ((x))
> +#else
> +#define H1(x)   (x)
> +#define H1_2(x) (x)
> +#define H1_4(x) (x)
> +#define H2(x)   (x)
> +#define H4(x)   (x)
> +#define H8(x)   (x)
> +#endif
> +
> +/* share functions about saturation */
> +int8_t sadd8(CPURISCVState *, int vxrm, int8_t, int8_t);
> +int16_t sadd16(CPURISCVState *, int vxrm, int16_t, int16_t);
> +int32_t sadd32(CPURISCVState *, int vxrm, int32_t, int32_t);
> +int64_t sadd64(CPURISCVState *, int vxrm, int64_t, int64_t);
> +
> +uint8_t saddu8(CPURISCVState *, int vxrm, uint8_t, uint8_t);
> +uint16_t saddu16(CPURISCVState *, int vxrm, uint16_t, uint16_t);
> +uint32_t saddu32(CPURISCVState *, int vxrm, uint32_t, uint32_t);
> +uint64_t saddu64(CPURISCVState *, int vxrm, uint64_t, uint64_t);
> +
> +int8_t ssub8(CPURISCVState *, int vxrm, int8_t, int8_t);
> +int16_t ssub16(CPURISCVState *, int vxrm, int16_t, int16_t);
> +int32_t ssub32(CPURISCVState *, int vxrm, int32_t, int32_t);
> +int64_t ssub64(CPURISCVState *, int vxrm, int64_t, int64_t);
> +
> +uint8_t ssubu8(CPURISCVState *, int vxrm, uint8_t, uint8_t);
> +uint16_t ssubu16(CPURISCVState *, int vxrm, uint16_t, uint16_t);
> +uint32_t ssubu32(CPURISCVState *, int vxrm, uint32_t, uint32_t);
> +uint64_t ssubu64(CPURISCVState *, int vxrm, uint64_t, uint64_t);
> +
> +/* share shift functions */
> +int8_t vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b);
> +int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b);
> +int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b);
> +int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
> +uint8_t vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b);
> +uint16_t vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b);
> +uint32_t vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b);
> +uint64_t vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b);
>  #endif
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index a156573d28..9371d70f6b 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -56,26 +56,6 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
>      return vl;
>  }
>
> -/*
> - * Note that vector data is stored in host-endian 64-bit chunks,
> - * so addressing units smaller than that needs a host-endian fixup.
> - */
> -#ifdef HOST_WORDS_BIGENDIAN
> -#define H1(x)   ((x) ^ 7)
> -#define H1_2(x) ((x) ^ 6)
> -#define H1_4(x) ((x) ^ 4)
> -#define H2(x)   ((x) ^ 3)
> -#define H4(x)   ((x) ^ 1)
> -#define H8(x)   ((x))
> -#else
> -#define H1(x)   (x)
> -#define H1_2(x) (x)
> -#define H1_4(x) (x)
> -#define H2(x)   (x)
> -#define H4(x)   (x)
> -#define H8(x)   (x)
> -#endif
> -
>  static inline uint32_t vext_nf(uint32_t desc)
>  {
>      return FIELD_EX32(simd_data(desc), VDATA, NF);
> @@ -2199,7 +2179,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,     \
>                   do_##NAME, CLEAR_FN);                          \
>  }
>
> -static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
> +uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>  {
>      uint8_t res = a + b;
>      if (res < a) {
> @@ -2209,8 +2189,7 @@ static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>      return res;
>  }
>
> -static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a,
> -                               uint16_t b)
> +uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
>  {
>      uint16_t res = a + b;
>      if (res < a) {
> @@ -2220,8 +2199,7 @@ static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a,
>      return res;
>  }
>
> -static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a,
> -                               uint32_t b)
> +uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
>  {
>      uint32_t res = a + b;
>      if (res < a) {
> @@ -2231,8 +2209,7 @@ static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a,
>      return res;
>  }
>
> -static inline uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a,
> -                               uint64_t b)
> +uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
>  {
>      uint64_t res = a + b;
>      if (res < a) {
> @@ -2328,7 +2305,7 @@ GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2, clearh)
>  GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4, clearl)
>  GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8, clearq)
>
> -static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
> +int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>  {
>      int8_t res = a + b;
>      if ((res ^ a) & (res ^ b) & INT8_MIN) {
> @@ -2338,7 +2315,7 @@ static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>      return res;
>  }
>
> -static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
> +int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>  {
>      int16_t res = a + b;
>      if ((res ^ a) & (res ^ b) & INT16_MIN) {
> @@ -2348,7 +2325,7 @@ static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>      return res;
>  }
>
> -static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
> +int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>  {
>      int32_t res = a + b;
>      if ((res ^ a) & (res ^ b) & INT32_MIN) {
> @@ -2358,7 +2335,7 @@ static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>      return res;
>  }
>
> -static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
> +int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
>  {
>      int64_t res = a + b;
>      if ((res ^ a) & (res ^ b) & INT64_MIN) {
> @@ -2386,7 +2363,7 @@ GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh)
>  GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl)
>  GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq)
>
> -static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
> +uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>  {
>      uint8_t res = a - b;
>      if (res > a) {
> @@ -2396,8 +2373,7 @@ static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>      return res;
>  }
>
> -static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a,
> -                               uint16_t b)
> +uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
>  {
>      uint16_t res = a - b;
>      if (res > a) {
> @@ -2407,8 +2383,7 @@ static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a,
>      return res;
>  }
>
> -static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a,
> -                               uint32_t b)
> +uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
>  {
>      uint32_t res = a - b;
>      if (res > a) {
> @@ -2418,8 +2393,7 @@ static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a,
>      return res;
>  }
>
> -static inline uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a,
> -                               uint64_t b)
> +uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
>  {
>      uint64_t res = a - b;
>      if (res > a) {
> @@ -2447,7 +2421,7 @@ GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh)
>  GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl)
>  GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq)
>
> -static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
> +int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>  {
>      int8_t res = a - b;
>      if ((res ^ a) & (a ^ b) & INT8_MIN) {
> @@ -2457,7 +2431,7 @@ static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>      return res;
>  }
>
> -static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
> +int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>  {
>      int16_t res = a - b;
>      if ((res ^ a) & (a ^ b) & INT16_MIN) {
> @@ -2467,7 +2441,7 @@ static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>      return res;
>  }
>
> -static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
> +int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>  {
>      int32_t res = a - b;
>      if ((res ^ a) & (a ^ b) & INT32_MIN) {
> @@ -2477,7 +2451,7 @@ static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>      return res;
>  }
>
> -static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
> +int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
>  {
>      int64_t res = a - b;
>      if ((res ^ a) & (a ^ b) & INT64_MIN) {
> @@ -2918,8 +2892,7 @@ GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl)
>  GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq)
>
>  /* Vector Single-Width Scaling Shift Instructions */
> -static inline uint8_t
> -vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
> +uint8_t vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>  {
>      uint8_t round, shift = b & 0x7;
>      uint8_t res;
> @@ -2928,8 +2901,7 @@ vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline uint16_t
> -vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
> +uint16_t vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
>  {
>      uint8_t round, shift = b & 0xf;
>      uint16_t res;
> @@ -2938,8 +2910,7 @@ vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline uint32_t
> -vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
> +uint32_t vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
>  {
>      uint8_t round, shift = b & 0x1f;
>      uint32_t res;
> @@ -2948,8 +2919,7 @@ vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline uint64_t
> -vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
> +uint64_t vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
>  {
>      uint8_t round, shift = b & 0x3f;
>      uint64_t res;
> @@ -2976,8 +2946,7 @@ GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh)
>  GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl)
>  GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq)
>
> -static inline int8_t
> -vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
> +int8_t vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>  {
>      uint8_t round, shift = b & 0x7;
>      int8_t res;
> @@ -2986,8 +2955,7 @@ vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline int16_t
> -vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
> +int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>  {
>      uint8_t round, shift = b & 0xf;
>      int16_t res;
> @@ -2996,8 +2964,7 @@ vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline int32_t
> -vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
> +int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>  {
>      uint8_t round, shift = b & 0x1f;
>      int32_t res;
> @@ -3006,8 +2973,7 @@ vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
>      res   = (a >> shift)  + round;
>      return res;
>  }
> -static inline int64_t
> -vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
> +int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
>  {
>      uint8_t round, shift = b & 0x3f;
>      int64_t res;
> --
> 2.17.1
>


  reply	other threads:[~2021-03-09 14:12 UTC|newest]

Thread overview: 150+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-02-12 15:02 [PATCH 00/38] target/riscv: support packed extension v0.9.2 LIU Zhiwei
2021-02-12 15:02 ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 01/38] target/riscv: implementation-defined constant parameters LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-09 14:08   ` Alistair Francis
2021-03-09 14:08     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 02/38] target/riscv: Hoist vector functions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-09 14:10   ` Alistair Francis [this message]
2021-03-09 14:10     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 03/38] target/riscv: Fixup saturate subtract function LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 18:52   ` Richard Henderson
2021-02-12 18:52     ` Richard Henderson
2021-03-09 14:11   ` Alistair Francis
2021-03-09 14:11     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 04/38] target/riscv: 16-bit Addition & Subtraction Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 18:03   ` Richard Henderson
2021-02-12 18:03     ` Richard Henderson
2021-02-18  8:39     ` LIU Zhiwei
2021-02-18  8:39       ` LIU Zhiwei
2021-02-18 16:20       ` Richard Henderson
2021-02-18 16:20         ` Richard Henderson
2021-02-12 19:02   ` Richard Henderson
2021-02-12 19:02     ` Richard Henderson
2021-02-18  8:47     ` LIU Zhiwei
2021-02-18  8:47       ` LIU Zhiwei
2021-02-18 16:21       ` Richard Henderson
2021-02-18 16:21         ` Richard Henderson
2021-02-12 15:02 ` [PATCH 05/38] target/riscv: 8-bit Addition & Subtraction Instruction LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-15 21:22   ` Alistair Francis
2021-03-15 21:22     ` Alistair Francis
2021-05-24  1:00     ` Palmer Dabbelt
2021-05-24  1:00       ` Palmer Dabbelt
2021-05-26  5:43       ` LIU Zhiwei
2021-05-26  5:43         ` LIU Zhiwei
2021-05-26  6:15         ` Palmer Dabbelt
2021-05-26  6:15           ` Palmer Dabbelt
2021-02-12 15:02 ` [PATCH 06/38] target/riscv: SIMD 16-bit Shift Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-15 21:25   ` Alistair Francis
2021-03-15 21:25     ` Alistair Francis
2021-03-16  2:40     ` LIU Zhiwei
2021-03-16  2:40       ` LIU Zhiwei
2021-03-16 19:54       ` Alistair Francis
2021-03-16 19:54         ` Alistair Francis
2021-03-17  2:30         ` LIU Zhiwei
2021-03-17  2:30           ` LIU Zhiwei
2021-03-17 20:39           ` Alistair Francis
2021-03-17 20:39             ` Alistair Francis
2021-02-12 15:02 ` [PATCH 07/38] target/riscv: SIMD 8-bit " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-15 21:27   ` Alistair Francis
2021-03-15 21:27     ` Alistair Francis
2021-05-24  4:46   ` Palmer Dabbelt
2021-05-24  4:46     ` Palmer Dabbelt
2021-02-12 15:02 ` [PATCH 08/38] target/riscv: SIMD 16-bit Compare Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-15 21:28   ` Alistair Francis
2021-03-15 21:28     ` Alistair Francis
2021-05-26  5:30   ` Palmer Dabbelt
2021-05-26  5:30     ` Palmer Dabbelt
2021-05-26  5:31     ` Palmer Dabbelt
2021-05-26  5:31       ` Palmer Dabbelt
2021-02-12 15:02 ` [PATCH 09/38] target/riscv: SIMD 8-bit " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-15 21:31   ` Alistair Francis
2021-03-15 21:31     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 10/38] target/riscv: SIMD 16-bit Multiply Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 11/38] target/riscv: SIMD 8-bit " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-15 21:33   ` Alistair Francis
2021-03-15 21:33     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 12/38] target/riscv: SIMD 16-bit Miscellaneous Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-15 21:35   ` Alistair Francis
2021-03-15 21:35     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 13/38] target/riscv: SIMD 8-bit " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-16 14:38   ` Alistair Francis
2021-03-16 14:38     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 14/38] target/riscv: 8-bit Unpacking Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-16 14:40   ` Alistair Francis
2021-03-16 14:40     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 15/38] target/riscv: 16-bit Packing Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-16 14:42   ` Alistair Francis
2021-03-16 14:42     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 16/38] target/riscv: Signed MSW 32x32 Multiply and Add Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 17/38] target/riscv: Signed MSW 32x16 " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-16 16:01   ` Alistair Francis
2021-03-16 16:01     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 18/38] target/riscv: Signed 16-bit Multiply 32-bit Add/Subtract Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 19/38] target/riscv: Signed 16-bit Multiply 64-bit " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 20/38] target/riscv: Partial-SIMD Miscellaneous Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-16 19:44   ` Alistair Francis
2021-03-16 19:44     ` Alistair Francis
2021-02-12 15:02 ` [PATCH 21/38] target/riscv: 8-bit Multiply with 32-bit Add Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 22/38] target/riscv: 64-bit Add/Subtract Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 23/38] target/riscv: 32-bit Multiply " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 24/38] target/riscv: Signed 16-bit Multiply with " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 25/38] target/riscv: Non-SIMD Q15 saturation ALU Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 26/38] target/riscv: Non-SIMD Q31 " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 27/38] target/riscv: 32-bit Computation Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 28/38] target/riscv: Non-SIMD Miscellaneous Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 29/38] target/riscv: RV64 Only SIMD 32-bit Add/Subtract Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 30/38] target/riscv: RV64 Only SIMD 32-bit Shift Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 31/38] target/riscv: RV64 Only SIMD 32-bit Miscellaneous Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 32/38] target/riscv: RV64 Only SIMD Q15 saturating Multiply Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 33/38] target/riscv: RV64 Only 32-bit " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 34/38] target/riscv: RV64 Only 32-bit Multiply & Add Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 35/38] target/riscv: RV64 Only 32-bit Parallel " LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 36/38] target/riscv: RV64 Only Non-SIMD 32-bit Shift Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 37/38] target/riscv: RV64 Only 32-bit Packing Instructions LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-02-12 15:02 ` [PATCH 38/38] target/riscv: configure and turn on packed extension from command line LIU Zhiwei
2021-02-12 15:02   ` LIU Zhiwei
2021-03-05  6:14 ` [PATCH 00/38] target/riscv: support packed extension v0.9.2 LIU Zhiwei
2021-03-05  6:14   ` LIU Zhiwei
2021-04-13  3:27 ` LIU Zhiwei
2021-04-13  3:27   ` LIU Zhiwei
2021-04-15  4:46   ` Alistair Francis
2021-04-15  4:46     ` Alistair Francis
2021-04-15  5:50     ` LIU Zhiwei
2021-04-15  5:50       ` LIU Zhiwei

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CAKmqyKM5AAAi42p-mMot_dhrcs59QDAbVdgHpnvtZr-3YEKnaQ@mail.gmail.com \
    --to=alistair23@gmail.com \
    --cc=palmer@dabbelt.com \
    --cc=qemu-devel@nongnu.org \
    --cc=qemu-riscv@nongnu.org \
    --cc=richard.henderson@linaro.org \
    --cc=zhiwei_liu@c-sky.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.