From: "Alex Bennée" <alex.bennee@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>
Cc: luis.pires@eldorado.org.br, qemu-devel@nongnu.org
Subject: Re: [PATCH v3 38/48] tcg/optimize: Split out fold_masks
Date: Tue, 26 Oct 2021 16:32:45 +0100
Message-ID: <871r472203.fsf@linaro.org>
In-Reply-To: <20211021210539.825582-39-richard.henderson@linaro.org>
Richard Henderson <richard.henderson@linaro.org> writes:
> Move all of the known-zero optimizations into the per-opcode
> functions. Use fold_masks when there is a possibility of the
> result being determined, and simply set ctx->z_mask otherwise.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> tcg/optimize.c | 545 ++++++++++++++++++++++++++-----------------------
> 1 file changed, 294 insertions(+), 251 deletions(-)
>
> diff --git a/tcg/optimize.c b/tcg/optimize.c
> index 6c1cc3e635..f0086ee789 100644
> --- a/tcg/optimize.c
> +++ b/tcg/optimize.c
> @@ -50,7 +50,8 @@ typedef struct OptContext {
> TCGTempSet temps_used;
>
> /* In flight values from optimization. */
> - uint64_t z_mask;
> + uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
> + uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
nit: too much iff?
> TCGType type;
> } OptContext;
>
> @@ -694,6 +695,31 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
> return false;
> }
>
> +static bool fold_masks(OptContext *ctx, TCGOp *op)
> +{
> + uint64_t a_mask = ctx->a_mask;
> + uint64_t z_mask = ctx->z_mask;
> +
> + /*
> + * 32-bit ops generate 32-bit results. For the "result is zero" test
> + * below, we can ignore high bits, but for further optimizations we
> + * need to record that the high bits contain garbage.
> + */
> + if (ctx->type == TCG_TYPE_I32) {
> + ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
> + a_mask &= MAKE_64BIT_MASK(0, 32);
> + z_mask &= MAKE_64BIT_MASK(0, 32);
> + }
> +
> + if (z_mask == 0) {
> + return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
> + }
> + if (a_mask == 0) {
> + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
> + }
> + return false;
> +}
> +
> /*
> * Convert @op to NOT, if NOT is supported by the host.
> * Return true if the conversion is successful, which will still
> @@ -847,24 +873,55 @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
>
> static bool fold_and(OptContext *ctx, TCGOp *op)
> {
> + uint64_t z1, z2;
> +
> if (fold_const2(ctx, op) ||
> fold_xi_to_i(ctx, op, 0) ||
> fold_xi_to_x(ctx, op, -1) ||
> fold_xx_to_x(ctx, op)) {
> return true;
> }
> - return false;
> +
> + z1 = arg_info(op->args[1])->z_mask;
> + z2 = arg_info(op->args[2])->z_mask;
> + ctx->z_mask = z1 & z2;
> +
> + /*
> + * Known-zeros does not imply known-ones. Therefore unless
> + * arg2 is constant, we can't infer affected bits from it.
> + */
> + if (arg_is_const(op->args[2])) {
> + ctx->a_mask = z1 & ~z2;
> + }
> +
> + return fold_masks(ctx, op);
> }
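(A worked example for anyone else reviewing: say arg1 has z_mask 0x00ff
and arg2 is the constant 0xffff. Then z1 & ~z2 == 0, so fold_masks sees
a_mask == 0 and collapses the and into a mov of arg1, since the op
provably cannot clear any bit that could be set.)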
>
> static bool fold_andc(OptContext *ctx, TCGOp *op)
> {
> + uint64_t z1;
> +
> if (fold_const2(ctx, op) ||
> fold_xx_to_i(ctx, op, 0) ||
> fold_xi_to_x(ctx, op, 0) ||
> fold_ix_to_not(ctx, op, -1)) {
> return true;
> }
> - return false;
> +
> + z1 = arg_info(op->args[1])->z_mask;
> +
> + /*
> + * Known-zeros does not imply known-ones. Therefore unless
> + * arg2 is constant, we can't infer anything from it.
> + */
> + if (arg_is_const(op->args[2])) {
> + uint64_t z2 = ~arg_info(op->args[2])->z_mask;
> + ctx->a_mask = z1 & ~z2;
> + z1 &= z2;
> + }
> + ctx->z_mask = z1;
> +
> + return fold_masks(ctx, op);
> }
>
> static bool fold_brcond(OptContext *ctx, TCGOp *op)
> @@ -963,13 +1020,52 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
>
> static bool fold_bswap(OptContext *ctx, TCGOp *op)
> {
> + uint64_t z_mask, sign;
> +
> if (arg_is_const(op->args[1])) {
> uint64_t t = arg_info(op->args[1])->val;
>
> t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
> return tcg_opt_gen_movi(ctx, op, op->args[0], t);
> }
> - return false;
> +
> + z_mask = arg_info(op->args[1])->z_mask;
> + switch (op->opc) {
> + case INDEX_op_bswap16_i32:
> + case INDEX_op_bswap16_i64:
> + z_mask = bswap16(z_mask);
> + sign = INT16_MIN;
> + break;
> + case INDEX_op_bswap32_i32:
> + case INDEX_op_bswap32_i64:
> + z_mask = bswap32(z_mask);
> + sign = INT32_MIN;
> + break;
> + case INDEX_op_bswap64_i64:
> + z_mask = bswap64(z_mask);
> + sign = INT64_MIN;
> + break;
> + default:
> + g_assert_not_reached();
> + }
> +
> + switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
> + case TCG_BSWAP_OZ:
> + break;
> + case TCG_BSWAP_OS:
> + /* If the sign bit may be 1, force all the bits above to 1. */
> + if (z_mask & sign) {
> + z_mask |= sign;
> + }
> + break;
> + default:
> + /* The high bits are undefined: force all bits above the sign to 1. */
> + z_mask |= sign << 1;
> + break;
> + }
> + ctx->z_mask = z_mask;
> +
> + return fold_masks(ctx, op);
> }
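(Spelling out the default case for future readers: the high bits of the
output are undefined there, so e.g. for bswap16 with sign == INT16_MIN,
sign << 1 == 0xffffffffffff0000, marking every bit above the 16-bit
result as possibly set.)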
>
> static bool fold_call(OptContext *ctx, TCGOp *op)
> @@ -1006,6 +1102,8 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
>
> static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
> {
> + uint64_t z_mask;
> +
> if (arg_is_const(op->args[1])) {
> uint64_t t = arg_info(op->args[1])->val;
>
> @@ -1015,12 +1113,39 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
> }
> return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
> }
> +
> + switch (ctx->type) {
> + case TCG_TYPE_I32:
> + z_mask = 31;
> + break;
> + case TCG_TYPE_I64:
> + z_mask = 63;
> + break;
> + default:
> + g_assert_not_reached();
> + }
> + ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
> +
> return false;
> }
>
> static bool fold_ctpop(OptContext *ctx, TCGOp *op)
> {
> - return fold_const1(ctx, op);
> + if (fold_const1(ctx, op)) {
> + return true;
> + }
> +
> + switch (ctx->type) {
> + case TCG_TYPE_I32:
> + ctx->z_mask = 32 | 31;
> + break;
> + case TCG_TYPE_I64:
> + ctx->z_mask = 64 | 63;
> + break;
> + default:
> + g_assert_not_reached();
> + }
> + return false;
> }
>
> static bool fold_deposit(OptContext *ctx, TCGOp *op)
> @@ -1032,6 +1157,10 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
> t1 = deposit64(t1, op->args[3], op->args[4], t2);
> return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
> }
> +
> + ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
> + op->args[3], op->args[4],
> + arg_info(op->args[2])->z_mask);
> return false;
> }
>
> @@ -1072,6 +1201,8 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
>
> static bool fold_extract(OptContext *ctx, TCGOp *op)
> {
> + uint64_t z_mask_old, z_mask;
> +
> if (arg_is_const(op->args[1])) {
> uint64_t t;
>
> @@ -1079,7 +1210,15 @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
> t = extract64(t, op->args[2], op->args[3]);
> return tcg_opt_gen_movi(ctx, op, op->args[0], t);
> }
> - return false;
> +
> + z_mask_old = arg_info(op->args[1])->z_mask;
> + z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
> + if (op->args[2] == 0) {
> + ctx->a_mask = z_mask_old ^ z_mask;
> + }
> + ctx->z_mask = z_mask;
> +
> + return fold_masks(ctx, op);
> }
>
> static bool fold_extract2(OptContext *ctx, TCGOp *op)
> @@ -1103,12 +1242,83 @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
>
> static bool fold_exts(OptContext *ctx, TCGOp *op)
> {
> - return fold_const1(ctx, op);
> + uint64_t z_mask_old, z_mask, sign;
> + bool type_change = false;
> +
> + if (fold_const1(ctx, op)) {
> + return true;
> + }
> +
> + z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
> +
> + switch (op->opc) {
> + CASE_OP_32_64(ext8s):
> + sign = INT8_MIN;
> + z_mask = (uint8_t)z_mask;
> + break;
> + CASE_OP_32_64(ext16s):
> + sign = INT16_MIN;
> + z_mask = (uint16_t)z_mask;
> + break;
> + case INDEX_op_ext_i32_i64:
> + type_change = true;
> + QEMU_FALLTHROUGH;
> + case INDEX_op_ext32s_i64:
> + sign = INT32_MIN;
> + z_mask = (uint32_t)z_mask;
> + break;
> + default:
> + g_assert_not_reached();
> + }
> +
> + if (z_mask & sign) {
> + z_mask |= sign;
> + } else if (!type_change) {
> + ctx->a_mask = z_mask_old ^ z_mask;
> + }
> + ctx->z_mask = z_mask;
> +
> + return fold_masks(ctx, op);
> }
>
> static bool fold_extu(OptContext *ctx, TCGOp *op)
> {
> - return fold_const1(ctx, op);
> + uint64_t z_mask_old, z_mask;
> + bool type_change = false;
> +
> + if (fold_const1(ctx, op)) {
> + return true;
> + }
> +
> + z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
> +
> + switch (op->opc) {
> + CASE_OP_32_64(ext8u):
> + z_mask = (uint8_t)z_mask;
> + break;
> + CASE_OP_32_64(ext16u):
> + z_mask = (uint16_t)z_mask;
> + break;
> + case INDEX_op_extrl_i64_i32:
> + case INDEX_op_extu_i32_i64:
> + type_change = true;
> + QEMU_FALLTHROUGH;
> + case INDEX_op_ext32u_i64:
> + z_mask = (uint32_t)z_mask;
> + break;
> + case INDEX_op_extrh_i64_i32:
> + type_change = true;
> + z_mask >>= 32;
> + break;
> + default:
> + g_assert_not_reached();
> + }
> +
> + ctx->z_mask = z_mask;
> + if (!type_change) {
> + ctx->a_mask = z_mask_old ^ z_mask;
> + }
> + return fold_masks(ctx, op);
> }
>
> static bool fold_mb(OptContext *ctx, TCGOp *op)
> @@ -1149,6 +1359,9 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
> return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
> }
>
> + ctx->z_mask = arg_info(op->args[3])->z_mask
> + | arg_info(op->args[4])->z_mask;
> +
> if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
> uint64_t tv = arg_info(op->args[3])->val;
> uint64_t fv = arg_info(op->args[4])->val;
> @@ -1214,9 +1427,16 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
>
> static bool fold_neg(OptContext *ctx, TCGOp *op)
> {
> + uint64_t z_mask;
> +
> if (fold_const1(ctx, op)) {
> return true;
> }
> +
> + /* Set to 1 all bits to the left of the rightmost. */
> + z_mask = arg_info(op->args[1])->z_mask;
> + ctx->z_mask = -(z_mask & -z_mask);
> +
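(The trick, for anyone puzzling over it: z_mask & -z_mask isolates the
lowest possibly-set bit, and negating that sign-extends it upward. E.g.
z_mask == 0x30 gives -(0x10) == 0xfffffffffffffff0, so the four trailing
known-zero bits stay known zero, which matches -x == ~x + 1 with the +1
carrying through x's trailing zeros.)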
> /*
> * Because of fold_sub_to_neg, we want to always return true,
> * via finish_folding.
> @@ -1252,7 +1472,10 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
> fold_xx_to_x(ctx, op)) {
> return true;
> }
> - return false;
> +
> + ctx->z_mask = arg_info(op->args[1])->z_mask
> + | arg_info(op->args[2])->z_mask;
> + return fold_masks(ctx, op);
> }
>
> static bool fold_orc(OptContext *ctx, TCGOp *op)
> @@ -1266,6 +1489,15 @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
>
> static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
> {
> + const TCGOpDef *def = &tcg_op_defs[op->opc];
> + MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
> + MemOp mop = get_memop(oi);
> + int width = 8 << (mop & MO_SIZE);
Given we already have a memop_size() helper it might be worth adding a
memop_size_bits() next to it?
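Something like this (untested sketch; both the name and parking it next
to memop_size() in include/exec/memop.h are only suggestions):

  static inline unsigned memop_size_bits(MemOp op)
  {
      return 8 << (op & MO_SIZE);
  }

which would let the line above become:

  int width = memop_size_bits(mop);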
> +
> + if (!(mop & MO_SIGN) && width < 64) {
> + ctx->z_mask = MAKE_64BIT_MASK(0, width);
> + }
> +
> /* Opcodes that touch guest memory stop the mb optimization. */
> ctx->prev_mb = NULL;
> return false;
> @@ -1286,6 +1518,8 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
> if (i >= 0) {
> return tcg_opt_gen_movi(ctx, op, op->args[0], i);
> }
> +
> + ctx->z_mask = 1;
> return false;
> }
>
> @@ -1352,6 +1586,8 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
> op->opc = INDEX_op_setcond_i32;
> break;
> }
> +
> + ctx->z_mask = 1;
> return false;
>
> do_setcond_const:
> @@ -1360,6 +1596,8 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
>
> static bool fold_sextract(OptContext *ctx, TCGOp *op)
> {
> + int64_t z_mask_old, z_mask;
> +
> if (arg_is_const(op->args[1])) {
> uint64_t t;
>
> @@ -1367,7 +1605,15 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
> t = sextract64(t, op->args[2], op->args[3]);
> return tcg_opt_gen_movi(ctx, op, op->args[0], t);
> }
> - return false;
> +
> + z_mask_old = arg_info(op->args[1])->z_mask;
> + z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
> + if (op->args[2] == 0 && z_mask >= 0) {
> + ctx->a_mask = z_mask_old ^ z_mask;
> + }
> + ctx->z_mask = z_mask;
> +
> + return fold_masks(ctx, op);
> }
>
> static bool fold_shift(OptContext *ctx, TCGOp *op)
> @@ -1377,6 +1623,13 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
> fold_xi_to_x(ctx, op, 0)) {
> return true;
> }
> +
> + if (arg_is_const(op->args[2])) {
> + ctx->z_mask = do_constant_folding(op->opc, ctx->type,
> + arg_info(op->args[1])->z_mask,
> + arg_info(op->args[2])->val);
> + return fold_masks(ctx, op);
> + }
> return false;
> }
>
> @@ -1432,6 +1685,25 @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
> return fold_addsub2_i32(ctx, op, false);
> }
>
> +static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
> +{
> + /* We can't do any folding with a load, but we can record bits. */
> + switch (op->opc) {
> + CASE_OP_32_64(ld8u):
> + ctx->z_mask = 0xff;
> + break;
> + CASE_OP_32_64(ld16u):
> + ctx->z_mask = 0xffff;
> + break;
> + case INDEX_op_ld32u_i64:
> + ctx->z_mask = 0xffffffffu;
> + break;
> + default:
> + g_assert_not_reached();
> + }
> + return false;
Given we use MAKE_64BIT_MASK elsewhere we should use it here as well;
sketch below the hunk.
> +}
> +
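For the record, an untested sketch of what I mean:

  CASE_OP_32_64(ld8u):
      ctx->z_mask = MAKE_64BIT_MASK(0, 8);
      break;
  CASE_OP_32_64(ld16u):
      ctx->z_mask = MAKE_64BIT_MASK(0, 16);
      break;
  case INDEX_op_ld32u_i64:
      ctx->z_mask = MAKE_64BIT_MASK(0, 32);
      break;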
> static bool fold_xor(OptContext *ctx, TCGOp *op)
> {
> if (fold_const2(ctx, op) ||
> @@ -1440,7 +1712,10 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
> fold_xi_to_not(ctx, op, -1)) {
> return true;
> }
> - return false;
> +
> + ctx->z_mask = arg_info(op->args[1])->z_mask
> + | arg_info(op->args[2])->z_mask;
> + return fold_masks(ctx, op);
> }
>
> /* Propagate constants and copies, fold constant expressions. */
> @@ -1461,7 +1736,6 @@ void tcg_optimize(TCGContext *s)
> }
>
> QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
> - uint64_t z_mask, partmask, affected, tmp;
> TCGOpcode opc = op->opc;
> const TCGOpDef *def;
> bool done = false;
> @@ -1542,245 +1816,9 @@ void tcg_optimize(TCGContext *s)
> break;
> }
>
> - /* Simplify using known-zero bits. Currently only ops with a single
> - output argument is supported. */
> - z_mask = -1;
> - affected = -1;
> - switch (opc) {
> - CASE_OP_32_64(ext8s):
> - if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
> - break;
> - }
> - QEMU_FALLTHROUGH;
> - CASE_OP_32_64(ext8u):
> - z_mask = 0xff;
> - goto and_const;
> - CASE_OP_32_64(ext16s):
> - if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
> - break;
> - }
> - QEMU_FALLTHROUGH;
> - CASE_OP_32_64(ext16u):
> - z_mask = 0xffff;
> - goto and_const;
> - case INDEX_op_ext32s_i64:
> - if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
> - break;
> - }
> - QEMU_FALLTHROUGH;
> - case INDEX_op_ext32u_i64:
> - z_mask = 0xffffffffU;
> - goto and_const;
> -
> - CASE_OP_32_64(and):
> - z_mask = arg_info(op->args[2])->z_mask;
> - if (arg_is_const(op->args[2])) {
> - and_const:
> - affected = arg_info(op->args[1])->z_mask & ~z_mask;
> - }
> - z_mask = arg_info(op->args[1])->z_mask & z_mask;
> - break;
> -
> - case INDEX_op_ext_i32_i64:
> - if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
> - break;
> - }
> - QEMU_FALLTHROUGH;
> - case INDEX_op_extu_i32_i64:
> - /* We do not compute affected as it is a size changing op. */
> - z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
> - break;
> -
> - CASE_OP_32_64(andc):
> - /* Known-zeros does not imply known-ones. Therefore unless
> - op->args[2] is constant, we can't infer anything from it. */
> - if (arg_is_const(op->args[2])) {
> - z_mask = ~arg_info(op->args[2])->z_mask;
> - goto and_const;
> - }
> - /* But we certainly know nothing outside args[1] may be set. */
> - z_mask = arg_info(op->args[1])->z_mask;
> - break;
> -
> - case INDEX_op_sar_i32:
> - if (arg_is_const(op->args[2])) {
> - tmp = arg_info(op->args[2])->val & 31;
> - z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
> - }
> - break;
> - case INDEX_op_sar_i64:
> - if (arg_is_const(op->args[2])) {
> - tmp = arg_info(op->args[2])->val & 63;
> - z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
> - }
> - break;
> -
> - case INDEX_op_shr_i32:
> - if (arg_is_const(op->args[2])) {
> - tmp = arg_info(op->args[2])->val & 31;
> - z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
> - }
> - break;
> - case INDEX_op_shr_i64:
> - if (arg_is_const(op->args[2])) {
> - tmp = arg_info(op->args[2])->val & 63;
> - z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
> - }
> - break;
> -
> - case INDEX_op_extrl_i64_i32:
> - z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
> - break;
> - case INDEX_op_extrh_i64_i32:
> - z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
> - break;
> -
> - CASE_OP_32_64(shl):
> - if (arg_is_const(op->args[2])) {
> - tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
> - z_mask = arg_info(op->args[1])->z_mask << tmp;
> - }
> - break;
> -
> - CASE_OP_32_64(neg):
> - /* Set to 1 all bits to the left of the rightmost. */
> - z_mask = -(arg_info(op->args[1])->z_mask
> - & -arg_info(op->args[1])->z_mask);
> - break;
> -
> - CASE_OP_32_64(deposit):
> - z_mask = deposit64(arg_info(op->args[1])->z_mask,
> - op->args[3], op->args[4],
> - arg_info(op->args[2])->z_mask);
> - break;
> -
> - CASE_OP_32_64(extract):
> - z_mask = extract64(arg_info(op->args[1])->z_mask,
> - op->args[2], op->args[3]);
> - if (op->args[2] == 0) {
> - affected = arg_info(op->args[1])->z_mask & ~z_mask;
> - }
> - break;
> - CASE_OP_32_64(sextract):
> - z_mask = sextract64(arg_info(op->args[1])->z_mask,
> - op->args[2], op->args[3]);
> - if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
> - affected = arg_info(op->args[1])->z_mask & ~z_mask;
> - }
> - break;
> -
> - CASE_OP_32_64(or):
> - CASE_OP_32_64(xor):
> - z_mask = arg_info(op->args[1])->z_mask
> - | arg_info(op->args[2])->z_mask;
> - break;
> -
> - case INDEX_op_clz_i32:
> - case INDEX_op_ctz_i32:
> - z_mask = arg_info(op->args[2])->z_mask | 31;
> - break;
> -
> - case INDEX_op_clz_i64:
> - case INDEX_op_ctz_i64:
> - z_mask = arg_info(op->args[2])->z_mask | 63;
> - break;
> -
> - case INDEX_op_ctpop_i32:
> - z_mask = 32 | 31;
> - break;
> - case INDEX_op_ctpop_i64:
> - z_mask = 64 | 63;
> - break;
> -
> - CASE_OP_32_64(setcond):
> - case INDEX_op_setcond2_i32:
> - z_mask = 1;
> - break;
> -
> - CASE_OP_32_64(movcond):
> - z_mask = arg_info(op->args[3])->z_mask
> - | arg_info(op->args[4])->z_mask;
> - break;
> -
> - CASE_OP_32_64(ld8u):
> - z_mask = 0xff;
> - break;
> - CASE_OP_32_64(ld16u):
> - z_mask = 0xffff;
> - break;
> - case INDEX_op_ld32u_i64:
> - z_mask = 0xffffffffu;
> - break;
> -
> - CASE_OP_32_64(qemu_ld):
> - {
> - MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
> - MemOp mop = get_memop(oi);
> - if (!(mop & MO_SIGN)) {
> - z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
> - }
> - }
> - break;
> -
> - CASE_OP_32_64(bswap16):
> - z_mask = arg_info(op->args[1])->z_mask;
> - if (z_mask <= 0xffff) {
> - op->args[2] |= TCG_BSWAP_IZ;
> - }
> - z_mask = bswap16(z_mask);
> - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
> - case TCG_BSWAP_OZ:
> - break;
> - case TCG_BSWAP_OS:
> - z_mask = (int16_t)z_mask;
> - break;
> - default: /* undefined high bits */
> - z_mask |= MAKE_64BIT_MASK(16, 48);
> - break;
> - }
> - break;
> -
> - case INDEX_op_bswap32_i64:
> - z_mask = arg_info(op->args[1])->z_mask;
> - if (z_mask <= 0xffffffffu) {
> - op->args[2] |= TCG_BSWAP_IZ;
> - }
> - z_mask = bswap32(z_mask);
> - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
> - case TCG_BSWAP_OZ:
> - break;
> - case TCG_BSWAP_OS:
> - z_mask = (int32_t)z_mask;
> - break;
> - default: /* undefined high bits */
> - z_mask |= MAKE_64BIT_MASK(32, 32);
> - break;
> - }
> - break;
> -
> - default:
> - break;
> - }
> -
> - /* 32-bit ops generate 32-bit results. For the result is zero test
> - below, we can ignore high bits, but for further optimizations we
> - need to record that the high bits contain garbage. */
> - partmask = z_mask;
> - if (ctx.type == TCG_TYPE_I32) {
> - z_mask |= ~(tcg_target_ulong)0xffffffffu;
> - partmask &= 0xffffffffu;
> - affected &= 0xffffffffu;
> - }
> - ctx.z_mask = z_mask;
> -
> - if (partmask == 0) {
> - tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
> - continue;
> - }
> - if (affected == 0) {
> - tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
> - continue;
> - }
> + /* Assume all bits affected, and no bits known zero. */
> + ctx.a_mask = -1;
> + ctx.z_mask = -1;
>
> /*
> * Process each opcode.
> @@ -1853,6 +1891,11 @@ void tcg_optimize(TCGContext *s)
> case INDEX_op_extrh_i64_i32:
> done = fold_extu(&ctx, op);
> break;
> + CASE_OP_32_64(ld8u):
> + CASE_OP_32_64(ld16u):
> + case INDEX_op_ld32u_i64:
> + done = fold_tcg_ld(&ctx, op);
> + break;
> case INDEX_op_mb:
> done = fold_mb(&ctx, op);
> break;
Otherwise:
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
--
Alex Bennée