From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: Luis Pires <luis.pires@eldorado.org.br>
Subject: [PULL 38/56] tcg/optimize: Add type to OptContext
Date: Wed, 27 Oct 2021 19:41:13 -0700	[thread overview]
Message-ID: <20211028024131.1492790-39-richard.henderson@linaro.org> (raw)
In-Reply-To: <20211028024131.1492790-1-richard.henderson@linaro.org>

Compute the type of the operation early.

There were at least 4 places that used a def->flags ladder
to determine the type of the operation being optimized.

There were two places that assumed !TCG_OPF_64BIT means
TCG_TYPE_I32, and so could potentially compute incorrect
results for vector operations.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 149 +++++++++++++++++++++++++++++--------------------
 1 file changed, 89 insertions(+), 60 deletions(-)
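
As a standalone illustration of the first point (not part of the patch;
Def, Ctx, set_type, mov_for and the FLAG_*/TYPE_* names below are invented
stand-ins rather than the real QEMU definitions), the idea is to derive the
type from def->flags exactly once and let every later consumer switch on it:

    #include <stdio.h>
    #include <stdlib.h>

    #define FLAG_64BIT  (1 << 0)
    #define FLAG_VECTOR (1 << 1)

    typedef enum { TYPE_I32, TYPE_I64, TYPE_V64, TYPE_V128, TYPE_V256 } Type;

    typedef struct { int flags; int vecl; } Def;   /* per-opcode definition */
    typedef struct { Type type; } Ctx;             /* per-op optimizer state */

    /* Done once per op, before any folding (mirrors the tcg_optimize hunk). */
    static void set_type(Ctx *ctx, const Def *def)
    {
        if (def->flags & FLAG_VECTOR) {
            ctx->type = TYPE_V64 + def->vecl;      /* vecl 0/1/2 -> V64/V128/V256 */
        } else if (def->flags & FLAG_64BIT) {
            ctx->type = TYPE_I64;
        } else {
            ctx->type = TYPE_I32;
        }
    }

    /* Later consumers switch on ctx->type instead of re-testing def->flags. */
    static const char *mov_for(const Ctx *ctx)
    {
        switch (ctx->type) {
        case TYPE_I32:  return "mov_i32";
        case TYPE_I64:  return "mov_i64";
        case TYPE_V64:
        case TYPE_V128:
        case TYPE_V256: return "mov_vec";
        default:        abort();
        }
    }

    int main(void)
    {
        Def def = { .flags = FLAG_VECTOR, .vecl = 1 };   /* a 128-bit vector op */
        Ctx ctx;

        set_type(&ctx, &def);
        printf("%s\n", mov_for(&ctx));                   /* prints "mov_vec" */
        return 0;
    }

The patch does the same thing with ctx.type in tcg_optimize() and then
switches on it in tcg_opt_gen_mov(), fold_movcond() and the sub/xor
neg/not paths below.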

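A second standalone sketch (again with invented FLAG_*/TYPE_* names and
fold_by_flags/fold_by_type helpers, not the real QEMU ones) of the second
point: a vector op has the 64-bit flag clear, so keying the 32-bit
sign-extension on that flag also truncates vector constants, while keying
it on the precomputed type does not:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_64BIT  (1 << 0)
    #define FLAG_VECTOR (1 << 1)

    typedef enum { TYPE_I32, TYPE_I64, TYPE_V128 } Type;

    /* Old style: width inferred from flags; a vector op has FLAG_64BIT clear,
     * so its folded constant gets sign-extended from 32 bits -- wrong. */
    static uint64_t fold_by_flags(int flags, uint64_t res)
    {
        if (!(flags & FLAG_64BIT)) {
            res = (uint64_t)(int32_t)res;
        }
        return res;
    }

    /* New style: width keyed on the precomputed type; only I32 ops truncate. */
    static uint64_t fold_by_type(Type type, uint64_t res)
    {
        if (type == TYPE_I32) {
            res = (uint64_t)(int32_t)res;
        }
        return res;
    }

    int main(void)
    {
        uint64_t res = 0x123456789abcdef0ull;    /* a 64-bit folded result */

        /* prints ffffffff9abcdef0: the vector constant was mangled */
        printf("flags, vector op: %016" PRIx64 "\n", fold_by_flags(FLAG_VECTOR, res));
        /* prints 123456789abcdef0: the vector constant is left alone */
        printf("type,  vector op: %016" PRIx64 "\n", fold_by_type(TYPE_V128, res));
        return 0;
    }

In the patch itself this corresponds to the do_constant_folding() and
partmask hunks, which now test ctx.type == TCG_TYPE_I32 instead of
!(def->flags & TCG_OPF_64BIT).
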
diff --git a/tcg/optimize.c b/tcg/optimize.c
index cfdc53c964..e869fa7e78 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -51,6 +51,7 @@ typedef struct OptContext {
 
     /* In flight values from optimization. */
     uint64_t z_mask;
+    TCGType type;
 } OptContext;
 
 static inline TempOptInfo *ts_info(TCGTemp *ts)
@@ -187,7 +188,6 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 {
     TCGTemp *dst_ts = arg_temp(dst);
     TCGTemp *src_ts = arg_temp(src);
-    const TCGOpDef *def;
     TempOptInfo *di;
     TempOptInfo *si;
     uint64_t z_mask;
@@ -201,16 +201,24 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
     reset_ts(dst_ts);
     di = ts_info(dst_ts);
     si = ts_info(src_ts);
-    def = &tcg_op_defs[op->opc];
-    if (def->flags & TCG_OPF_VECTOR) {
-        new_op = INDEX_op_mov_vec;
-    } else if (def->flags & TCG_OPF_64BIT) {
-        new_op = INDEX_op_mov_i64;
-    } else {
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
         new_op = INDEX_op_mov_i32;
+        break;
+    case TCG_TYPE_I64:
+        new_op = INDEX_op_mov_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
+        new_op = INDEX_op_mov_vec;
+        break;
+    default:
+        g_assert_not_reached();
     }
     op->opc = new_op;
-    /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
     op->args[0] = dst;
     op->args[1] = src;
 
@@ -237,20 +245,9 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                              TCGArg dst, uint64_t val)
 {
-    const TCGOpDef *def = &tcg_op_defs[op->opc];
-    TCGType type;
-    TCGTemp *tv;
-
-    if (def->flags & TCG_OPF_VECTOR) {
-        type = TCGOP_VECL(op) + TCG_TYPE_V64;
-    } else if (def->flags & TCG_OPF_64BIT) {
-        type = TCG_TYPE_I64;
-    } else {
-        type = TCG_TYPE_I32;
-    }
-
     /* Convert movi to mov with constant temp. */
-    tv = tcg_constant_internal(type, val);
+    TCGTemp *tv = tcg_constant_internal(ctx->type, val);
+
     init_ts_info(ctx, tv);
     return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }
@@ -420,11 +417,11 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     }
 }
 
-static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
+static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
+                                    uint64_t x, uint64_t y)
 {
-    const TCGOpDef *def = &tcg_op_defs[op];
     uint64_t res = do_constant_folding_2(op, x, y);
-    if (!(def->flags & TCG_OPF_64BIT)) {
+    if (type == TCG_TYPE_I32) {
         res = (int32_t)res;
     }
     return res;
@@ -510,19 +507,21 @@ static bool do_constant_folding_cond_eq(TCGCond c)
  * Return -1 if the condition can't be simplified,
  * and the result of the condition (0 or 1) if it can.
  */
-static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
+static int do_constant_folding_cond(TCGType type, TCGArg x,
                                     TCGArg y, TCGCond c)
 {
     uint64_t xv = arg_info(x)->val;
     uint64_t yv = arg_info(y)->val;
 
     if (arg_is_const(x) && arg_is_const(y)) {
-        const TCGOpDef *def = &tcg_op_defs[op];
-        tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
-        if (def->flags & TCG_OPF_64BIT) {
-            return do_constant_folding_cond_64(xv, yv, c);
-        } else {
+        switch (type) {
+        case TCG_TYPE_I32:
             return do_constant_folding_cond_32(xv, yv, c);
+        case TCG_TYPE_I64:
+            return do_constant_folding_cond_64(xv, yv, c);
+        default:
+            /* Only scalar comparisons are optimizable */
+            return -1;
         }
     } else if (args_are_copies(x, y)) {
         return do_constant_folding_cond_eq(c);
@@ -677,7 +676,7 @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
         uint64_t t;
 
         t = arg_info(op->args[1])->val;
-        t = do_constant_folding(op->opc, t, 0);
+        t = do_constant_folding(op->opc, ctx->type, t, 0);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
     return false;
@@ -689,7 +688,7 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
         uint64_t t1 = arg_info(op->args[1])->val;
         uint64_t t2 = arg_info(op->args[2])->val;
 
-        t1 = do_constant_folding(op->opc, t1, t2);
+        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
     }
     return false;
@@ -791,7 +790,7 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[2];
-    int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
+    int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
 
     if (i == 0) {
         tcg_op_remove(ctx->tcg, op);
@@ -836,7 +835,7 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
          * Simplify EQ/NE comparisons where one of the pairs
          * can be simplified.
          */
-        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
+        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                      op->args[2], cond);
         switch (i ^ inv) {
         case 0:
@@ -845,7 +844,7 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
             goto do_brcond_high;
         }
 
-        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
+        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                      op->args[3], cond);
         switch (i ^ inv) {
         case 0:
@@ -887,7 +886,7 @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     if (arg_is_const(op->args[1])) {
         uint64_t t = arg_info(op->args[1])->val;
 
-        t = do_constant_folding(op->opc, t, op->args[2]);
+        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
     return false;
@@ -931,7 +930,7 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         uint64_t t = arg_info(op->args[1])->val;
 
         if (t != 0) {
-            t = do_constant_folding(op->opc, t, 0);
+            t = do_constant_folding(op->opc, ctx->type, t, 0);
             return tcg_opt_gen_movi(ctx, op, op->args[0], t);
         }
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
@@ -1063,9 +1062,8 @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
 
 static bool fold_movcond(OptContext *ctx, TCGOp *op)
 {
-    TCGOpcode opc = op->opc;
     TCGCond cond = op->args[5];
-    int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
+    int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
 
     if (i >= 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
@@ -1074,9 +1072,18 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
     if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
         uint64_t tv = arg_info(op->args[3])->val;
         uint64_t fv = arg_info(op->args[4])->val;
+        TCGOpcode opc;
 
-        opc = (opc == INDEX_op_movcond_i32
-               ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
+        switch (ctx->type) {
+        case TCG_TYPE_I32:
+            opc = INDEX_op_setcond_i32;
+            break;
+        case TCG_TYPE_I64:
+            opc = INDEX_op_setcond_i64;
+            break;
+        default:
+            g_assert_not_reached();
+        }
 
         if (tv == 1 && fv == 0) {
             op->opc = opc;
@@ -1181,7 +1188,7 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
 static bool fold_setcond(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[3];
-    int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
+    int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
 
     if (i >= 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], i);
@@ -1220,7 +1227,7 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
          * Simplify EQ/NE comparisons where one of the pairs
          * can be simplified.
          */
-        i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
+        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                      op->args[3], cond);
         switch (i ^ inv) {
         case 0:
@@ -1229,7 +1236,7 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
             goto do_setcond_high;
         }
 
-        i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
+        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                      op->args[4], cond);
         switch (i ^ inv) {
         case 0:
@@ -1331,6 +1338,15 @@ void tcg_optimize(TCGContext *s)
         init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
         copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
 
+        /* Pre-compute the type of the operation. */
+        if (def->flags & TCG_OPF_VECTOR) {
+            ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
+        } else if (def->flags & TCG_OPF_64BIT) {
+            ctx.type = TCG_TYPE_I64;
+        } else {
+            ctx.type = TCG_TYPE_I32;
+        }
+
         /* For commutative operations make constant second argument */
         switch (opc) {
         CASE_OP_32_64_VEC(add):
@@ -1411,19 +1427,24 @@ void tcg_optimize(TCGContext *s)
                     /* Proceed with possible constant folding. */
                     break;
                 }
-                if (opc == INDEX_op_sub_i32) {
+                switch (ctx.type) {
+                case TCG_TYPE_I32:
                     neg_op = INDEX_op_neg_i32;
                     have_neg = TCG_TARGET_HAS_neg_i32;
-                } else if (opc == INDEX_op_sub_i64) {
+                    break;
+                case TCG_TYPE_I64:
                     neg_op = INDEX_op_neg_i64;
                     have_neg = TCG_TARGET_HAS_neg_i64;
-                } else if (TCG_TARGET_HAS_neg_vec) {
-                    TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
-                    unsigned vece = TCGOP_VECE(op);
-                    neg_op = INDEX_op_neg_vec;
-                    have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
-                } else {
                     break;
+                case TCG_TYPE_V64:
+                case TCG_TYPE_V128:
+                case TCG_TYPE_V256:
+                    neg_op = INDEX_op_neg_vec;
+                    have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
+                                                   TCGOP_VECE(op)) > 0;
+                    break;
+                default:
+                    g_assert_not_reached();
                 }
                 if (!have_neg) {
                     break;
@@ -1476,15 +1497,23 @@ void tcg_optimize(TCGContext *s)
                 TCGOpcode not_op;
                 bool have_not;
 
-                if (def->flags & TCG_OPF_VECTOR) {
-                    not_op = INDEX_op_not_vec;
-                    have_not = TCG_TARGET_HAS_not_vec;
-                } else if (def->flags & TCG_OPF_64BIT) {
-                    not_op = INDEX_op_not_i64;
-                    have_not = TCG_TARGET_HAS_not_i64;
-                } else {
+                switch (ctx.type) {
+                case TCG_TYPE_I32:
                     not_op = INDEX_op_not_i32;
                     have_not = TCG_TARGET_HAS_not_i32;
+                    break;
+                case TCG_TYPE_I64:
+                    not_op = INDEX_op_not_i64;
+                    have_not = TCG_TARGET_HAS_not_i64;
+                    break;
+                case TCG_TYPE_V64:
+                case TCG_TYPE_V128:
+                case TCG_TYPE_V256:
+                    not_op = INDEX_op_not_vec;
+                    have_not = TCG_TARGET_HAS_not_vec;
+                    break;
+                default:
+                    g_assert_not_reached();
                 }
                 if (!have_not) {
                     break;
@@ -1755,7 +1784,7 @@ void tcg_optimize(TCGContext *s)
            below, we can ignore high bits, but for further optimizations we
            need to record that the high bits contain garbage.  */
         partmask = z_mask;
-        if (!(def->flags & TCG_OPF_64BIT)) {
+        if (ctx.type == TCG_TYPE_I32) {
             z_mask |= ~(tcg_target_ulong)0xffffffffu;
             partmask &= 0xffffffffu;
             affected &= 0xffffffffu;
-- 
2.25.1



Thread overview: 58+ messages
2021-10-28  2:40 [PULL 00/56] tcg patch queue Richard Henderson
2021-10-28  2:40 ` [PULL 01/56] qemu/int128: Add int128_{not,xor} Richard Henderson
2021-10-28  2:40 ` [PULL 02/56] host-utils: move checks out of divu128/divs128 Richard Henderson
2021-10-28  2:40 ` [PULL 03/56] host-utils: move udiv_qrnnd() to host-utils Richard Henderson
2021-10-28  2:40 ` [PULL 04/56] host-utils: add 128-bit quotient support to divu128/divs128 Richard Henderson
2021-10-28  2:40 ` [PULL 05/56] host-utils: add unit tests for divu128/divs128 Richard Henderson
2021-10-28  2:40 ` [PULL 06/56] tcg/optimize: Rename "mask" to "z_mask" Richard Henderson
2021-10-28  2:40 ` [PULL 07/56] tcg/optimize: Split out OptContext Richard Henderson
2021-10-28  2:40 ` [PULL 08/56] tcg/optimize: Remove do_default label Richard Henderson
2021-10-28  2:40 ` [PULL 09/56] tcg/optimize: Change tcg_opt_gen_{mov,movi} interface Richard Henderson
2021-10-28  2:40 ` [PULL 10/56] tcg/optimize: Move prev_mb into OptContext Richard Henderson
2021-10-28  2:40 ` [PULL 11/56] tcg/optimize: Split out init_arguments Richard Henderson
2021-10-28  2:40 ` [PULL 12/56] tcg/optimize: Split out copy_propagate Richard Henderson
2021-10-28  2:40 ` [PULL 13/56] tcg/optimize: Split out fold_call Richard Henderson
2021-10-28  2:40 ` [PULL 14/56] tcg/optimize: Drop nb_oargs, nb_iargs locals Richard Henderson
2021-10-28  2:40 ` [PULL 15/56] tcg/optimize: Change fail return for do_constant_folding_cond* Richard Henderson
2021-10-28  2:40 ` [PULL 16/56] tcg/optimize: Return true from tcg_opt_gen_{mov,movi} Richard Henderson
2021-10-28  2:40 ` [PULL 17/56] tcg/optimize: Split out finish_folding Richard Henderson
2021-10-28  2:40 ` [PULL 18/56] tcg/optimize: Use a boolean to avoid a mass of continues Richard Henderson
2021-10-28  2:40 ` [PULL 19/56] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st} Richard Henderson
2021-10-28  2:40 ` [PULL 20/56] tcg/optimize: Split out fold_const{1,2} Richard Henderson
2021-10-28  2:40 ` [PULL 21/56] tcg/optimize: Split out fold_setcond2 Richard Henderson
2021-10-28  2:40 ` [PULL 22/56] tcg/optimize: Split out fold_brcond2 Richard Henderson
2021-10-28  2:40 ` [PULL 23/56] tcg/optimize: Split out fold_brcond Richard Henderson
2021-10-28  2:40 ` [PULL 24/56] tcg/optimize: Split out fold_setcond Richard Henderson
2021-10-28  2:41 ` [PULL 25/56] tcg/optimize: Split out fold_mulu2_i32 Richard Henderson
2021-10-28  2:41 ` [PULL 26/56] tcg/optimize: Split out fold_addsub2_i32 Richard Henderson
2021-10-28  2:41 ` [PULL 27/56] tcg/optimize: Split out fold_movcond Richard Henderson
2021-10-28  2:41 ` [PULL 28/56] tcg/optimize: Split out fold_extract2 Richard Henderson
2021-10-28  2:41 ` [PULL 29/56] tcg/optimize: Split out fold_extract, fold_sextract Richard Henderson
2021-10-28  2:41 ` [PULL 30/56] tcg/optimize: Split out fold_deposit Richard Henderson
2021-10-28  2:41 ` [PULL 31/56] tcg/optimize: Split out fold_count_zeros Richard Henderson
2021-10-28  2:41 ` [PULL 32/56] tcg/optimize: Split out fold_bswap Richard Henderson
2021-10-28  2:41 ` [PULL 33/56] tcg/optimize: Split out fold_dup, fold_dup2 Richard Henderson
2021-10-28  2:41 ` [PULL 34/56] tcg/optimize: Split out fold_mov Richard Henderson
2021-10-28  2:41 ` [PULL 35/56] tcg/optimize: Split out fold_xx_to_i Richard Henderson
2021-10-28  2:41 ` [PULL 36/56] tcg/optimize: Split out fold_xx_to_x Richard Henderson
2021-10-28  2:41 ` [PULL 37/56] tcg/optimize: Split out fold_xi_to_i Richard Henderson
2021-10-28  2:41 ` [PULL 38/56] tcg/optimize: Add type to OptContext Richard Henderson [this message]
2021-10-28  2:41 ` [PULL 39/56] tcg/optimize: Split out fold_to_not Richard Henderson
2021-10-28  2:41 ` [PULL 40/56] tcg/optimize: Split out fold_sub_to_neg Richard Henderson
2021-10-28  2:41 ` [PULL 41/56] tcg/optimize: Split out fold_xi_to_x Richard Henderson
2021-10-28  2:41 ` [PULL 42/56] tcg/optimize: Split out fold_ix_to_i Richard Henderson
2021-10-28  2:41 ` [PULL 43/56] tcg/optimize: Split out fold_masks Richard Henderson
2021-10-28  2:41 ` [PULL 44/56] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies Richard Henderson
2021-10-28  2:41 ` [PULL 45/56] tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops Richard Henderson
2021-10-28  2:41 ` [PULL 46/56] tcg/optimize: Sink commutative operand swapping into fold functions Richard Henderson
2021-10-28  2:41 ` [PULL 47/56] tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values Richard Henderson
2021-10-28  2:41 ` [PULL 48/56] tcg/optimize: Use fold_xx_to_i for orc Richard Henderson
2021-10-28  2:41 ` [PULL 49/56] tcg/optimize: Use fold_xi_to_x for mul Richard Henderson
2021-10-28  2:41 ` [PULL 50/56] tcg/optimize: Use fold_xi_to_x for div Richard Henderson
2021-10-28  2:41 ` [PULL 51/56] tcg/optimize: Use fold_xx_to_i for rem Richard Henderson
2021-10-28  2:41 ` [PULL 52/56] tcg/optimize: Optimize sign extensions Richard Henderson
2021-10-28  2:41 ` [PULL 53/56] tcg/optimize: Propagate sign info for logical operations Richard Henderson
2021-10-28  2:41 ` [PULL 54/56] tcg/optimize: Propagate sign info for setcond Richard Henderson
2021-10-28  2:41 ` [PULL 55/56] tcg/optimize: Propagate sign info for bit counting Richard Henderson
2021-10-28  2:41 ` [PULL 56/56] tcg/optimize: Propagate sign info for shifting Richard Henderson
2021-10-28 14:51 ` [PULL 00/56] tcg patch queue Richard Henderson
