From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: luis.pires@eldorado.org.br, alex.bennee@linaro.org
Subject: [PATCH v3 15/48] tcg/optimize: Split out fold_const{1,2}
Date: Thu, 21 Oct 2021 14:05:06 -0700
Message-ID: <20211021210539.825582-16-richard.henderson@linaro.org>
In-Reply-To: <20211021210539.825582-1-richard.henderson@linaro.org>

Split out a whole bunch of placeholder functions, which are
currently identical.  That won't last as more code gets moved.
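
For orientation (context, not part of the diff below): each new fold_<op> helper is
called from the big switch in tcg_optimize(), which records whether folding completed.
A minimal sketch of the dispatch pattern, using the `done' flag introduced earlier in
this series:

    bool done = false;

    switch (opc) {
    CASE_OP_32_64_VEC(add):
        done = fold_add(&ctx, op);
        break;
    /* ... one case group per fold_<op> helper ... */
    }

    if (!done) {
        finish_folding(&ctx, op);   /* generic constant/copy bookkeeping */
    }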

Use CASE_OP_32_64_VEC for some logical operators that previously
missed the addition of vectors.
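
For reference, the CASE_OP_* macros used in the switch are defined near the top of
tcg/optimize.c and expand roughly as follows (quoted for context, not part of this
patch):

    #define CASE_OP_32_64(x)                        \
            glue(glue(case INDEX_op_, x), _i32):    \
            glue(glue(case INDEX_op_, x), _i64)

    #define CASE_OP_32_64_VEC(x)                    \
            glue(glue(case INDEX_op_, x), _i32):    \
            glue(glue(case INDEX_op_, x), _i64):    \
            glue(glue(case INDEX_op_, x), _vec)

so e.g. CASE_OP_32_64_VEC(and) matches the _i32, _i64 and _vec forms of the opcode in
a single case group.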

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
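
Illustration only, not part of the change: when both inputs are known constants,
fold_const2() computes the result once and rewrites the op as a move of a constant.
Schematically, with made-up values:

    uint64_t t1 = 0xff00, t2 = 0x0ff0;                  /* made-up constant inputs */
    uint64_t r  = do_constant_folding(op->opc, t1, t2); /* 0x0f00 for and_i32 */
    tcg_opt_gen_movi(&ctx, op, op->args[0], r);         /* op becomes a mov of 0x0f00 */

fold_const1() is the single-operand analogue, passing 0 for the unused second operand.
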
 tcg/optimize.c | 254 +++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 202 insertions(+), 52 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 159a5a9ee5..e66d935808 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -660,6 +660,60 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
     }
 }
 
+/*
+ * The fold_* functions return true when processing is complete,
+ * usually by folding the operation to a constant or to a copy,
+ * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
+ * like collect information about the value produced, for use in
+ * optimizing a subsequent operation.
+ *
+ * These first fold_* functions are all helpers, used by other
+ * folders for more specific operations.
+ */
+
+static bool fold_const1(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t;
+
+        t = arg_info(op->args[1])->val;
+        t = do_constant_folding(op->opc, t, 0);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
+static bool fold_const2(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t t1 = arg_info(op->args[1])->val;
+        uint64_t t2 = arg_info(op->args[2])->val;
+
+        t1 = do_constant_folding(op->opc, t1, t2);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    }
+    return false;
+}
+
+/*
+ * These outermost fold_<op> functions are sorted alphabetically.
+ */
+
+static bool fold_add(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_and(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_andc(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
 static bool fold_call(OptContext *ctx, TCGOp *op)
 {
     TCGContext *s = ctx->tcg;
@@ -692,6 +746,26 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_exts(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_extu(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
 static bool fold_mb(OptContext *ctx, TCGOp *op)
 {
     /* Eliminate duplicate and redundant fence instructions.  */
@@ -716,6 +790,41 @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_multiply(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_nand(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_neg(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_nor(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_not(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_or(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_orc(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
 {
     /* Opcodes that touch guest memory stop the mb optimization.  */
@@ -730,6 +839,21 @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+static bool fold_shift(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_xor(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
@@ -1276,26 +1400,6 @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
-        CASE_OP_32_64(not):
-        CASE_OP_32_64(neg):
-        CASE_OP_32_64(ext8s):
-        CASE_OP_32_64(ext8u):
-        CASE_OP_32_64(ext16s):
-        CASE_OP_32_64(ext16u):
-        CASE_OP_32_64(ctpop):
-        case INDEX_op_ext32s_i64:
-        case INDEX_op_ext32u_i64:
-        case INDEX_op_ext_i32_i64:
-        case INDEX_op_extu_i32_i64:
-        case INDEX_op_extrl_i64_i32:
-        case INDEX_op_extrh_i64_i32:
-            if (arg_is_const(op->args[1])) {
-                tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         CASE_OP_32_64(bswap16):
         CASE_OP_32_64(bswap32):
         case INDEX_op_bswap64_i64:
@@ -1307,36 +1411,6 @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
-        CASE_OP_32_64(add):
-        CASE_OP_32_64(sub):
-        CASE_OP_32_64(mul):
-        CASE_OP_32_64(or):
-        CASE_OP_32_64(and):
-        CASE_OP_32_64(xor):
-        CASE_OP_32_64(shl):
-        CASE_OP_32_64(shr):
-        CASE_OP_32_64(sar):
-        CASE_OP_32_64(rotl):
-        CASE_OP_32_64(rotr):
-        CASE_OP_32_64(andc):
-        CASE_OP_32_64(orc):
-        CASE_OP_32_64(eqv):
-        CASE_OP_32_64(nand):
-        CASE_OP_32_64(nor):
-        CASE_OP_32_64(muluh):
-        CASE_OP_32_64(mulsh):
-        CASE_OP_32_64(div):
-        CASE_OP_32_64(divu):
-        CASE_OP_32_64(rem):
-        CASE_OP_32_64(remu):
-            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
-                                          arg_info(op->args[2])->val);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         CASE_OP_32_64(clz):
         CASE_OP_32_64(ctz):
             if (arg_is_const(op->args[1])) {
@@ -1637,9 +1711,71 @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
+        default:
+            break;
+
+        /* ---------------------------------------------------------- */
+        /* Sorted alphabetically by opcode as much as possible. */
+
+        CASE_OP_32_64_VEC(add):
+            done = fold_add(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(and):
+            done = fold_and(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(andc):
+            done = fold_andc(&ctx, op);
+            break;
+        CASE_OP_32_64(ctpop):
+            done = fold_ctpop(&ctx, op);
+            break;
+        CASE_OP_32_64(div):
+        CASE_OP_32_64(divu):
+            done = fold_const2(&ctx, op);
+            break;
+        CASE_OP_32_64(eqv):
+            done = fold_eqv(&ctx, op);
+            break;
+        CASE_OP_32_64(ext8s):
+        CASE_OP_32_64(ext16s):
+        case INDEX_op_ext32s_i64:
+        case INDEX_op_ext_i32_i64:
+            done = fold_exts(&ctx, op);
+            break;
+        CASE_OP_32_64(ext8u):
+        CASE_OP_32_64(ext16u):
+        case INDEX_op_ext32u_i64:
+        case INDEX_op_extu_i32_i64:
+        case INDEX_op_extrl_i64_i32:
+        case INDEX_op_extrh_i64_i32:
+            done = fold_extu(&ctx, op);
+            break;
         case INDEX_op_mb:
             done = fold_mb(&ctx, op);
             break;
+        CASE_OP_32_64(mul):
+        CASE_OP_32_64(mulsh):
+        CASE_OP_32_64(muluh):
+            done = fold_multiply(&ctx, op);
+            break;
+        CASE_OP_32_64(nand):
+            done = fold_nand(&ctx, op);
+            break;
+        CASE_OP_32_64(neg):
+            done = fold_neg(&ctx, op);
+            break;
+        CASE_OP_32_64(nor):
+            done = fold_nor(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(not):
+            done = fold_not(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(or):
+            done = fold_or(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(orc):
+            done = fold_orc(&ctx, op);
+            break;
         case INDEX_op_qemu_ld_i32:
         case INDEX_op_qemu_ld_i64:
             done = fold_qemu_ld(&ctx, op);
@@ -1649,8 +1785,22 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_qemu_st_i64:
             done = fold_qemu_st(&ctx, op);
             break;
-
-        default:
+        CASE_OP_32_64(rem):
+        CASE_OP_32_64(remu):
+            done = fold_const2(&ctx, op);
+            break;
+        CASE_OP_32_64(rotl):
+        CASE_OP_32_64(rotr):
+        CASE_OP_32_64(sar):
+        CASE_OP_32_64(shl):
+        CASE_OP_32_64(shr):
+            done = fold_shift(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(sub):
+            done = fold_sub(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(xor):
+            done = fold_xor(&ctx, op);
             break;
         }
 
-- 
2.25.1



