From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PATCH v2 15/48] tcg/optimize: Split out fold_const{1,2}
Date: Thu,  7 Oct 2021 12:54:23 -0700
Message-ID: <20211007195456.1168070-16-richard.henderson@linaro.org>
In-Reply-To: <20211007195456.1168070-1-richard.henderson@linaro.org>

Split out a whole bunch of placeholder functions, which are
currently identical.  That won't last as more code gets moved.

Use CASE_OP_32_64_VEC for some logical operators that previously
missed the addition of vector opcodes.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
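A note for reviewers: CASE_OP_32_64 and CASE_OP_32_64_VEC are the
case-label macros defined near the top of tcg/optimize.c.  Modulo
exact formatting, they expand along these lines:

    #define CASE_OP_32_64(x)                        \
            glue(glue(case INDEX_op_, x), _i32):    \
            glue(glue(case INDEX_op_, x), _i64)

    #define CASE_OP_32_64_VEC(x)                    \
            glue(glue(case INDEX_op_, x), _i32):    \
            glue(glue(case INDEX_op_, x), _i64):    \
            glue(glue(case INDEX_op_, x), _vec)

so e.g. CASE_OP_32_64_VEC(add) provides the INDEX_op_add_i32,
INDEX_op_add_i64 and INDEX_op_add_vec labels in one go.
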
 tcg/optimize.c | 254 +++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 202 insertions(+), 52 deletions(-)
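
Also for reference, the boolean returned by the fold_* functions is
consumed by the generic bookkeeping at the bottom of the main loop
(introduced by "tcg/optimize: Use a boolean to avoid a mass of
continues" and "tcg/optimize: Split out finish_folding" earlier in
this series).  A rough sketch of the shape after this patch:

    bool done = false;

    switch (opc) {
    CASE_OP_32_64_VEC(add):
        done = fold_add(&ctx, op);
        break;
    /* ... */
    }

    /* If no fold_* function fully handled the op, fall back to the
       generic path that records copies and known-bits masks.  */
    if (!done) {
        finish_folding(&ctx, op);
    }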

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 159a5a9ee5..e66d935808 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -660,6 +660,60 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
     }
 }
 
+/*
+ * The fold_* functions return true when processing is complete,
+ * usually by folding the operation to a constant or to a copy,
+ * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
+ * like collect information about the value produced, for use in
+ * optimizing a subsequent operation.
+ *
+ * These first fold_* functions are all helpers, used by other
+ * folders for more specific operations.
+ */
+
+static bool fold_const1(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t;
+
+        t = arg_info(op->args[1])->val;
+        t = do_constant_folding(op->opc, t, 0);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
+static bool fold_const2(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t t1 = arg_info(op->args[1])->val;
+        uint64_t t2 = arg_info(op->args[2])->val;
+
+        t1 = do_constant_folding(op->opc, t1, t2);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    }
+    return false;
+}
+
+/*
+ * These outermost fold_<op> functions are sorted alphabetically.
+ */
+
+static bool fold_add(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_and(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_andc(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
 static bool fold_call(OptContext *ctx, TCGOp *op)
 {
     TCGContext *s = ctx->tcg;
@@ -692,6 +746,26 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_exts(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_extu(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
 static bool fold_mb(OptContext *ctx, TCGOp *op)
 {
     /* Eliminate duplicate and redundant fence instructions.  */
@@ -716,6 +790,41 @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_multiply(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_nand(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_neg(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_nor(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_not(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_or(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_orc(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
 {
     /* Opcodes that touch guest memory stop the mb optimization.  */
@@ -730,6 +839,21 @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+static bool fold_shift(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_xor(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
@@ -1276,26 +1400,6 @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
-        CASE_OP_32_64(not):
-        CASE_OP_32_64(neg):
-        CASE_OP_32_64(ext8s):
-        CASE_OP_32_64(ext8u):
-        CASE_OP_32_64(ext16s):
-        CASE_OP_32_64(ext16u):
-        CASE_OP_32_64(ctpop):
-        case INDEX_op_ext32s_i64:
-        case INDEX_op_ext32u_i64:
-        case INDEX_op_ext_i32_i64:
-        case INDEX_op_extu_i32_i64:
-        case INDEX_op_extrl_i64_i32:
-        case INDEX_op_extrh_i64_i32:
-            if (arg_is_const(op->args[1])) {
-                tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         CASE_OP_32_64(bswap16):
         CASE_OP_32_64(bswap32):
         case INDEX_op_bswap64_i64:
@@ -1307,36 +1411,6 @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
-        CASE_OP_32_64(add):
-        CASE_OP_32_64(sub):
-        CASE_OP_32_64(mul):
-        CASE_OP_32_64(or):
-        CASE_OP_32_64(and):
-        CASE_OP_32_64(xor):
-        CASE_OP_32_64(shl):
-        CASE_OP_32_64(shr):
-        CASE_OP_32_64(sar):
-        CASE_OP_32_64(rotl):
-        CASE_OP_32_64(rotr):
-        CASE_OP_32_64(andc):
-        CASE_OP_32_64(orc):
-        CASE_OP_32_64(eqv):
-        CASE_OP_32_64(nand):
-        CASE_OP_32_64(nor):
-        CASE_OP_32_64(muluh):
-        CASE_OP_32_64(mulsh):
-        CASE_OP_32_64(div):
-        CASE_OP_32_64(divu):
-        CASE_OP_32_64(rem):
-        CASE_OP_32_64(remu):
-            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
-                                          arg_info(op->args[2])->val);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         CASE_OP_32_64(clz):
         CASE_OP_32_64(ctz):
             if (arg_is_const(op->args[1])) {
@@ -1637,9 +1711,71 @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
+        default:
+            break;
+
+        /* ---------------------------------------------------------- */
+        /* Sorted alphabetically by opcode as much as possible. */
+
+        CASE_OP_32_64_VEC(add):
+            done = fold_add(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(and):
+            done = fold_and(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(andc):
+            done = fold_andc(&ctx, op);
+            break;
+        CASE_OP_32_64(ctpop):
+            done = fold_ctpop(&ctx, op);
+            break;
+        CASE_OP_32_64(div):
+        CASE_OP_32_64(divu):
+            done = fold_const2(&ctx, op);
+            break;
+        CASE_OP_32_64(eqv):
+            done = fold_eqv(&ctx, op);
+            break;
+        CASE_OP_32_64(ext8s):
+        CASE_OP_32_64(ext16s):
+        case INDEX_op_ext32s_i64:
+        case INDEX_op_ext_i32_i64:
+            done = fold_exts(&ctx, op);
+            break;
+        CASE_OP_32_64(ext8u):
+        CASE_OP_32_64(ext16u):
+        case INDEX_op_ext32u_i64:
+        case INDEX_op_extu_i32_i64:
+        case INDEX_op_extrl_i64_i32:
+        case INDEX_op_extrh_i64_i32:
+            done = fold_extu(&ctx, op);
+            break;
         case INDEX_op_mb:
             done = fold_mb(&ctx, op);
             break;
+        CASE_OP_32_64(mul):
+        CASE_OP_32_64(mulsh):
+        CASE_OP_32_64(muluh):
+            done = fold_multiply(&ctx, op);
+            break;
+        CASE_OP_32_64(nand):
+            done = fold_nand(&ctx, op);
+            break;
+        CASE_OP_32_64(neg):
+            done = fold_neg(&ctx, op);
+            break;
+        CASE_OP_32_64(nor):
+            done = fold_nor(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(not):
+            done = fold_not(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(or):
+            done = fold_or(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(orc):
+            done = fold_orc(&ctx, op);
+            break;
         case INDEX_op_qemu_ld_i32:
         case INDEX_op_qemu_ld_i64:
             done = fold_qemu_ld(&ctx, op);
@@ -1649,8 +1785,22 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_qemu_st_i64:
             done = fold_qemu_st(&ctx, op);
             break;
-
-        default:
+        CASE_OP_32_64(rem):
+        CASE_OP_32_64(remu):
+            done = fold_const2(&ctx, op);
+            break;
+        CASE_OP_32_64(rotl):
+        CASE_OP_32_64(rotr):
+        CASE_OP_32_64(sar):
+        CASE_OP_32_64(shl):
+        CASE_OP_32_64(shr):
+            done = fold_shift(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(sub):
+            done = fold_sub(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(xor):
+            done = fold_xor(&ctx, op);
             break;
         }
 
-- 
2.25.1


