From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: luis.pires@eldorado.org.br, alex.bennee@linaro.org, f4bug@amsat.org
Subject: [PATCH v4 40/51] tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops
Date: Tue, 26 Oct 2021 16:09:32 -0700
Message-ID: <20211026230943.1225890-41-richard.henderson@linaro.org>
In-Reply-To: <20211026230943.1225890-1-richard.henderson@linaro.org>

Rename fold_addsub2_i32 to fold_addsub2.
Use Int128 for the 128-bit intermediate so that the same helper can
also fold the 64-bit add2/sub2 opcodes.
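
For illustration only, not part of the patch: a minimal standalone
sketch of the same widening arithmetic, using the compiler's native
__int128 in place of QEMU's Int128 helpers (int128_make128,
int128_add, int128_sub, int128_getlo, int128_gethi); the fold_wide
name and the small driver are hypothetical.

    /* Build a 128-bit value from two 64-bit halves, add or subtract,
     * then split the result back into halves -- the same shape as the
     * Int128 path added below. */
    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void fold_wide(uint64_t al, uint64_t ah,
                          uint64_t bl, uint64_t bh, bool add,
                          uint64_t *rl, uint64_t *rh)
    {
        unsigned __int128 a = ((unsigned __int128)ah << 64) | al;
        unsigned __int128 b = ((unsigned __int128)bh << 64) | bl;

        a = add ? a + b : a - b;

        *rl = (uint64_t)a;          /* cf. int128_getlo() */
        *rh = (uint64_t)(a >> 64);  /* cf. int128_gethi() */
    }

    int main(void)
    {
        uint64_t rl, rh;

        /* UINT64_MAX + 1 carries into the high half: prints 1:0. */
        fold_wide(UINT64_MAX, 0, 1, 0, true, &rl, &rh);
        printf("%" PRIx64 ":%" PRIx64 "\n", rh, rl);
        return 0;
    }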

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 21 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 7597655a43..6189637dd8 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -24,6 +24,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/int128.h"
 #include "tcg/tcg-op.h"
 #include "tcg-internal.h"
 
@@ -838,37 +839,59 @@ static bool fold_add(OptContext *ctx, TCGOp *op)
     return false;
 }
 
-static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
+static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
 {
     if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
         arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
-        uint32_t al = arg_info(op->args[2])->val;
-        uint32_t ah = arg_info(op->args[3])->val;
-        uint32_t bl = arg_info(op->args[4])->val;
-        uint32_t bh = arg_info(op->args[5])->val;
-        uint64_t a = ((uint64_t)ah << 32) | al;
-        uint64_t b = ((uint64_t)bh << 32) | bl;
+        uint64_t al = arg_info(op->args[2])->val;
+        uint64_t ah = arg_info(op->args[3])->val;
+        uint64_t bl = arg_info(op->args[4])->val;
+        uint64_t bh = arg_info(op->args[5])->val;
         TCGArg rl, rh;
-        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+        TCGOp *op2;
 
-        if (add) {
-            a += b;
+        if (ctx->type == TCG_TYPE_I32) {
+            uint64_t a = deposit64(al, 32, 32, ah);
+            uint64_t b = deposit64(bl, 32, 32, bh);
+
+            if (add) {
+                a += b;
+            } else {
+                a -= b;
+            }
+
+            al = sextract64(a, 0, 32);
+            ah = sextract64(a, 32, 32);
         } else {
-            a -= b;
+            Int128 a = int128_make128(al, ah);
+            Int128 b = int128_make128(bl, bh);
+
+            if (add) {
+                a = int128_add(a, b);
+            } else {
+                a = int128_sub(a, b);
+            }
+
+            al = int128_getlo(a);
+            ah = int128_gethi(a);
         }
 
         rl = op->args[0];
         rh = op->args[1];
-        tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
-        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
+
+        /* The proper opcode is supplied by tcg_opt_gen_mov. */
+        op2 = tcg_op_insert_before(ctx->tcg, op, 0);
+
+        tcg_opt_gen_movi(ctx, op, rl, al);
+        tcg_opt_gen_movi(ctx, op2, rh, ah);
         return true;
     }
     return false;
 }
 
-static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_add2(OptContext *ctx, TCGOp *op)
 {
-    return fold_addsub2_i32(ctx, op, true);
+    return fold_addsub2(ctx, op, true);
 }
 
 static bool fold_and(OptContext *ctx, TCGOp *op)
@@ -1725,9 +1748,9 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
     return false;
 }
 
-static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_sub2(OptContext *ctx, TCGOp *op)
 {
-    return fold_addsub2_i32(ctx, op, false);
+    return fold_addsub2(ctx, op, false);
 }
 
 static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
@@ -1873,8 +1896,8 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(add):
             done = fold_add(&ctx, op);
             break;
-        case INDEX_op_add2_i32:
-            done = fold_add2_i32(&ctx, op);
+        CASE_OP_32_64(add2):
+            done = fold_add2(&ctx, op);
             break;
         CASE_OP_32_64_VEC(and):
             done = fold_and(&ctx, op);
@@ -2011,8 +2034,8 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(sub):
             done = fold_sub(&ctx, op);
             break;
-        case INDEX_op_sub2_i32:
-            done = fold_sub2_i32(&ctx, op);
+        CASE_OP_32_64(sub2):
+            done = fold_sub2(&ctx, op);
             break;
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
-- 
2.25.1
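
A note for readers, not part of the patch: the CASE_OP_32_64() macro
used in the switch cases above is already defined in tcg/optimize.c
and expands to both the _i32 and _i64 cases, roughly as sketched
below, which is why fold_add2/fold_sub2 now see add2/sub2 in both
widths.

    /* Approximate sketch of the existing macro in tcg/optimize.c. */
    #define CASE_OP_32_64(x)                     \
            glue(glue(case INDEX_op_, x), _i32): \
            glue(glue(case INDEX_op_, x), _i64)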



Thread overview: 75+ messages
2021-10-26 23:08 [PATCH v4 00/51] tcg: optimize redundant sign extensions Richard Henderson
2021-10-26 23:08 ` [PATCH v4 01/51] tcg/optimize: Rename "mask" to "z_mask" Richard Henderson
2021-10-26 23:08 ` [PATCH v4 02/51] tcg/optimize: Split out OptContext Richard Henderson
2021-10-26 23:08 ` [PATCH v4 03/51] tcg/optimize: Remove do_default label Richard Henderson
2021-10-26 23:08 ` [PATCH v4 04/51] tcg/optimize: Change tcg_opt_gen_{mov, movi} interface Richard Henderson
2021-10-26 23:08 ` [PATCH v4 05/51] tcg/optimize: Move prev_mb into OptContext Richard Henderson
2021-10-26 23:08 ` [PATCH v4 06/51] tcg/optimize: Split out init_arguments Richard Henderson
2021-10-26 23:08 ` [PATCH v4 07/51] tcg/optimize: Split out copy_propagate Richard Henderson
2021-10-26 23:09 ` [PATCH v4 08/51] tcg/optimize: Split out fold_call Richard Henderson
2021-10-26 23:09 ` [PATCH v4 09/51] tcg/optimize: Drop nb_oargs, nb_iargs locals Richard Henderson
2021-10-26 23:09 ` [PATCH v4 10/51] tcg/optimize: Change fail return for do_constant_folding_cond* Richard Henderson
2021-10-26 23:09 ` [PATCH v4 11/51] tcg/optimize: Return true from tcg_opt_gen_{mov, movi} Richard Henderson
2021-10-26 23:09 ` [PATCH v4 12/51] tcg/optimize: Split out finish_folding Richard Henderson
2021-10-26 23:09 ` [PATCH v4 13/51] tcg/optimize: Use a boolean to avoid a mass of continues Richard Henderson
2021-10-26 23:09 ` [PATCH v4 14/51] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st} Richard Henderson
2021-10-26 23:09 ` [PATCH v4 15/51] tcg/optimize: Split out fold_const{1,2} Richard Henderson
2021-10-26 23:09 ` [PATCH v4 16/51] tcg/optimize: Split out fold_setcond2 Richard Henderson
2021-10-26 23:09 ` [PATCH v4 17/51] tcg/optimize: Split out fold_brcond2 Richard Henderson
2021-10-26 23:09 ` [PATCH v4 18/51] tcg/optimize: Split out fold_brcond Richard Henderson
2021-10-26 23:09 ` [PATCH v4 19/51] tcg/optimize: Split out fold_setcond Richard Henderson
2021-10-26 23:09 ` [PATCH v4 20/51] tcg/optimize: Split out fold_mulu2_i32 Richard Henderson
2021-10-26 23:09 ` [PATCH v4 21/51] tcg/optimize: Split out fold_addsub2_i32 Richard Henderson
2021-10-26 23:09 ` [PATCH v4 22/51] tcg/optimize: Split out fold_movcond Richard Henderson
2021-10-26 23:09 ` [PATCH v4 23/51] tcg/optimize: Split out fold_extract2 Richard Henderson
2021-10-26 23:09 ` [PATCH v4 24/51] tcg/optimize: Split out fold_extract, fold_sextract Richard Henderson
2021-10-26 23:09 ` [PATCH v4 25/51] tcg/optimize: Split out fold_deposit Richard Henderson
2021-10-26 23:09 ` [PATCH v4 26/51] tcg/optimize: Split out fold_count_zeros Richard Henderson
2021-10-26 23:09 ` [PATCH v4 27/51] tcg/optimize: Split out fold_bswap Richard Henderson
2021-10-26 23:09 ` [PATCH v4 28/51] tcg/optimize: Split out fold_dup, fold_dup2 Richard Henderson
2021-10-26 23:09 ` [PATCH v4 29/51] tcg/optimize: Split out fold_mov Richard Henderson
2021-10-26 23:09 ` [PATCH v4 30/51] tcg/optimize: Split out fold_xx_to_i Richard Henderson
2021-10-26 23:09 ` [PATCH v4 31/51] tcg/optimize: Split out fold_xx_to_x Richard Henderson
2021-10-26 23:09 ` [PATCH v4 32/51] tcg/optimize: Split out fold_xi_to_i Richard Henderson
2021-10-26 23:09 ` [PATCH v4 33/51] tcg/optimize: Add type to OptContext Richard Henderson
2021-10-26 23:09 ` [PATCH v4 34/51] tcg/optimize: Split out fold_to_not Richard Henderson
2021-10-27 13:32   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 35/51] tcg/optimize: Split out fold_sub_to_neg Richard Henderson
2021-10-26 23:09 ` [PATCH v4 36/51] tcg/optimize: Split out fold_xi_to_x Richard Henderson
2021-10-27 13:32   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 37/51] tcg/optimize: Split out fold_ix_to_i Richard Henderson
2021-10-26 23:09 ` [PATCH v4 38/51] tcg/optimize: Split out fold_masks Richard Henderson
2021-10-27 13:37   ` Luis Fernando Fujita Pires
2021-10-27 16:35     ` Richard Henderson
2021-10-26 23:09 ` [PATCH v4 39/51] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies Richard Henderson
2021-10-27 20:28   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` Richard Henderson [this message]
2021-10-27 20:28   ` [PATCH v4 40/51] tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 41/51] tcg/optimize: Sink commutative operand swapping into fold functions Richard Henderson
2021-10-27 20:32   ` Luis Fernando Fujita Pires
2021-10-28  0:03     ` Richard Henderson
2021-10-26 23:09 ` [PATCH v4 42/51] tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values Richard Henderson
2021-10-27 20:32   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 43/51] tcg/optimize: Use fold_xx_to_i for orc Richard Henderson
2021-10-27 14:07   ` Philippe Mathieu-Daudé
2021-10-27 20:36   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 44/51] tcg/optimize: Use fold_xi_to_x for mul Richard Henderson
2021-10-27 13:56   ` Philippe Mathieu-Daudé
2021-10-27 20:37   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 45/51] tcg/optimize: Use fold_xi_to_x for div Richard Henderson
2021-10-27 13:51   ` Philippe Mathieu-Daudé
2021-10-27 20:37   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 46/51] tcg/optimize: Use fold_xx_to_i for rem Richard Henderson
2021-10-27 13:51   ` Philippe Mathieu-Daudé
2021-10-27 20:42   ` Luis Fernando Fujita Pires
2021-10-28  0:08     ` Richard Henderson
2021-10-26 23:09 ` [PATCH v4 47/51] tcg/optimize: Optimize sign extensions Richard Henderson
2021-10-27 20:42   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 48/51] tcg/optimize: Propagate sign info for logical operations Richard Henderson
2021-10-27 20:43   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 49/51] tcg/optimize: Propagate sign info for setcond Richard Henderson
2021-10-27 20:43   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 50/51] tcg/optimize: Propagate sign info for bit counting Richard Henderson
2021-10-27 20:43   ` Luis Fernando Fujita Pires
2021-10-26 23:09 ` [PATCH v4 51/51] tcg/optimize: Propagate sign info for shifting Richard Henderson
2021-10-27 20:44   ` Luis Fernando Fujita Pires
