* [PULL 01/56] qemu/int128: Add int128_{not,xor}
From: Richard Henderson @ 2021-10-28 2:40 UTC
To: qemu-devel; +Cc: Frédéric Pétrot, Fabien Portas
From: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
Add not and xor operations on 128-bit integers.
Signed-off-by: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
Co-authored-by: Fabien Portas <fabien.portas@grenoble-inp.org>
Message-Id: <20211025122818.168890-3-frederic.petrot@univ-grenoble-alpes.fr>
[rth: Split out logical operations.]
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
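As a self-contained illustration of what the non-CONFIG_INT128 halves of
these helpers compute, here is a minimal sketch; the I128 type and the
i128_* names are local to the example, not QEMU API:

#include <stdint.h>
#include <assert.h>

typedef struct { uint64_t lo; int64_t hi; } I128;  /* two-word fallback layout */

static I128 i128_not(I128 a)         { return (I128){ ~a.lo, ~a.hi }; }
static I128 i128_xor(I128 a, I128 b) { return (I128){ a.lo ^ b.lo, a.hi ^ b.hi }; }

int main(void)
{
    I128 a = { 0x00000000ffffffffULL, 0 };
    I128 ones = { ~0ULL, -1 };
    I128 n = i128_not(a);
    I128 x = i128_xor(a, ones);

    assert(n.lo == x.lo && n.hi == x.hi);  /* ~a == a ^ ~0, per half */
    return 0;
}
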
include/qemu/int128.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index 2ac0746426..b6d517aea4 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -58,6 +58,11 @@ static inline Int128 int128_exts64(int64_t a)
return a;
}
+static inline Int128 int128_not(Int128 a)
+{
+ return ~a;
+}
+
static inline Int128 int128_and(Int128 a, Int128 b)
{
return a & b;
@@ -68,6 +73,11 @@ static inline Int128 int128_or(Int128 a, Int128 b)
return a | b;
}
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+ return a ^ b;
+}
+
static inline Int128 int128_rshift(Int128 a, int n)
{
return a >> n;
@@ -235,6 +245,11 @@ static inline Int128 int128_exts64(int64_t a)
return int128_make128(a, (a < 0) ? -1 : 0);
}
+static inline Int128 int128_not(Int128 a)
+{
+ return int128_make128(~a.lo, ~a.hi);
+}
+
static inline Int128 int128_and(Int128 a, Int128 b)
{
return int128_make128(a.lo & b.lo, a.hi & b.hi);
@@ -245,6 +260,11 @@ static inline Int128 int128_or(Int128 a, Int128 b)
return int128_make128(a.lo | b.lo, a.hi | b.hi);
}
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+ return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
+}
+
static inline Int128 int128_rshift(Int128 a, int n)
{
int64_t h;
--
2.25.1
* [PULL 02/56] host-utils: move checks out of divu128/divs128
From: Richard Henderson @ 2021-10-28 2:40 UTC
To: qemu-devel; +Cc: Luis Pires
From: Luis Pires <luis.pires@eldorado.org.br>
In preparation for changing the divu128/divs128 implementations
to allow for quotients larger than 64 bits, move the div-by-zero
and overflow checks to the callers.
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-2-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
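A minimal sketch of the caller contract after this change, assuming a
compiler with __uint128_t; divu128_unchecked() and div128to64() are names
invented for the example. The quotient fits in 64 bits exactly when the
high dividend word is below the divisor, which is the test helper_divdeu()
now performs:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Unchecked divide, as in the CONFIG_INT128 variant after this patch. */
static void divu128_unchecked(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;

    *plow = (uint64_t)(dividend / divisor);
    *phigh = (uint64_t)(dividend % divisor);
}

/* Caller-side checks: reject div-by-zero and quotients over 64 bits. */
static bool div128to64(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    if (divisor == 0 || *phigh >= divisor) {
        return false;   /* result would be undefined */
    }
    divu128_unchecked(plow, phigh, divisor);
    return true;
}

int main(void)
{
    uint64_t lo = 0, hi = 1;    /* dividend = 2^64 */

    if (div128to64(&lo, &hi, 3)) {
        printf("q=%llx r=%llx\n", (unsigned long long)lo, (unsigned long long)hi);
    }
    return 0;
}
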
include/hw/clock.h | 5 +++--
include/qemu/host-utils.h | 34 ++++++++++++---------------------
target/ppc/int_helper.c | 14 +++++++++-----
util/host-utils.c | 40 ++++++++++++++++++---------------------
4 files changed, 42 insertions(+), 51 deletions(-)
diff --git a/include/hw/clock.h b/include/hw/clock.h
index 11f67fb970..7443e6c4ab 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -324,8 +324,9 @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
return 0;
}
/*
- * Ignore divu128() return value as we've caught div-by-zero and don't
- * need different behaviour for overflow.
+ * BUG: when CONFIG_INT128 is not defined, the current implementation of
+ * divu128 does not return a valid truncated quotient, so the result will
+ * be wrong.
*/
divu128(&lo, &hi, clk->period);
return lo;
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index ca9f3f021b..e82e6239af 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -52,36 +52,26 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
return (__int128_t)a * b / c;
}
-static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
- if (divisor == 0) {
- return 1;
- } else {
- __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
- __uint128_t result = dividend / divisor;
- *plow = result;
- *phigh = dividend % divisor;
- return result > UINT64_MAX;
- }
+ __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
+ __uint128_t result = dividend / divisor;
+ *plow = result;
+ *phigh = dividend % divisor;
}
-static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
{
- if (divisor == 0) {
- return 1;
- } else {
- __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
- __int128_t result = dividend / divisor;
- *plow = result;
- *phigh = dividend % divisor;
- return result != *plow;
- }
+ __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+ __int128_t result = dividend / divisor;
+ *plow = result;
+ *phigh = dividend % divisor;
}
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index f5dac3aa87..510faf24cf 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -104,10 +104,11 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
uint64_t rt = 0;
int overflow = 0;
- overflow = divu128(&rt, &ra, rb);
-
- if (unlikely(overflow)) {
+ if (unlikely(rb == 0 || ra >= rb)) {
+ overflow = 1;
rt = 0; /* Undefined */
+ } else {
+ divu128(&rt, &ra, rb);
}
if (oe) {
@@ -122,10 +123,13 @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
int64_t rt = 0;
int64_t ra = (int64_t)rau;
int64_t rb = (int64_t)rbu;
- int overflow = divs128(&rt, &ra, rb);
+ int overflow = 0;
- if (unlikely(overflow)) {
+ if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
+ overflow = 1;
rt = 0; /* Undefined */
+ } else {
+ divs128(&rt, &ra, rb);
}
if (oe) {
diff --git a/util/host-utils.c b/util/host-utils.c
index a789a11b46..701a371843 100644
--- a/util/host-utils.c
+++ b/util/host-utils.c
@@ -86,24 +86,23 @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
*phigh = rh;
}
-/* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */
-/* quotient exceeds 64 bits). Otherwise returns quotient via plow and */
-/* remainder via phigh. */
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+/*
+ * Unsigned 128-by-64 division. Returns quotient via plow and
+ * remainder via phigh.
+ * The result must fit in 64 bits (plow) - otherwise, the result
+ * is undefined.
+ * This function will cause a division by zero if passed a zero divisor.
+ */
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
uint64_t dhi = *phigh;
uint64_t dlo = *plow;
unsigned i;
uint64_t carry = 0;
- if (divisor == 0) {
- return 1;
- } else if (dhi == 0) {
+ if (divisor == 0 || dhi == 0) {
*plow = dlo / divisor;
*phigh = dlo % divisor;
- return 0;
- } else if (dhi >= divisor) {
- return 1;
} else {
for (i = 0; i < 64; i++) {
@@ -120,15 +119,20 @@ int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
*plow = dlo;
*phigh = dhi;
- return 0;
}
}
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+/*
+ * Signed 128-by-64 division. Returns quotient via plow and
+ * remainder via phigh.
+ * The result must fit in 64 bits (plow) - otherwise, the result
+ * is undefined.
+ * This function will cause a division by zero if passed a zero divisor.
+ */
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
{
int sgn_dvdnd = *phigh < 0;
int sgn_divsr = divisor < 0;
- int overflow = 0;
if (sgn_dvdnd) {
*plow = ~(*plow);
@@ -145,19 +149,11 @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
divisor = 0 - divisor;
}
- overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
+ divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
if (sgn_dvdnd ^ sgn_divsr) {
*plow = 0 - *plow;
}
-
- if (!overflow) {
- if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) {
- overflow = 1;
- }
- }
-
- return overflow;
}
#endif
--
2.25.1
* [PULL 03/56] host-utils: move udiv_qrnnd() to host-utils
From: Richard Henderson @ 2021-10-28 2:40 UTC
To: qemu-devel; +Cc: Luis Pires
From: Luis Pires <luis.pires@eldorado.org.br>
Move udiv_qrnnd() from include/fpu/softfloat-macros.h to host-utils,
so it can be reused by divu128().
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-3-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
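For reference, a sketch of the contract udiv_qrnnd() satisfies, modeled
with __uint128_t; udiv_qrnnd_ref() is a name invented here. Callers must
guarantee n1 < d so the quotient fits in 64 bits, and the portable
fallback additionally expects d to be normalized (most significant bit
set):

#include <stdint.h>
#include <assert.h>

/* Divide the two-word value n1:n0 by d; return the quotient,
 * store the remainder in *r. */
static uint64_t udiv_qrnnd_ref(uint64_t *r, uint64_t n1, uint64_t n0, uint64_t d)
{
    __uint128_t n = ((__uint128_t)n1 << 64) | n0;

    *r = (uint64_t)(n % d);
    return (uint64_t)(n / d);
}

int main(void)
{
    uint64_t rem;
    /* 2^64 / (2^63 + 1) == 1, remainder 2^63 - 1 */
    uint64_t q = udiv_qrnnd_ref(&rem, 1, 0, 0x8000000000000001ULL);

    assert(q == 1 && rem == 0x7fffffffffffffffULL);
    return 0;
}
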
include/fpu/softfloat-macros.h | 82 ----------------------------------
include/qemu/host-utils.h | 81 +++++++++++++++++++++++++++++++++
2 files changed, 81 insertions(+), 82 deletions(-)
diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
index 81c3fe8256..f35cdbfa63 100644
--- a/include/fpu/softfloat-macros.h
+++ b/include/fpu/softfloat-macros.h
@@ -8,7 +8,6 @@
* so some portions are provided under:
* the SoftFloat-2a license
* the BSD license
- * GPL-v2-or-later
*
* Any future contributions to this file after December 1st 2014 will be
* taken to be licensed under the Softfloat-2a license unless specifically
@@ -75,10 +74,6 @@ this code that are retained.
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* Portions of this work are licensed under the terms of the GNU GPL,
- * version 2 or later. See the COPYING file in the top-level directory.
- */
-
#ifndef FPU_SOFTFLOAT_MACROS_H
#define FPU_SOFTFLOAT_MACROS_H
@@ -585,83 +580,6 @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
}
-/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
- * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
- *
- * Licensed under the GPLv2/LGPLv3
- */
-static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
- uint64_t n0, uint64_t d)
-{
-#if defined(__x86_64__)
- uint64_t q;
- asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
- return q;
-#elif defined(__s390x__) && !defined(__clang__)
- /* Need to use a TImode type to get an even register pair for DLGR. */
- unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
- asm("dlgr %0, %1" : "+r"(n) : "r"(d));
- *r = n >> 64;
- return n;
-#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
- /* From Power ISA 2.06, programming note for divdeu. */
- uint64_t q1, q2, Q, r1, r2, R;
- asm("divdeu %0,%2,%4; divdu %1,%3,%4"
- : "=&r"(q1), "=r"(q2)
- : "r"(n1), "r"(n0), "r"(d));
- r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
- r2 = n0 - (q2 * d);
- Q = q1 + q2;
- R = r1 + r2;
- if (R >= d || R < r2) { /* overflow implies R > d */
- Q += 1;
- R -= d;
- }
- *r = R;
- return Q;
-#else
- uint64_t d0, d1, q0, q1, r1, r0, m;
-
- d0 = (uint32_t)d;
- d1 = d >> 32;
-
- r1 = n1 % d1;
- q1 = n1 / d1;
- m = q1 * d0;
- r1 = (r1 << 32) | (n0 >> 32);
- if (r1 < m) {
- q1 -= 1;
- r1 += d;
- if (r1 >= d) {
- if (r1 < m) {
- q1 -= 1;
- r1 += d;
- }
- }
- }
- r1 -= m;
-
- r0 = r1 % d1;
- q0 = r1 / d1;
- m = q0 * d0;
- r0 = (r0 << 32) | (uint32_t)n0;
- if (r0 < m) {
- q0 -= 1;
- r0 += d;
- if (r0 >= d) {
- if (r0 < m) {
- q0 -= 1;
- r0 += d;
- }
- }
- }
- r0 -= m;
-
- *r = r0;
- return (q1 << 32) | q0;
-#endif
-}
-
/*----------------------------------------------------------------------------
| Returns an approximation to the square root of the 32-bit significand given
| by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index e82e6239af..08a17e16e5 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -23,6 +23,10 @@
* THE SOFTWARE.
*/
+/* Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
#ifndef HOST_UTILS_H
#define HOST_UTILS_H
@@ -726,4 +730,81 @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
*/
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
+/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
+ * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
+ *
+ * Licensed under the GPLv2/LGPLv3
+ */
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
+ uint64_t n0, uint64_t d)
+{
+#if defined(__x86_64__)
+ uint64_t q;
+ asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
+ return q;
+#elif defined(__s390x__) && !defined(__clang__)
+ /* Need to use a TImode type to get an even register pair for DLGR. */
+ unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
+ asm("dlgr %0, %1" : "+r"(n) : "r"(d));
+ *r = n >> 64;
+ return n;
+#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
+ /* From Power ISA 2.06, programming note for divdeu. */
+ uint64_t q1, q2, Q, r1, r2, R;
+ asm("divdeu %0,%2,%4; divdu %1,%3,%4"
+ : "=&r"(q1), "=r"(q2)
+ : "r"(n1), "r"(n0), "r"(d));
+ r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
+ r2 = n0 - (q2 * d);
+ Q = q1 + q2;
+ R = r1 + r2;
+ if (R >= d || R < r2) { /* overflow implies R > d */
+ Q += 1;
+ R -= d;
+ }
+ *r = R;
+ return Q;
+#else
+ uint64_t d0, d1, q0, q1, r1, r0, m;
+
+ d0 = (uint32_t)d;
+ d1 = d >> 32;
+
+ r1 = n1 % d1;
+ q1 = n1 / d1;
+ m = q1 * d0;
+ r1 = (r1 << 32) | (n0 >> 32);
+ if (r1 < m) {
+ q1 -= 1;
+ r1 += d;
+ if (r1 >= d) {
+ if (r1 < m) {
+ q1 -= 1;
+ r1 += d;
+ }
+ }
+ }
+ r1 -= m;
+
+ r0 = r1 % d1;
+ q0 = r1 / d1;
+ m = q0 * d0;
+ r0 = (r0 << 32) | (uint32_t)n0;
+ if (r0 < m) {
+ q0 -= 1;
+ r0 += d;
+ if (r0 >= d) {
+ if (r0 < m) {
+ q0 -= 1;
+ r0 += d;
+ }
+ }
+ }
+ r0 -= m;
+
+ *r = r0;
+ return (q1 << 32) | q0;
+#endif
+}
+
#endif
--
2.25.1
* [PULL 04/56] host-utils: add 128-bit quotient support to divu128/divs128
From: Richard Henderson @ 2021-10-28 2:40 UTC
To: qemu-devel; +Cc: Luis Pires
From: Luis Pires <luis.pires@eldorado.org.br>
These will be used to implement new decimal floating point
instructions from Power ISA 3.1.
The remainder is now returned directly by divu128/divs128,
freeing up phigh to receive the high 64 bits of the quotient.
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-4-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
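A sketch of the new calling convention, assuming __uint128_t support
(divu128_ref is a local name): phigh now receives the high quotient word,
and the remainder moves to the return value:

#include <stdint.h>
#include <assert.h>

static uint64_t divu128_ref(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
    __uint128_t quotient = dividend / divisor;

    *plow = (uint64_t)quotient;
    *phigh = (uint64_t)(quotient >> 64);   /* previously held the remainder */
    return (uint64_t)(dividend % divisor);
}

int main(void)
{
    uint64_t lo = 0, hi = 1;               /* dividend = 2^64 */
    uint64_t rem = divu128_ref(&lo, &hi, 3);

    /* 2^64 / 3 = 0x5555555555555555, remainder 1 */
    assert(hi == 0 && lo == 0x5555555555555555ULL && rem == 1);
    return 0;
}
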
include/hw/clock.h | 6 +-
include/qemu/host-utils.h | 20 ++++--
target/ppc/int_helper.c | 9 +--
util/host-utils.c | 133 +++++++++++++++++++++++++-------------
4 files changed, 108 insertions(+), 60 deletions(-)
diff --git a/include/hw/clock.h b/include/hw/clock.h
index 7443e6c4ab..5c927cee7f 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -323,11 +323,7 @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
if (clk->period == 0) {
return 0;
}
- /*
- * BUG: when CONFIG_INT128 is not defined, the current implementation of
- * divu128 does not return a valid truncated quotient, so the result will
- * be wrong.
- */
+
divu128(&lo, &hi, clk->period);
return lo;
}
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index 08a17e16e5..a3a7ced78d 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -56,26 +56,32 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
return (__int128_t)a * b / c;
}
-static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
+ uint64_t divisor)
{
__uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
__uint128_t result = dividend / divisor;
+
*plow = result;
- *phigh = dividend % divisor;
+ *phigh = result >> 64;
+ return dividend % divisor;
}
-static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
+ int64_t divisor)
{
- __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
+ __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
__int128_t result = dividend / divisor;
+
*plow = result;
- *phigh = dividend % divisor;
+ *phigh = result >> 64;
+ return dividend % divisor;
}
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 510faf24cf..eeb7781a9e 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -120,7 +120,7 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
{
- int64_t rt = 0;
+ uint64_t rt = 0;
int64_t ra = (int64_t)rau;
int64_t rb = (int64_t)rbu;
int overflow = 0;
@@ -2506,6 +2506,7 @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
int cr;
uint64_t lo_value;
uint64_t hi_value;
+ uint64_t rem;
ppc_avr_t ret = { .u64 = { 0, 0 } };
if (b->VsrSD(0) < 0) {
@@ -2541,10 +2542,10 @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
* In that case, we leave r unchanged.
*/
} else {
- divu128(&lo_value, &hi_value, 1000000000000000ULL);
+ rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);
- for (i = 1; i < 16; hi_value /= 10, i++) {
- bcd_put_digit(&ret, hi_value % 10, i);
+ for (i = 1; i < 16; rem /= 10, i++) {
+ bcd_put_digit(&ret, rem % 10, i);
}
for (; i < 32; lo_value /= 10, i++) {
diff --git a/util/host-utils.c b/util/host-utils.c
index 701a371843..bcc772b8ec 100644
--- a/util/host-utils.c
+++ b/util/host-utils.c
@@ -87,72 +87,117 @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
}
/*
- * Unsigned 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Unsigned 128-by-64 division.
+ * Returns quotient via plow and phigh.
+ * Also returns the remainder via the function return value.
*/
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
uint64_t dhi = *phigh;
uint64_t dlo = *plow;
- unsigned i;
- uint64_t carry = 0;
+ uint64_t rem, dhighest;
+ int sh;
if (divisor == 0 || dhi == 0) {
*plow = dlo / divisor;
- *phigh = dlo % divisor;
+ *phigh = 0;
+ return dlo % divisor;
} else {
+ sh = clz64(divisor);
- for (i = 0; i < 64; i++) {
- carry = dhi >> 63;
- dhi = (dhi << 1) | (dlo >> 63);
- if (carry || (dhi >= divisor)) {
- dhi -= divisor;
- carry = 1;
- } else {
- carry = 0;
+ if (dhi < divisor) {
+ if (sh != 0) {
+ /* normalize the divisor, shifting the dividend accordingly */
+ divisor <<= sh;
+ dhi = (dhi << sh) | (dlo >> (64 - sh));
+ dlo <<= sh;
}
- dlo = (dlo << 1) | carry;
+
+ *phigh = 0;
+ *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
+ } else {
+ if (sh != 0) {
+ /* normalize the divisor, shifting the dividend accordingly */
+ divisor <<= sh;
+ dhighest = dhi >> (64 - sh);
+ dhi = (dhi << sh) | (dlo >> (64 - sh));
+ dlo <<= sh;
+
+ *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
+ } else {
+ /**
+ * dhi >= divisor
+ * Since the MSB of divisor is set (sh == 0),
+ * (dhi - divisor) < divisor
+ *
+ * Thus, the high part of the quotient is 1, and we can
+ * calculate the low part with a single call to udiv_qrnnd
+ * after subtracting divisor from dhi
+ */
+ dhi -= divisor;
+ *phigh = 1;
+ }
+
+ *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
}
- *plow = dlo;
- *phigh = dhi;
+ /*
+ * since the dividend/divisor might have been normalized,
+ * the remainder might also have to be shifted back
+ */
+ return rem >> sh;
}
}
/*
- * Signed 128-by-64 division. Returns quotient via plow and
- * remainder via phigh.
- * The result must fit in 64 bits (plow) - otherwise, the result
- * is undefined.
- * This function will cause a division by zero if passed a zero divisor.
+ * Signed 128-by-64 division.
+ * Returns quotient via plow and phigh.
+ * Also returns the remainder via the function return value.
*/
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
{
- int sgn_dvdnd = *phigh < 0;
- int sgn_divsr = divisor < 0;
+ bool neg_quotient = false, neg_remainder = false;
+ uint64_t unsig_hi = *phigh, unsig_lo = *plow;
+ uint64_t rem;
- if (sgn_dvdnd) {
- *plow = ~(*plow);
- *phigh = ~(*phigh);
- if (*plow == (int64_t)-1) {
+ if (*phigh < 0) {
+ neg_quotient = !neg_quotient;
+ neg_remainder = !neg_remainder;
+
+ if (unsig_lo == 0) {
+ unsig_hi = -unsig_hi;
+ } else {
+ unsig_hi = ~unsig_hi;
+ unsig_lo = -unsig_lo;
+ }
+ }
+
+ if (divisor < 0) {
+ neg_quotient = !neg_quotient;
+
+ divisor = -divisor;
+ }
+
+ rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);
+
+ if (neg_quotient) {
+ if (unsig_lo == 0) {
+ *phigh = -unsig_hi;
*plow = 0;
- (*phigh)++;
- } else {
- (*plow)++;
- }
+ } else {
+ *phigh = ~unsig_hi;
+ *plow = -unsig_lo;
+ }
+ } else {
+ *phigh = unsig_hi;
+ *plow = unsig_lo;
}
- if (sgn_divsr) {
- divisor = 0 - divisor;
- }
-
- divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
-
- if (sgn_dvdnd ^ sgn_divsr) {
- *plow = 0 - *plow;
+ if (neg_remainder) {
+ return -rem;
+ } else {
+ return rem;
}
}
#endif
--
2.25.1
* [PULL 05/56] host-utils: add unit tests for divu128/divs128
From: Richard Henderson @ 2021-10-28 2:40 UTC
To: qemu-devel; +Cc: Luis Pires
From: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211025191154.350831-5-luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
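Two table entries checked independently with __int128 arithmetic, as a
minimal standalone sketch (the constants come from the tables added
below):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    /* Signed: 12345678 / -2 -> quotient -6172839 (0x...ffa1cf59), rem 0 */
    __int128 sd = 12345678;                /* 0xbc614e */
    assert(sd / -2 == -6172839 && sd % -2 == 0);

    /* Unsigned: divisor MSB set and high dividend word >= divisor,
     * exercising the normalization path; the quotient needs 65 bits. */
    __uint128_t ud = ((__uint128_t)0xfeeddccbbaa99887ULL << 64)
                     | 0x766554433221100fULL;
    __uint128_t q = ud / 0x8000000000000001ULL;

    assert(q == (((__uint128_t)1 << 64) | 0xfddbb9977553310aULL));
    return 0;
}
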
tests/unit/test-div128.c | 197 +++++++++++++++++++++++++++++++++++++++
tests/unit/meson.build | 1 +
2 files changed, 198 insertions(+)
create mode 100644 tests/unit/test-div128.c
diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c
new file mode 100644
index 0000000000..0bc25fe4a8
--- /dev/null
+++ b/tests/unit/test-div128.c
@@ -0,0 +1,197 @@
+/*
+ * Test 128-bit division functions
+ *
+ * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+
+typedef struct {
+ uint64_t high;
+ uint64_t low;
+ uint64_t rhigh;
+ uint64_t rlow;
+ uint64_t divisor;
+ uint64_t remainder;
+} test_data_unsigned;
+
+typedef struct {
+ int64_t high;
+ uint64_t low;
+ int64_t rhigh;
+ uint64_t rlow;
+ int64_t divisor;
+ int64_t remainder;
+} test_data_signed;
+
+static const test_data_unsigned test_table_unsigned[] = {
+ /* Dividend fits in 64 bits */
+ { 0x0000000000000000ULL, 0x0000000000000000ULL,
+ 0x0000000000000000ULL, 0x0000000000000000ULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
+ { 0x0000000000000000ULL, 0x0000000000000001ULL,
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
+ { 0x0000000000000000ULL, 0x0000000000000003ULL,
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
+ 0x0000000000000002ULL, 0x0000000000000001ULL},
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
+ 0x0000000000000000ULL, 0x8000000000000000ULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
+ { 0x0000000000000000ULL, 0xa000000000000000ULL,
+ 0x0000000000000000ULL, 0x0000000000000002ULL,
+ 0x4000000000000000ULL, 0x2000000000000000ULL},
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
+ 0x8000000000000000ULL, 0x0000000000000000ULL},
+
+ /* Dividend > 64 bits, with MSB 0 */
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+ 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+ 0x0000000000000001ULL, 0x000000000000000dULL,
+ 0x123456789abcdefeULL, 0x03456789abcdf03bULL},
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
+ 0x0123456789abcdefULL, 0xeefedcba98765432ULL,
+ 0x0000000000000010ULL, 0x0000000000000001ULL},
+
+ /* Dividend > 64 bits, with MSB 1 */
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0feeddccbbaa9988ULL, 0x7766554433221100ULL,
+ 0x0000000000000010ULL, 0x000000000000000fULL},
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL,
+ 0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL},
+
+ /**
+ * Divisor == 64 bits, with MSB 1
+ * and high 64 bits of dividend >= divisor
+ * (for testing normalization)
+ */
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
+ 0x0000000000000001ULL, 0xfddbb9977553310aULL,
+ 0x8000000000000001ULL, 0x78899aabbccddf05ULL},
+
+ /* Dividend > 64 bits, divisor almost as big */
+ { 0x0000000000000001ULL, 0x23456789abcdef01ULL,
+ 0x0000000000000000ULL, 0x000000000000000fULL,
+ 0x123456789abcdefeULL, 0x123456789abcde1fULL},
+};
+
+static const test_data_signed test_table_signed[] = {
+ /* Positive dividend, positive/negative divisors */
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000001LL, 0x0000000000000000LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
+ 0x0000000000000002LL, 0x0000000000000000LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
+ 0x0000000000000008LL, 0x0000000000000006LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
+ 0xfffffffffffffff8LL, 0x0000000000000006LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0x0000000000000000LL, 0x000000000000550dULL,
+ 0x0000000000000237LL, 0x0000000000000183LL},
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
+ 0xfffffffffffffdc9LL, 0x0000000000000183LL},
+
+ /* Negative dividend, positive/negative divisors */
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000001LL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
+ 0x0000000000000002LL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
+ 0x0000000000000008LL, 0xfffffffffffffffaLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
+ 0xfffffffffffffff8LL, 0xfffffffffffffffaLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
+ 0x0000000000000237LL, 0xfffffffffffffe7dLL},
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
+ 0x0000000000000000LL, 0x000000000000550dULL,
+ 0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL},
+};
+
+static void test_divu128(void)
+{
+ int i;
+ uint64_t rem;
+ test_data_unsigned tmp;
+
+ for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) {
+ tmp = test_table_unsigned[i];
+
+ rem = divu128(&tmp.low, &tmp.high, tmp.divisor);
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
+ g_assert_cmpuint(rem, ==, tmp.remainder);
+ }
+}
+
+static void test_divs128(void)
+{
+ int i;
+ int64_t rem;
+ test_data_signed tmp;
+
+ for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) {
+ tmp = test_table_signed[i];
+
+ rem = divs128(&tmp.low, &tmp.high, tmp.divisor);
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
+ g_assert_cmpuint(rem, ==, tmp.remainder);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/host-utils/test_divu128", test_divu128);
+ g_test_add_func("/host-utils/test_divs128", test_divs128);
+ return g_test_run();
+}
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index 7c297d7e5c..5ac2d9e943 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -23,6 +23,7 @@ tests = {
# all code tested by test-x86-cpuid is inside topology.h
'test-x86-cpuid': [],
'test-cutils': [],
+ 'test-div128': [],
'test-shift128': [],
'test-mul64': [],
# all code tested by test-int128 is inside int128.h
--
2.25.1
* [PULL 06/56] tcg/optimize: Rename "mask" to "z_mask"
From: Richard Henderson @ 2021-10-28 2:40 UTC
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
Prepare for tracking different masks by renaming this one.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
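The propagation rules that z_mask obeys, pulled out into a standalone
sketch with invented helper names; a clear bit in z_mask means the
corresponding value bit is provably zero, so AND intersects the
possibly-set bits while OR/XOR union them and shifts move them:

#include <stdint.h>
#include <assert.h>

static uint64_t z_and(uint64_t za, uint64_t zb) { return za & zb; }
static uint64_t z_or (uint64_t za, uint64_t zb) { return za | zb; }
static uint64_t z_shl(uint64_t za, int n)       { return za << n; }

int main(void)
{
    uint64_t z = 0xff;                 /* ext8u: only the low byte may be set */

    assert(z_shl(z, 4) == 0xff0);      /* shl shifts the possibly-set bits */
    assert(z_and(z, 0x0f0f) == 0x0f);  /* and keeps bits possibly set in both */
    assert(z_or(z, 0x0f00) == 0x0fff); /* or/xor may set bits from either side */
    return 0;
}
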
tcg/optimize.c | 142 +++++++++++++++++++++++++------------------------
1 file changed, 72 insertions(+), 70 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index c239c3bd07..148e360fc6 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -41,7 +41,7 @@ typedef struct TempOptInfo {
TCGTemp *prev_copy;
TCGTemp *next_copy;
uint64_t val;
- uint64_t mask;
+ uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;
static inline TempOptInfo *ts_info(TCGTemp *ts)
@@ -81,7 +81,7 @@ static void reset_ts(TCGTemp *ts)
ti->next_copy = ts;
ti->prev_copy = ts;
ti->is_const = false;
- ti->mask = -1;
+ ti->z_mask = -1;
}
static void reset_temp(TCGArg arg)
@@ -111,14 +111,14 @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
if (ts->kind == TEMP_CONST) {
ti->is_const = true;
ti->val = ts->val;
- ti->mask = ts->val;
+ ti->z_mask = ts->val;
if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
/* High bits of a 32-bit quantity are garbage. */
- ti->mask |= ~0xffffffffull;
+ ti->z_mask |= ~0xffffffffull;
}
} else {
ti->is_const = false;
- ti->mask = -1;
+ ti->z_mask = -1;
}
}
@@ -186,7 +186,7 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
const TCGOpDef *def;
TempOptInfo *di;
TempOptInfo *si;
- uint64_t mask;
+ uint64_t z_mask;
TCGOpcode new_op;
if (ts_are_copies(dst_ts, src_ts)) {
@@ -210,12 +210,12 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
op->args[0] = dst;
op->args[1] = src;
- mask = si->mask;
+ z_mask = si->z_mask;
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
/* High bits of the destination are now garbage. */
- mask |= ~0xffffffffull;
+ z_mask |= ~0xffffffffull;
}
- di->mask = mask;
+ di->z_mask = z_mask;
if (src_ts->type == dst_ts->type) {
TempOptInfo *ni = ts_info(si->next_copy);
@@ -621,7 +621,7 @@ void tcg_optimize(TCGContext *s)
}
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
- uint64_t mask, partmask, affected, tmp;
+ uint64_t z_mask, partmask, affected, tmp;
int nb_oargs, nb_iargs;
TCGOpcode opc = op->opc;
const TCGOpDef *def = &tcg_op_defs[opc];
@@ -855,170 +855,172 @@ void tcg_optimize(TCGContext *s)
/* Simplify using known-zero bits. Currently only ops with a single
output argument is supported. */
- mask = -1;
+ z_mask = -1;
affected = -1;
switch (opc) {
CASE_OP_32_64(ext8s):
- if ((arg_info(op->args[1])->mask & 0x80) != 0) {
+ if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
break;
}
QEMU_FALLTHROUGH;
CASE_OP_32_64(ext8u):
- mask = 0xff;
+ z_mask = 0xff;
goto and_const;
CASE_OP_32_64(ext16s):
- if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
+ if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
break;
}
QEMU_FALLTHROUGH;
CASE_OP_32_64(ext16u):
- mask = 0xffff;
+ z_mask = 0xffff;
goto and_const;
case INDEX_op_ext32s_i64:
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
break;
}
QEMU_FALLTHROUGH;
case INDEX_op_ext32u_i64:
- mask = 0xffffffffU;
+ z_mask = 0xffffffffU;
goto and_const;
CASE_OP_32_64(and):
- mask = arg_info(op->args[2])->mask;
+ z_mask = arg_info(op->args[2])->z_mask;
if (arg_is_const(op->args[2])) {
and_const:
- affected = arg_info(op->args[1])->mask & ~mask;
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
}
- mask = arg_info(op->args[1])->mask & mask;
+ z_mask = arg_info(op->args[1])->z_mask & z_mask;
break;
case INDEX_op_ext_i32_i64:
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
break;
}
QEMU_FALLTHROUGH;
case INDEX_op_extu_i32_i64:
/* We do not compute affected as it is a size changing op. */
- mask = (uint32_t)arg_info(op->args[1])->mask;
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
break;
CASE_OP_32_64(andc):
/* Known-zeros does not imply known-ones. Therefore unless
op->args[2] is constant, we can't infer anything from it. */
if (arg_is_const(op->args[2])) {
- mask = ~arg_info(op->args[2])->mask;
+ z_mask = ~arg_info(op->args[2])->z_mask;
goto and_const;
}
/* But we certainly know nothing outside args[1] may be set. */
- mask = arg_info(op->args[1])->mask;
+ z_mask = arg_info(op->args[1])->z_mask;
break;
case INDEX_op_sar_i32:
if (arg_is_const(op->args[2])) {
tmp = arg_info(op->args[2])->val & 31;
- mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
+ z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
}
break;
case INDEX_op_sar_i64:
if (arg_is_const(op->args[2])) {
tmp = arg_info(op->args[2])->val & 63;
- mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
+ z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
}
break;
case INDEX_op_shr_i32:
if (arg_is_const(op->args[2])) {
tmp = arg_info(op->args[2])->val & 31;
- mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
}
break;
case INDEX_op_shr_i64:
if (arg_is_const(op->args[2])) {
tmp = arg_info(op->args[2])->val & 63;
- mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
}
break;
case INDEX_op_extrl_i64_i32:
- mask = (uint32_t)arg_info(op->args[1])->mask;
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
break;
case INDEX_op_extrh_i64_i32:
- mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
break;
CASE_OP_32_64(shl):
if (arg_is_const(op->args[2])) {
tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
- mask = arg_info(op->args[1])->mask << tmp;
+ z_mask = arg_info(op->args[1])->z_mask << tmp;
}
break;
CASE_OP_32_64(neg):
/* Set to 1 all bits to the left of the rightmost. */
- mask = -(arg_info(op->args[1])->mask
- & -arg_info(op->args[1])->mask);
+ z_mask = -(arg_info(op->args[1])->z_mask
+ & -arg_info(op->args[1])->z_mask);
break;
CASE_OP_32_64(deposit):
- mask = deposit64(arg_info(op->args[1])->mask,
- op->args[3], op->args[4],
- arg_info(op->args[2])->mask);
+ z_mask = deposit64(arg_info(op->args[1])->z_mask,
+ op->args[3], op->args[4],
+ arg_info(op->args[2])->z_mask);
break;
CASE_OP_32_64(extract):
- mask = extract64(arg_info(op->args[1])->mask,
- op->args[2], op->args[3]);
+ z_mask = extract64(arg_info(op->args[1])->z_mask,
+ op->args[2], op->args[3]);
if (op->args[2] == 0) {
- affected = arg_info(op->args[1])->mask & ~mask;
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
}
break;
CASE_OP_32_64(sextract):
- mask = sextract64(arg_info(op->args[1])->mask,
- op->args[2], op->args[3]);
- if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
- affected = arg_info(op->args[1])->mask & ~mask;
+ z_mask = sextract64(arg_info(op->args[1])->z_mask,
+ op->args[2], op->args[3]);
+ if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
}
break;
CASE_OP_32_64(or):
CASE_OP_32_64(xor):
- mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
+ z_mask = arg_info(op->args[1])->z_mask
+ | arg_info(op->args[2])->z_mask;
break;
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
- mask = arg_info(op->args[2])->mask | 31;
+ z_mask = arg_info(op->args[2])->z_mask | 31;
break;
case INDEX_op_clz_i64:
case INDEX_op_ctz_i64:
- mask = arg_info(op->args[2])->mask | 63;
+ z_mask = arg_info(op->args[2])->z_mask | 63;
break;
case INDEX_op_ctpop_i32:
- mask = 32 | 31;
+ z_mask = 32 | 31;
break;
case INDEX_op_ctpop_i64:
- mask = 64 | 63;
+ z_mask = 64 | 63;
break;
CASE_OP_32_64(setcond):
case INDEX_op_setcond2_i32:
- mask = 1;
+ z_mask = 1;
break;
CASE_OP_32_64(movcond):
- mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
+ z_mask = arg_info(op->args[3])->z_mask
+ | arg_info(op->args[4])->z_mask;
break;
CASE_OP_32_64(ld8u):
- mask = 0xff;
+ z_mask = 0xff;
break;
CASE_OP_32_64(ld16u):
- mask = 0xffff;
+ z_mask = 0xffff;
break;
case INDEX_op_ld32u_i64:
- mask = 0xffffffffu;
+ z_mask = 0xffffffffu;
break;
CASE_OP_32_64(qemu_ld):
@@ -1026,43 +1028,43 @@ void tcg_optimize(TCGContext *s)
MemOpIdx oi = op->args[nb_oargs + nb_iargs];
MemOp mop = get_memop(oi);
if (!(mop & MO_SIGN)) {
- mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
+ z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
}
}
break;
CASE_OP_32_64(bswap16):
- mask = arg_info(op->args[1])->mask;
- if (mask <= 0xffff) {
+ z_mask = arg_info(op->args[1])->z_mask;
+ if (z_mask <= 0xffff) {
op->args[2] |= TCG_BSWAP_IZ;
}
- mask = bswap16(mask);
+ z_mask = bswap16(z_mask);
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
case TCG_BSWAP_OZ:
break;
case TCG_BSWAP_OS:
- mask = (int16_t)mask;
+ z_mask = (int16_t)z_mask;
break;
default: /* undefined high bits */
- mask |= MAKE_64BIT_MASK(16, 48);
+ z_mask |= MAKE_64BIT_MASK(16, 48);
break;
}
break;
case INDEX_op_bswap32_i64:
- mask = arg_info(op->args[1])->mask;
- if (mask <= 0xffffffffu) {
+ z_mask = arg_info(op->args[1])->z_mask;
+ if (z_mask <= 0xffffffffu) {
op->args[2] |= TCG_BSWAP_IZ;
}
- mask = bswap32(mask);
+ z_mask = bswap32(z_mask);
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
case TCG_BSWAP_OZ:
break;
case TCG_BSWAP_OS:
- mask = (int32_t)mask;
+ z_mask = (int32_t)z_mask;
break;
default: /* undefined high bits */
- mask |= MAKE_64BIT_MASK(32, 32);
+ z_mask |= MAKE_64BIT_MASK(32, 32);
break;
}
break;
@@ -1074,9 +1076,9 @@ void tcg_optimize(TCGContext *s)
/* 32-bit ops generate 32-bit results. For the result is zero test
below, we can ignore high bits, but for further optimizations we
need to record that the high bits contain garbage. */
- partmask = mask;
+ partmask = z_mask;
if (!(def->flags & TCG_OPF_64BIT)) {
- mask |= ~(tcg_target_ulong)0xffffffffu;
+ z_mask |= ~(tcg_target_ulong)0xffffffffu;
partmask &= 0xffffffffu;
affected &= 0xffffffffu;
}
@@ -1472,7 +1474,7 @@ void tcg_optimize(TCGContext *s)
vs the high word of the input. */
do_setcond_high:
reset_temp(op->args[0]);
- arg_info(op->args[0])->mask = 1;
+ arg_info(op->args[0])->z_mask = 1;
op->opc = INDEX_op_setcond_i32;
op->args[1] = op->args[2];
op->args[2] = op->args[4];
@@ -1498,7 +1500,7 @@ void tcg_optimize(TCGContext *s)
}
do_setcond_low:
reset_temp(op->args[0]);
- arg_info(op->args[0])->mask = 1;
+ arg_info(op->args[0])->z_mask = 1;
op->opc = INDEX_op_setcond_i32;
op->args[2] = op->args[3];
op->args[3] = op->args[5];
@@ -1543,7 +1545,7 @@ void tcg_optimize(TCGContext *s)
/* Default case: we know nothing about operation (or were unable
to compute the operation result) so no propagation is done.
We trash everything if the operation is the end of a basic
- block, otherwise we only trash the output args. "mask" is
+ block, otherwise we only trash the output args. "z_mask" is
the non-zero bits mask for the first output arg. */
if (def->flags & TCG_OPF_BB_END) {
memset(&temps_used, 0, sizeof(temps_used));
@@ -1554,7 +1556,7 @@ void tcg_optimize(TCGContext *s)
/* Save the corresponding known-zero bits mask for the
first output argument (only one supported so far). */
if (i == 0) {
- arg_info(op->args[i])->mask = mask;
+ arg_info(op->args[i])->z_mask = z_mask;
}
}
}
--
2.25.1
* [PULL 07/56] tcg/optimize: Split out OptContext
From: Richard Henderson @ 2021-10-28 2:40 UTC
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
Provide what will become a larger context for splitting
the very large tcg_optimize function.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
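The refactoring pattern in miniature, as a hypothetical sketch (all names
local to the example): the bitset formerly passed around as a bare
TCGTempSet becomes one member of a context struct that later patches can
grow:

#include <stdbool.h>
#include <string.h>

#define NB_TEMPS      512
#define BITS_PER_WORD (8 * sizeof(unsigned long))

typedef struct OptContext {
    unsigned long temps_used[NB_TEMPS / BITS_PER_WORD];
    /* later patches add more pass-wide state here */
} OptContext;

/* Was: a loose TCGTempSet parameter; now the whole context travels. */
static bool test_and_set_temp(OptContext *ctx, unsigned idx)
{
    unsigned long *word = &ctx->temps_used[idx / BITS_PER_WORD];
    unsigned long bit = 1ul << (idx % BITS_PER_WORD);
    bool was_set = (*word & bit) != 0;

    *word |= bit;
    return was_set;
}

/* As at basic-block ends: forget everything in one memset. */
static void reset_all(OptContext *ctx)
{
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
}
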
tcg/optimize.c | 77 ++++++++++++++++++++++++++------------------------
1 file changed, 40 insertions(+), 37 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 148e360fc6..b76991215e 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -44,6 +44,10 @@ typedef struct TempOptInfo {
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;
+typedef struct OptContext {
+ TCGTempSet temps_used;
+} OptContext;
+
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
return ts->state_ptr;
@@ -90,15 +94,15 @@ static void reset_temp(TCGArg arg)
}
/* Initialize and activate a temporary. */
-static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
+static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
size_t idx = temp_idx(ts);
TempOptInfo *ti;
- if (test_bit(idx, temps_used->l)) {
+ if (test_bit(idx, ctx->temps_used.l)) {
return;
}
- set_bit(idx, temps_used->l);
+ set_bit(idx, ctx->temps_used.l);
ti = ts->state_ptr;
if (ti == NULL) {
@@ -122,9 +126,9 @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
}
}
-static void init_arg_info(TCGTempSet *temps_used, TCGArg arg)
+static void init_arg_info(OptContext *ctx, TCGArg arg)
{
- init_ts_info(temps_used, arg_temp(arg));
+ init_ts_info(ctx, arg_temp(arg));
}
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
@@ -229,7 +233,7 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
}
}
-static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
+static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
TCGOp *op, TCGArg dst, uint64_t val)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
@@ -246,7 +250,7 @@ static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
/* Convert movi to mov with constant temp. */
tv = tcg_constant_internal(type, val);
- init_ts_info(temps_used, tv);
+ init_ts_info(ctx, tv);
tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
}
@@ -605,7 +609,7 @@ void tcg_optimize(TCGContext *s)
{
int nb_temps, nb_globals, i;
TCGOp *op, *op_next, *prev_mb = NULL;
- TCGTempSet temps_used;
+ OptContext ctx = {};
/* Array VALS has an element for each temp.
If this temp holds a constant then its value is kept in VALS' element.
@@ -615,7 +619,6 @@ void tcg_optimize(TCGContext *s)
nb_temps = s->nb_temps;
nb_globals = s->nb_globals;
- memset(&temps_used, 0, sizeof(temps_used));
for (i = 0; i < nb_temps; ++i) {
s->temps[i].state_ptr = NULL;
}
@@ -634,14 +637,14 @@ void tcg_optimize(TCGContext *s)
for (i = 0; i < nb_oargs + nb_iargs; i++) {
TCGTemp *ts = arg_temp(op->args[i]);
if (ts) {
- init_ts_info(&temps_used, ts);
+ init_ts_info(&ctx, ts);
}
}
} else {
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
for (i = 0; i < nb_oargs + nb_iargs; i++) {
- init_arg_info(&temps_used, op->args[i]);
+ init_arg_info(&ctx, op->args[i]);
}
}
@@ -720,7 +723,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(rotr):
if (arg_is_const(op->args[1])
&& arg_info(op->args[1])->val == 0) {
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
continue;
}
break;
@@ -1085,7 +1088,7 @@ void tcg_optimize(TCGContext *s)
if (partmask == 0) {
tcg_debug_assert(nb_oargs == 1);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
continue;
}
if (affected == 0) {
@@ -1102,7 +1105,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(mulsh):
if (arg_is_const(op->args[2])
&& arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
continue;
}
break;
@@ -1129,7 +1132,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(sub):
CASE_OP_32_64_VEC(xor):
if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
continue;
}
break;
@@ -1149,7 +1152,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = arg_info(op->args[1])->val;
tmp = dup_const(TCGOP_VECE(op), tmp);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1157,7 +1160,7 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_dup2_vec:
assert(TCG_TARGET_REG_BITS == 32);
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0],
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0],
deposit64(arg_info(op->args[1])->val, 32, 32,
arg_info(op->args[2])->val));
break;
@@ -1183,7 +1186,7 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_extrh_i64_i32:
if (arg_is_const(op->args[1])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1194,7 +1197,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
op->args[2]);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1224,7 +1227,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
arg_info(op->args[2])->val);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1235,7 +1238,7 @@ void tcg_optimize(TCGContext *s)
TCGArg v = arg_info(op->args[1])->val;
if (v != 0) {
tmp = do_constant_folding(opc, v, 0);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
} else {
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
}
@@ -1248,7 +1251,7 @@ void tcg_optimize(TCGContext *s)
tmp = deposit64(arg_info(op->args[1])->val,
op->args[3], op->args[4],
arg_info(op->args[2])->val);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1257,7 +1260,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = extract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1266,7 +1269,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = sextract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1283,7 +1286,7 @@ void tcg_optimize(TCGContext *s)
tmp = (int32_t)(((uint32_t)v1 >> shr) |
((uint32_t)v2 << (32 - shr)));
}
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1292,7 +1295,7 @@ void tcg_optimize(TCGContext *s)
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[3]);
if (tmp != 2) {
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1302,7 +1305,7 @@ void tcg_optimize(TCGContext *s)
op->args[1], op->args[2]);
if (tmp != 2) {
if (tmp) {
- memset(&temps_used, 0, sizeof(temps_used));
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
op->opc = INDEX_op_br;
op->args[0] = op->args[3];
} else {
@@ -1358,8 +1361,8 @@ void tcg_optimize(TCGContext *s)
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a);
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32));
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
break;
}
goto do_default;
@@ -1374,8 +1377,8 @@ void tcg_optimize(TCGContext *s)
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r);
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32));
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
break;
}
goto do_default;
@@ -1386,7 +1389,7 @@ void tcg_optimize(TCGContext *s)
if (tmp != 2) {
if (tmp) {
do_brcond_true:
- memset(&temps_used, 0, sizeof(temps_used));
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
op->opc = INDEX_op_br;
op->args[0] = op->args[5];
} else {
@@ -1402,7 +1405,7 @@ void tcg_optimize(TCGContext *s)
/* Simplify LT/GE comparisons vs zero to a single compare
vs the high word of the input. */
do_brcond_high:
- memset(&temps_used, 0, sizeof(temps_used));
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
op->opc = INDEX_op_brcond_i32;
op->args[0] = op->args[1];
op->args[1] = op->args[3];
@@ -1428,7 +1431,7 @@ void tcg_optimize(TCGContext *s)
goto do_default;
}
do_brcond_low:
- memset(&temps_used, 0, sizeof(temps_used));
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
op->opc = INDEX_op_brcond_i32;
op->args[1] = op->args[2];
op->args[2] = op->args[4];
@@ -1463,7 +1466,7 @@ void tcg_optimize(TCGContext *s)
op->args[5]);
if (tmp != 2) {
do_setcond_const:
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
} else if ((op->args[5] == TCG_COND_LT
|| op->args[5] == TCG_COND_GE)
&& arg_is_const(op->args[3])
@@ -1533,7 +1536,7 @@ void tcg_optimize(TCGContext *s)
if (!(tcg_call_flags(op)
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
for (i = 0; i < nb_globals; i++) {
- if (test_bit(i, temps_used.l)) {
+ if (test_bit(i, ctx.temps_used.l)) {
reset_ts(&s->temps[i]);
}
}
@@ -1548,7 +1551,7 @@ void tcg_optimize(TCGContext *s)
block, otherwise we only trash the output args. "z_mask" is
the non-zero bits mask for the first output arg. */
if (def->flags & TCG_OPF_BB_END) {
- memset(&temps_used, 0, sizeof(temps_used));
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
} else {
do_reset_output:
for (i = 0; i < nb_oargs; i++) {
--
2.25.1
* [PULL 08/56] tcg/optimize: Remove do_default label
From: Richard Henderson @ 2021-10-28 2:40 UTC
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
Break the final cleanup clause out of the main switch
statement. When fully folding an opcode to mov/movi,
use "continue" to process the next opcode, else break
to fall into the final cleanup.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
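The resulting control flow, reduced to a compilable sketch with invented
types and names: fully folded opcodes continue to the next op, while
everything else breaks into the single cleanup tail that replaces the
do_default label:

#include <stdbool.h>
#include <stdio.h>

enum { OP_FOLDABLE, OP_OTHER };
typedef struct { int opc; bool args_const; } Op;

static bool try_fold(Op *op)      { return op->args_const; }  /* mov/movi case */
static void reset_outputs(Op *op) { printf("cleanup %d\n", op->opc); }

static void optimize(Op *ops, int n)
{
    for (int i = 0; i < n; i++) {
        Op *op = &ops[i];

        switch (op->opc) {
        case OP_FOLDABLE:
            if (try_fold(op)) {
                continue;   /* fully folded: skip the shared cleanup */
            }
            break;          /* not folded: fall into the cleanup tail */
        default:
            break;
        }
        /* the former do_default code, now just the loop tail */
        reset_outputs(op);
    }
}

int main(void)
{
    Op ops[] = { { OP_FOLDABLE, true }, { OP_FOLDABLE, false }, { OP_OTHER, false } };
    optimize(ops, 3);
    return 0;
}
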
tcg/optimize.c | 190 ++++++++++++++++++++++++-------------------------
1 file changed, 94 insertions(+), 96 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index b76991215e..a37efff4d0 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1146,16 +1146,16 @@ void tcg_optimize(TCGContext *s)
switch (opc) {
CASE_OP_32_64_VEC(mov):
tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
- break;
+ continue;
case INDEX_op_dup_vec:
if (arg_is_const(op->args[1])) {
tmp = arg_info(op->args[1])->val;
tmp = dup_const(TCGOP_VECE(op), tmp);
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
case INDEX_op_dup2_vec:
assert(TCG_TARGET_REG_BITS == 32);
@@ -1163,13 +1163,13 @@ void tcg_optimize(TCGContext *s)
tcg_opt_gen_movi(s, &ctx, op, op->args[0],
deposit64(arg_info(op->args[1])->val, 32, 32,
arg_info(op->args[2])->val));
- break;
+ continue;
} else if (args_are_copies(op->args[1], op->args[2])) {
op->opc = INDEX_op_dup_vec;
TCGOP_VECE(op) = MO_32;
nb_iargs = 1;
}
- goto do_default;
+ break;
CASE_OP_32_64(not):
CASE_OP_32_64(neg):
@@ -1187,9 +1187,9 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(bswap16):
CASE_OP_32_64(bswap32):
@@ -1198,9 +1198,9 @@ void tcg_optimize(TCGContext *s)
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
op->args[2]);
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(add):
CASE_OP_32_64(sub):
@@ -1228,9 +1228,9 @@ void tcg_optimize(TCGContext *s)
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
arg_info(op->args[2])->val);
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(clz):
CASE_OP_32_64(ctz):
@@ -1242,9 +1242,9 @@ void tcg_optimize(TCGContext *s)
} else {
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
}
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(deposit):
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
@@ -1252,27 +1252,27 @@ void tcg_optimize(TCGContext *s)
op->args[3], op->args[4],
arg_info(op->args[2])->val);
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(extract):
if (arg_is_const(op->args[1])) {
tmp = extract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(sextract):
if (arg_is_const(op->args[1])) {
tmp = sextract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(extract2):
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
@@ -1287,40 +1287,40 @@ void tcg_optimize(TCGContext *s)
((uint32_t)v2 << (32 - shr)));
}
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(setcond):
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[3]);
if (tmp != 2) {
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- break;
+ continue;
}
- goto do_default;
+ break;
CASE_OP_32_64(brcond):
tmp = do_constant_folding_cond(opc, op->args[0],
op->args[1], op->args[2]);
- if (tmp != 2) {
- if (tmp) {
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
- op->opc = INDEX_op_br;
- op->args[0] = op->args[3];
- } else {
- tcg_op_remove(s, op);
- }
+ switch (tmp) {
+ case 0:
+ tcg_op_remove(s, op);
+ continue;
+ case 1:
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
+ op->opc = opc = INDEX_op_br;
+ op->args[0] = op->args[3];
break;
}
- goto do_default;
+ break;
CASE_OP_32_64(movcond):
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[5]);
if (tmp != 2) {
tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
- break;
+ continue;
}
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
uint64_t tv = arg_info(op->args[3])->val;
@@ -1330,7 +1330,7 @@ void tcg_optimize(TCGContext *s)
if (fv == 1 && tv == 0) {
cond = tcg_invert_cond(cond);
} else if (!(tv == 1 && fv == 0)) {
- goto do_default;
+ break;
}
op->args[3] = cond;
op->opc = opc = (opc == INDEX_op_movcond_i32
@@ -1338,7 +1338,7 @@ void tcg_optimize(TCGContext *s)
: INDEX_op_setcond_i64);
nb_iargs = 2;
}
- goto do_default;
+ break;
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
@@ -1363,9 +1363,9 @@ void tcg_optimize(TCGContext *s)
rh = op->args[1];
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
- break;
+ continue;
}
- goto do_default;
+ break;
case INDEX_op_mulu2_i32:
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
@@ -1379,39 +1379,40 @@ void tcg_optimize(TCGContext *s)
rh = op->args[1];
tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
- break;
+ continue;
}
- goto do_default;
+ break;
case INDEX_op_brcond2_i32:
tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
op->args[4]);
- if (tmp != 2) {
- if (tmp) {
- do_brcond_true:
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
- op->opc = INDEX_op_br;
- op->args[0] = op->args[5];
- } else {
+ if (tmp == 0) {
do_brcond_false:
- tcg_op_remove(s, op);
- }
- } else if ((op->args[4] == TCG_COND_LT
- || op->args[4] == TCG_COND_GE)
- && arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == 0
- && arg_is_const(op->args[3])
- && arg_info(op->args[3])->val == 0) {
+ tcg_op_remove(s, op);
+ continue;
+ }
+ if (tmp == 1) {
+ do_brcond_true:
+ op->opc = opc = INDEX_op_br;
+ op->args[0] = op->args[5];
+ break;
+ }
+ if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
+ && arg_is_const(op->args[2])
+ && arg_info(op->args[2])->val == 0
+ && arg_is_const(op->args[3])
+ && arg_info(op->args[3])->val == 0) {
/* Simplify LT/GE comparisons vs zero to a single compare
vs the high word of the input. */
do_brcond_high:
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
- op->opc = INDEX_op_brcond_i32;
+ op->opc = opc = INDEX_op_brcond_i32;
op->args[0] = op->args[1];
op->args[1] = op->args[3];
op->args[2] = op->args[4];
op->args[3] = op->args[5];
- } else if (op->args[4] == TCG_COND_EQ) {
+ break;
+ }
+ if (op->args[4] == TCG_COND_EQ) {
/* Simplify EQ comparisons where one of the pairs
can be simplified. */
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
@@ -1428,7 +1429,7 @@ void tcg_optimize(TCGContext *s)
if (tmp == 0) {
goto do_brcond_false;
} else if (tmp != 1) {
- goto do_default;
+ break;
}
do_brcond_low:
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
@@ -1436,7 +1437,9 @@ void tcg_optimize(TCGContext *s)
op->args[1] = op->args[2];
op->args[2] = op->args[4];
op->args[3] = op->args[5];
- } else if (op->args[4] == TCG_COND_NE) {
+ break;
+ }
+ if (op->args[4] == TCG_COND_NE) {
/* Simplify NE comparisons where one of the pairs
can be simplified. */
tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
@@ -1455,9 +1458,6 @@ void tcg_optimize(TCGContext *s)
} else if (tmp == 1) {
goto do_brcond_true;
}
- goto do_default;
- } else {
- goto do_default;
}
break;
@@ -1467,12 +1467,13 @@ void tcg_optimize(TCGContext *s)
if (tmp != 2) {
do_setcond_const:
tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
- } else if ((op->args[5] == TCG_COND_LT
- || op->args[5] == TCG_COND_GE)
- && arg_is_const(op->args[3])
- && arg_info(op->args[3])->val == 0
- && arg_is_const(op->args[4])
- && arg_info(op->args[4])->val == 0) {
+ continue;
+ }
+ if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
+ && arg_is_const(op->args[3])
+ && arg_info(op->args[3])->val == 0
+ && arg_is_const(op->args[4])
+ && arg_info(op->args[4])->val == 0) {
/* Simplify LT/GE comparisons vs zero to a single compare
vs the high word of the input. */
do_setcond_high:
@@ -1482,7 +1483,9 @@ void tcg_optimize(TCGContext *s)
op->args[1] = op->args[2];
op->args[2] = op->args[4];
op->args[3] = op->args[5];
- } else if (op->args[5] == TCG_COND_EQ) {
+ break;
+ }
+ if (op->args[5] == TCG_COND_EQ) {
/* Simplify EQ comparisons where one of the pairs
can be simplified. */
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
@@ -1499,7 +1502,7 @@ void tcg_optimize(TCGContext *s)
if (tmp == 0) {
goto do_setcond_high;
} else if (tmp != 1) {
- goto do_default;
+ break;
}
do_setcond_low:
reset_temp(op->args[0]);
@@ -1507,7 +1510,9 @@ void tcg_optimize(TCGContext *s)
op->opc = INDEX_op_setcond_i32;
op->args[2] = op->args[3];
op->args[3] = op->args[5];
- } else if (op->args[5] == TCG_COND_NE) {
+ break;
+ }
+ if (op->args[5] == TCG_COND_NE) {
/* Simplify NE comparisons where one of the pairs
can be simplified. */
tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
@@ -1526,14 +1531,21 @@ void tcg_optimize(TCGContext *s)
} else if (tmp == 1) {
goto do_setcond_const;
}
- goto do_default;
- } else {
- goto do_default;
}
break;
- case INDEX_op_call:
- if (!(tcg_call_flags(op)
+ default:
+ break;
+ }
+
+ /* Some of the folding above can change opc. */
+ opc = op->opc;
+ def = &tcg_op_defs[opc];
+ if (def->flags & TCG_OPF_BB_END) {
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
+ } else {
+ if (opc == INDEX_op_call &&
+ !(tcg_call_flags(op)
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
for (i = 0; i < nb_globals; i++) {
if (test_bit(i, ctx.temps_used.l)) {
@@ -1541,29 +1553,15 @@ void tcg_optimize(TCGContext *s)
}
}
}
- goto do_reset_output;
- default:
- do_default:
- /* Default case: we know nothing about operation (or were unable
- to compute the operation result) so no propagation is done.
- We trash everything if the operation is the end of a basic
- block, otherwise we only trash the output args. "z_mask" is
- the non-zero bits mask for the first output arg. */
- if (def->flags & TCG_OPF_BB_END) {
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
- } else {
- do_reset_output:
- for (i = 0; i < nb_oargs; i++) {
- reset_temp(op->args[i]);
- /* Save the corresponding known-zero bits mask for the
- first output argument (only one supported so far). */
- if (i == 0) {
- arg_info(op->args[i])->z_mask = z_mask;
- }
+ for (i = 0; i < nb_oargs; i++) {
+ reset_temp(op->args[i]);
+ /* Save the corresponding known-zero bits mask for the
+ first output argument (only one supported so far). */
+ if (i == 0) {
+ arg_info(op->args[i])->z_mask = z_mask;
}
}
- break;
}
/* Eliminate duplicate and redundant fence instructions. */
--
2.25.1
* [PULL 09/56] tcg/optimize: Change tcg_opt_gen_{mov,movi} interface
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (7 preceding siblings ...)
2021-10-28 2:40 ` [PULL 08/56] tcg/optimize: Remove do_default label Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 10/56] tcg/optimize: Move prev_mb into OptContext Richard Henderson
` (47 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
Adjust the interface to take the OptContext parameter instead
of TCGContext or both.
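A minimal sketch of why this works, using stand-in types rather
than the real TCGContext/TCGOp definitions: embedding the
TCGContext pointer in OptContext lets every helper take a single
context argument and still reach the TCGContext when needed.

    #include <stdio.h>

    typedef struct { const char *name; } TCGContext;   /* stand-in */
    typedef struct { int opc; } TCGOp;                  /* stand-in */

    typedef struct OptContext {
        TCGContext *tcg;   /* embedded, so callees need only ctx */
    } OptContext;

    static void tcg_op_remove(TCGContext *s, TCGOp *op)
    {
        printf("%s: removing op %d\n", s->name, op->opc);
    }

    static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op)
    {
        tcg_op_remove(ctx->tcg, op);   /* reach TCGContext via ctx */
    }

    int main(void)
    {
        TCGContext s = { "tcg" };
        TCGOp op = { 42 };
        OptContext ctx = { .tcg = &s };

        tcg_opt_gen_mov(&ctx, &op);
        return 0;
    }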
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 67 +++++++++++++++++++++++++-------------------------
1 file changed, 34 insertions(+), 33 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index a37efff4d0..627a5b39f6 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -45,6 +45,7 @@ typedef struct TempOptInfo {
} TempOptInfo;
typedef struct OptContext {
+ TCGContext *tcg;
TCGTempSet temps_used;
} OptContext;
@@ -183,7 +184,7 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
-static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
+static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
TCGTemp *dst_ts = arg_temp(dst);
TCGTemp *src_ts = arg_temp(src);
@@ -194,7 +195,7 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
TCGOpcode new_op;
if (ts_are_copies(dst_ts, src_ts)) {
- tcg_op_remove(s, op);
+ tcg_op_remove(ctx->tcg, op);
return;
}
@@ -233,8 +234,8 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
}
}
-static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
- TCGOp *op, TCGArg dst, uint64_t val)
+static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
+ TCGArg dst, uint64_t val)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
TCGType type;
@@ -251,7 +252,7 @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
/* Convert movi to mov with constant temp. */
tv = tcg_constant_internal(type, val);
init_ts_info(ctx, tv);
- tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
+ tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
@@ -609,7 +610,7 @@ void tcg_optimize(TCGContext *s)
{
int nb_temps, nb_globals, i;
TCGOp *op, *op_next, *prev_mb = NULL;
- OptContext ctx = {};
+ OptContext ctx = { .tcg = s };
/* Array VALS has an element for each temp.
If this temp holds a constant then its value is kept in VALS' element.
@@ -723,7 +724,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(rotr):
if (arg_is_const(op->args[1])
&& arg_info(op->args[1])->val == 0) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
break;
@@ -838,7 +839,7 @@ void tcg_optimize(TCGContext *s)
if (!arg_is_const(op->args[1])
&& arg_is_const(op->args[2])
&& arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
break;
@@ -848,7 +849,7 @@ void tcg_optimize(TCGContext *s)
if (!arg_is_const(op->args[1])
&& arg_is_const(op->args[2])
&& arg_info(op->args[2])->val == -1) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
break;
@@ -1088,12 +1089,12 @@ void tcg_optimize(TCGContext *s)
if (partmask == 0) {
tcg_debug_assert(nb_oargs == 1);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
if (affected == 0) {
tcg_debug_assert(nb_oargs == 1);
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
@@ -1105,7 +1106,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(mulsh):
if (arg_is_const(op->args[2])
&& arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
break;
@@ -1118,7 +1119,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(or):
CASE_OP_32_64_VEC(and):
if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
break;
@@ -1132,7 +1133,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(sub):
CASE_OP_32_64_VEC(xor):
if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
break;
@@ -1145,14 +1146,14 @@ void tcg_optimize(TCGContext *s)
allocator where needed and possible. Also detect copies. */
switch (opc) {
CASE_OP_32_64_VEC(mov):
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
case INDEX_op_dup_vec:
if (arg_is_const(op->args[1])) {
tmp = arg_info(op->args[1])->val;
tmp = dup_const(TCGOP_VECE(op), tmp);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1160,7 +1161,7 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_dup2_vec:
assert(TCG_TARGET_REG_BITS == 32);
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0],
+ tcg_opt_gen_movi(&ctx, op, op->args[0],
deposit64(arg_info(op->args[1])->val, 32, 32,
arg_info(op->args[2])->val));
continue;
@@ -1186,7 +1187,7 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_extrh_i64_i32:
if (arg_is_const(op->args[1])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1197,7 +1198,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
op->args[2]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1227,7 +1228,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
arg_info(op->args[2])->val);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1238,9 +1239,9 @@ void tcg_optimize(TCGContext *s)
TCGArg v = arg_info(op->args[1])->val;
if (v != 0) {
tmp = do_constant_folding(opc, v, 0);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
} else {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
}
continue;
}
@@ -1251,7 +1252,7 @@ void tcg_optimize(TCGContext *s)
tmp = deposit64(arg_info(op->args[1])->val,
op->args[3], op->args[4],
arg_info(op->args[2])->val);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1260,7 +1261,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = extract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1269,7 +1270,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = sextract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1286,7 +1287,7 @@ void tcg_optimize(TCGContext *s)
tmp = (int32_t)(((uint32_t)v1 >> shr) |
((uint32_t)v2 << (32 - shr)));
}
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1295,7 +1296,7 @@ void tcg_optimize(TCGContext *s)
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[3]);
if (tmp != 2) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1319,7 +1320,7 @@ void tcg_optimize(TCGContext *s)
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[5]);
if (tmp != 2) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
continue;
}
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
@@ -1361,8 +1362,8 @@ void tcg_optimize(TCGContext *s)
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
continue;
}
break;
@@ -1377,8 +1378,8 @@ void tcg_optimize(TCGContext *s)
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
continue;
}
break;
@@ -1466,7 +1467,7 @@ void tcg_optimize(TCGContext *s)
op->args[5]);
if (tmp != 2) {
do_setcond_const:
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
--
2.25.1
* [PULL 10/56] tcg/optimize: Move prev_mb into OptContext
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (8 preceding siblings ...)
2021-10-28 2:40 ` [PULL 09/56] tcg/optimize: Change tcg_opt_gen_{mov,movi} interface Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 11/56] tcg/optimize: Split out init_arguments Richard Henderson
` (46 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
This will expose the variable to subroutines that
will be broken out of tcg_optimize.
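A small sketch of the idea, with hypothetical stand-in types:
once the variable lives in the context, a split-out helper can
read and update it without any extra plumbing.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct TCGOp { int touches_memory; } TCGOp;  /* stand-in */

    typedef struct OptContext {
        TCGOp *prev_mb;   /* was a local in tcg_optimize */
    } OptContext;

    static void helper(OptContext *ctx, TCGOp *op)
    {
        if (op->touches_memory) {
            ctx->prev_mb = NULL;   /* helpers can reset it directly */
        }
    }

    int main(void)
    {
        TCGOp mb = { 0 }, ld = { 1 };
        OptContext ctx = { .prev_mb = &mb };

        helper(&ctx, &ld);
        printf("prev_mb %s\n", ctx.prev_mb ? "kept" : "cleared");
        return 0;
    }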
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 627a5b39f6..b875d76354 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -46,6 +46,7 @@ typedef struct TempOptInfo {
typedef struct OptContext {
TCGContext *tcg;
+ TCGOp *prev_mb;
TCGTempSet temps_used;
} OptContext;
@@ -609,7 +610,7 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
void tcg_optimize(TCGContext *s)
{
int nb_temps, nb_globals, i;
- TCGOp *op, *op_next, *prev_mb = NULL;
+ TCGOp *op, *op_next;
OptContext ctx = { .tcg = s };
/* Array VALS has an element for each temp.
@@ -1566,7 +1567,7 @@ void tcg_optimize(TCGContext *s)
}
/* Eliminate duplicate and redundant fence instructions. */
- if (prev_mb) {
+ if (ctx.prev_mb) {
switch (opc) {
case INDEX_op_mb:
/* Merge two barriers of the same type into one,
@@ -1580,7 +1581,7 @@ void tcg_optimize(TCGContext *s)
* barrier. This is stricter than specified but for
* the purposes of TCG is better than not optimizing.
*/
- prev_mb->args[0] |= op->args[0];
+ ctx.prev_mb->args[0] |= op->args[0];
tcg_op_remove(s, op);
break;
@@ -1597,11 +1598,11 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_qemu_st_i64:
case INDEX_op_call:
/* Opcodes that touch guest memory stop the optimization. */
- prev_mb = NULL;
+ ctx.prev_mb = NULL;
break;
}
} else if (opc == INDEX_op_mb) {
- prev_mb = op;
+ ctx.prev_mb = op;
}
}
}
--
2.25.1
* [PULL 11/56] tcg/optimize: Split out init_arguments
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (9 preceding siblings ...)
2021-10-28 2:40 ` [PULL 10/56] tcg/optimize: Move prev_mb into OptContext Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 12/56] tcg/optimize: Split out copy_propagate Richard Henderson
` (45 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
There was no real reason for calls to have separate code here.
Unify init for calls vs non-calls using the call path, which
handles TCG_CALL_DUMMY_ARG.
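A self-contained sketch of the unified path, using a stand-in
TCGTemp: dummy call arguments show up as NULL temps, so a single
NULL check covers both the call and non-call cases.

    #include <stdio.h>

    typedef struct TCGTemp { int id; } TCGTemp;   /* stand-in */

    static void init_ts_info(TCGTemp *ts)
    {
        printf("init temp %d\n", ts->id);
    }

    static void init_arguments(TCGTemp **args, int nb_args)
    {
        for (int i = 0; i < nb_args; i++) {
            if (args[i]) {   /* NULL <=> TCG_CALL_DUMMY_ARG */
                init_ts_info(args[i]);
            }
        }
    }

    int main(void)
    {
        TCGTemp a = { 0 }, b = { 1 };
        TCGTemp *args[] = { &a, NULL, &b };  /* middle arg is a dummy */

        init_arguments(args, 3);
        return 0;
    }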
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 25 +++++++++++--------------
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index b875d76354..019c5aaf81 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -128,11 +128,6 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
}
}
-static void init_arg_info(OptContext *ctx, TCGArg arg)
-{
- init_ts_info(ctx, arg_temp(arg));
-}
-
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
TCGTemp *i, *g, *l;
@@ -606,6 +601,16 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
return false;
}
+static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
+{
+ for (int i = 0; i < nb_args; i++) {
+ TCGTemp *ts = arg_temp(op->args[i]);
+ if (ts) {
+ init_ts_info(ctx, ts);
+ }
+ }
+}
+
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
@@ -636,19 +641,11 @@ void tcg_optimize(TCGContext *s)
if (opc == INDEX_op_call) {
nb_oargs = TCGOP_CALLO(op);
nb_iargs = TCGOP_CALLI(op);
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
- TCGTemp *ts = arg_temp(op->args[i]);
- if (ts) {
- init_ts_info(&ctx, ts);
- }
- }
} else {
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
- init_arg_info(&ctx, op->args[i]);
- }
}
+ init_arguments(&ctx, op, nb_oargs + nb_iargs);
/* Do copy propagation */
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
--
2.25.1
* [PULL 12/56] tcg/optimize: Split out copy_propagate
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (10 preceding siblings ...)
2021-10-28 2:40 ` [PULL 11/56] tcg/optimize: Split out init_arguments Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 13/56] tcg/optimize: Split out fold_call Richard Henderson
` (44 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
Continue splitting tcg_optimize.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 019c5aaf81..fad6f5de1f 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -611,6 +611,19 @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
}
}
+static void copy_propagate(OptContext *ctx, TCGOp *op,
+ int nb_oargs, int nb_iargs)
+{
+ TCGContext *s = ctx->tcg;
+
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+ TCGTemp *ts = arg_temp(op->args[i]);
+ if (ts && ts_is_copy(ts)) {
+ op->args[i] = temp_arg(find_better_copy(s, ts));
+ }
+ }
+}
+
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
@@ -646,14 +659,7 @@ void tcg_optimize(TCGContext *s)
nb_iargs = def->nb_iargs;
}
init_arguments(&ctx, op, nb_oargs + nb_iargs);
-
- /* Do copy propagation */
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
- TCGTemp *ts = arg_temp(op->args[i]);
- if (ts && ts_is_copy(ts)) {
- op->args[i] = temp_arg(find_better_copy(s, ts));
- }
- }
+ copy_propagate(&ctx, op, nb_oargs, nb_iargs);
/* For commutative operations make constant second argument */
switch (opc) {
--
2.25.1
* [PULL 13/56] tcg/optimize: Split out fold_call
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (11 preceding siblings ...)
2021-10-28 2:40 ` [PULL 12/56] tcg/optimize: Split out copy_propagate Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 14/56] tcg/optimize: Drop nb_oargs, nb_iargs locals Richard Henderson
` (43 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
Calls are special in that they have a variable number
of arguments, and need to be able to clobber globals.
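The globals handling reduces to a flags test; a tiny sketch with
assumed bit values (the real flag values live in tcg.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define TCG_CALL_NO_READ_GLOBALS  (1 << 0)   /* assumed values */
    #define TCG_CALL_NO_WRITE_GLOBALS (1 << 1)

    static bool must_reset_globals(int flags)
    {
        /* Skip the reset as soon as either flag is present. */
        return !(flags & (TCG_CALL_NO_READ_GLOBALS |
                          TCG_CALL_NO_WRITE_GLOBALS));
    }

    int main(void)
    {
        printf("%d\n", must_reset_globals(0));                        /* 1 */
        printf("%d\n", must_reset_globals(TCG_CALL_NO_READ_GLOBALS)); /* 0 */
        return 0;
    }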
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------
1 file changed, 41 insertions(+), 22 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index fad6f5de1f..74b9aa025a 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -624,10 +624,42 @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
}
}
+static bool fold_call(OptContext *ctx, TCGOp *op)
+{
+ TCGContext *s = ctx->tcg;
+ int nb_oargs = TCGOP_CALLO(op);
+ int nb_iargs = TCGOP_CALLI(op);
+ int flags, i;
+
+ init_arguments(ctx, op, nb_oargs + nb_iargs);
+ copy_propagate(ctx, op, nb_oargs, nb_iargs);
+
+ /* If the function reads or writes globals, reset temp data. */
+ flags = tcg_call_flags(op);
+ if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
+ int nb_globals = s->nb_globals;
+
+ for (i = 0; i < nb_globals; i++) {
+ if (test_bit(i, ctx->temps_used.l)) {
+ reset_ts(&ctx->tcg->temps[i]);
+ }
+ }
+ }
+
+ /* Reset temp data for outputs. */
+ for (i = 0; i < nb_oargs; i++) {
+ reset_temp(op->args[i]);
+ }
+
+ /* Stop optimizing MB across calls. */
+ ctx->prev_mb = NULL;
+ return true;
+}
+
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
- int nb_temps, nb_globals, i;
+ int nb_temps, i;
TCGOp *op, *op_next;
OptContext ctx = { .tcg = s };
@@ -637,8 +669,6 @@ void tcg_optimize(TCGContext *s)
available through the doubly linked circular list. */
nb_temps = s->nb_temps;
- nb_globals = s->nb_globals;
-
for (i = 0; i < nb_temps; ++i) {
s->temps[i].state_ptr = NULL;
}
@@ -647,17 +677,17 @@ void tcg_optimize(TCGContext *s)
uint64_t z_mask, partmask, affected, tmp;
int nb_oargs, nb_iargs;
TCGOpcode opc = op->opc;
- const TCGOpDef *def = &tcg_op_defs[opc];
+ const TCGOpDef *def;
- /* Count the arguments, and initialize the temps that are
- going to be used */
+ /* Calls are special. */
if (opc == INDEX_op_call) {
- nb_oargs = TCGOP_CALLO(op);
- nb_iargs = TCGOP_CALLI(op);
- } else {
- nb_oargs = def->nb_oargs;
- nb_iargs = def->nb_iargs;
+ fold_call(&ctx, op);
+ continue;
}
+
+ def = &tcg_op_defs[opc];
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
init_arguments(&ctx, op, nb_oargs + nb_iargs);
copy_propagate(&ctx, op, nb_oargs, nb_iargs);
@@ -1549,16 +1579,6 @@ void tcg_optimize(TCGContext *s)
if (def->flags & TCG_OPF_BB_END) {
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
} else {
- if (opc == INDEX_op_call &&
- !(tcg_call_flags(op)
- & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
- for (i = 0; i < nb_globals; i++) {
- if (test_bit(i, ctx.temps_used.l)) {
- reset_ts(&s->temps[i]);
- }
- }
- }
-
for (i = 0; i < nb_oargs; i++) {
reset_temp(op->args[i]);
/* Save the corresponding known-zero bits mask for the
@@ -1599,7 +1619,6 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st8_i32:
case INDEX_op_qemu_st_i64:
- case INDEX_op_call:
/* Opcodes that touch guest memory stop the optimization. */
ctx.prev_mb = NULL;
break;
--
2.25.1
* [PULL 14/56] tcg/optimize: Drop nb_oargs, nb_iargs locals
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (12 preceding siblings ...)
2021-10-28 2:40 ` [PULL 13/56] tcg/optimize: Split out fold_call Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 15/56] tcg/optimize: Change fail return for do_constant_folding_cond* Richard Henderson
` (42 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
Rather than try to keep these up-to-date across folding,
re-read nb_oargs at the end, after re-reading the opcode.
A couple of asserts need dropping, but that will take care
of itself as we split the function further.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 74b9aa025a..77cdffaaef 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -675,7 +675,6 @@ void tcg_optimize(TCGContext *s)
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
uint64_t z_mask, partmask, affected, tmp;
- int nb_oargs, nb_iargs;
TCGOpcode opc = op->opc;
const TCGOpDef *def;
@@ -686,10 +685,8 @@ void tcg_optimize(TCGContext *s)
}
def = &tcg_op_defs[opc];
- nb_oargs = def->nb_oargs;
- nb_iargs = def->nb_iargs;
- init_arguments(&ctx, op, nb_oargs + nb_iargs);
- copy_propagate(&ctx, op, nb_oargs, nb_iargs);
+ init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
+ copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
/* For commutative operations make constant second argument */
switch (opc) {
@@ -1063,7 +1060,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(qemu_ld):
{
- MemOpIdx oi = op->args[nb_oargs + nb_iargs];
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
MemOp mop = get_memop(oi);
if (!(mop & MO_SIGN)) {
z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
@@ -1122,12 +1119,10 @@ void tcg_optimize(TCGContext *s)
}
if (partmask == 0) {
- tcg_debug_assert(nb_oargs == 1);
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
if (affected == 0) {
- tcg_debug_assert(nb_oargs == 1);
tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
@@ -1202,7 +1197,6 @@ void tcg_optimize(TCGContext *s)
} else if (args_are_copies(op->args[1], op->args[2])) {
op->opc = INDEX_op_dup_vec;
TCGOP_VECE(op) = MO_32;
- nb_iargs = 1;
}
break;
@@ -1371,7 +1365,6 @@ void tcg_optimize(TCGContext *s)
op->opc = opc = (opc == INDEX_op_movcond_i32
? INDEX_op_setcond_i32
: INDEX_op_setcond_i64);
- nb_iargs = 2;
}
break;
@@ -1579,6 +1572,7 @@ void tcg_optimize(TCGContext *s)
if (def->flags & TCG_OPF_BB_END) {
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
} else {
+ int nb_oargs = def->nb_oargs;
for (i = 0; i < nb_oargs; i++) {
reset_temp(op->args[i]);
/* Save the corresponding known-zero bits mask for the
--
2.25.1
* [PULL 15/56] tcg/optimize: Change fail return for do_constant_folding_cond*
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (13 preceding siblings ...)
2021-10-28 2:40 ` [PULL 14/56] tcg/optimize: Drop nb_oargs, nb_iargs locals Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 16/56] tcg/optimize: Return true from tcg_opt_gen_{mov,movi} Richard Henderson
` (41 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Return -1 instead of 2 for failure, so that we can
use comparisons against 0 for all cases.
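The resulting caller pattern, as a minimal runnable sketch with a
hypothetical fold helper standing in for do_constant_folding_cond:

    #include <stdio.h>

    /* Returns the 0/1 result if known, else -1. */
    static int fold_cond(int known, int value)
    {
        return known ? (value != 0) : -1;
    }

    int main(void)
    {
        int i = fold_cond(1, 7);

        if (i == 0) {
            printf("branch never taken: remove the op\n");
        } else if (i > 0) {
            printf("branch always taken: turn it into br\n");
        } else {
            printf("not constant: keep the brcond\n");   /* i < 0 */
        }
        return 0;
    }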
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 145 +++++++++++++++++++++++++------------------------
1 file changed, 74 insertions(+), 71 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 77cdffaaef..19c01687b4 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -502,10 +502,12 @@ static bool do_constant_folding_cond_eq(TCGCond c)
}
}
-/* Return 2 if the condition can't be simplified, and the result
- of the condition (0 or 1) if it can */
-static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
- TCGArg y, TCGCond c)
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
+ TCGArg y, TCGCond c)
{
uint64_t xv = arg_info(x)->val;
uint64_t yv = arg_info(y)->val;
@@ -527,15 +529,17 @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
case TCG_COND_GEU:
return 1;
default:
- return 2;
+ return -1;
}
}
- return 2;
+ return -1;
}
-/* Return 2 if the condition can't be simplified, and the result
- of the condition (0 or 1) if it can */
-static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
TCGArg al = p1[0], ah = p1[1];
TCGArg bl = p2[0], bh = p2[1];
@@ -565,7 +569,7 @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
return do_constant_folding_cond_eq(c);
}
- return 2;
+ return -1;
}
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
@@ -1321,22 +1325,21 @@ void tcg_optimize(TCGContext *s)
break;
CASE_OP_32_64(setcond):
- tmp = do_constant_folding_cond(opc, op->args[1],
- op->args[2], op->args[3]);
- if (tmp != 2) {
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
+ i = do_constant_folding_cond(opc, op->args[1],
+ op->args[2], op->args[3]);
+ if (i >= 0) {
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
continue;
}
break;
CASE_OP_32_64(brcond):
- tmp = do_constant_folding_cond(opc, op->args[0],
- op->args[1], op->args[2]);
- switch (tmp) {
- case 0:
+ i = do_constant_folding_cond(opc, op->args[0],
+ op->args[1], op->args[2]);
+ if (i == 0) {
tcg_op_remove(s, op);
continue;
- case 1:
+ } else if (i > 0) {
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
op->opc = opc = INDEX_op_br;
op->args[0] = op->args[3];
@@ -1345,10 +1348,10 @@ void tcg_optimize(TCGContext *s)
break;
CASE_OP_32_64(movcond):
- tmp = do_constant_folding_cond(opc, op->args[1],
- op->args[2], op->args[5]);
- if (tmp != 2) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
+ i = do_constant_folding_cond(opc, op->args[1],
+ op->args[2], op->args[5]);
+ if (i >= 0) {
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
continue;
}
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
@@ -1412,14 +1415,14 @@ void tcg_optimize(TCGContext *s)
break;
case INDEX_op_brcond2_i32:
- tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
- op->args[4]);
- if (tmp == 0) {
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2],
+ op->args[4]);
+ if (i == 0) {
do_brcond_false:
tcg_op_remove(s, op);
continue;
}
- if (tmp == 1) {
+ if (i > 0) {
do_brcond_true:
op->opc = opc = INDEX_op_br;
op->args[0] = op->args[5];
@@ -1443,20 +1446,20 @@ void tcg_optimize(TCGContext *s)
if (op->args[4] == TCG_COND_EQ) {
/* Simplify EQ comparisons where one of the pairs
can be simplified. */
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[0], op->args[2],
- TCG_COND_EQ);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
+ op->args[0], op->args[2],
+ TCG_COND_EQ);
+ if (i == 0) {
goto do_brcond_false;
- } else if (tmp == 1) {
+ } else if (i > 0) {
goto do_brcond_high;
}
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[1], op->args[3],
- TCG_COND_EQ);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
+ op->args[1], op->args[3],
+ TCG_COND_EQ);
+ if (i == 0) {
goto do_brcond_false;
- } else if (tmp != 1) {
+ } else if (i < 0) {
break;
}
do_brcond_low:
@@ -1470,31 +1473,31 @@ void tcg_optimize(TCGContext *s)
if (op->args[4] == TCG_COND_NE) {
/* Simplify NE comparisons where one of the pairs
can be simplified. */
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[0], op->args[2],
- TCG_COND_NE);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
+ op->args[0], op->args[2],
+ TCG_COND_NE);
+ if (i == 0) {
goto do_brcond_high;
- } else if (tmp == 1) {
+ } else if (i > 0) {
goto do_brcond_true;
}
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[1], op->args[3],
- TCG_COND_NE);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
+ op->args[1], op->args[3],
+ TCG_COND_NE);
+ if (i == 0) {
goto do_brcond_low;
- } else if (tmp == 1) {
+ } else if (i > 0) {
goto do_brcond_true;
}
}
break;
case INDEX_op_setcond2_i32:
- tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
- op->args[5]);
- if (tmp != 2) {
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3],
+ op->args[5]);
+ if (i >= 0) {
do_setcond_const:
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
continue;
}
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
@@ -1516,20 +1519,20 @@ void tcg_optimize(TCGContext *s)
if (op->args[5] == TCG_COND_EQ) {
/* Simplify EQ comparisons where one of the pairs
can be simplified. */
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[1], op->args[3],
- TCG_COND_EQ);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
+ op->args[1], op->args[3],
+ TCG_COND_EQ);
+ if (i == 0) {
goto do_setcond_const;
- } else if (tmp == 1) {
+ } else if (i > 0) {
goto do_setcond_high;
}
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[2], op->args[4],
- TCG_COND_EQ);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
+ op->args[2], op->args[4],
+ TCG_COND_EQ);
+ if (i == 0) {
goto do_setcond_high;
- } else if (tmp != 1) {
+ } else if (i < 0) {
break;
}
do_setcond_low:
@@ -1543,20 +1546,20 @@ void tcg_optimize(TCGContext *s)
if (op->args[5] == TCG_COND_NE) {
/* Simplify NE comparisons where one of the pairs
can be simplified. */
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[1], op->args[3],
- TCG_COND_NE);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
+ op->args[1], op->args[3],
+ TCG_COND_NE);
+ if (i == 0) {
goto do_setcond_high;
- } else if (tmp == 1) {
+ } else if (i > 0) {
goto do_setcond_const;
}
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[2], op->args[4],
- TCG_COND_NE);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
+ op->args[2], op->args[4],
+ TCG_COND_NE);
+ if (i == 0) {
goto do_setcond_low;
- } else if (tmp == 1) {
+ } else if (i > 0) {
goto do_setcond_const;
}
}
--
2.25.1
* [PULL 16/56] tcg/optimize: Return true from tcg_opt_gen_{mov,movi}
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (14 preceding siblings ...)
2021-10-28 2:40 ` [PULL 15/56] tcg/optimize: Change fail return for do_constant_folding_cond* Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 17/56] tcg/optimize: Split out finish_folding Richard Henderson
` (40 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
This will allow callers to tail-call these functions and
return true, indicating that processing is complete.
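A minimal sketch of the intended caller shape (hypothetical
signatures, not the real ones):

    #include <stdbool.h>
    #include <stdio.h>

    static bool gen_movi(int dst, long val)
    {
        printf("movi r%d, %ld\n", dst, val);
        return true;   /* processing of this op is complete */
    }

    static bool fold_const(int dst, long known_val)
    {
        return gen_movi(dst, known_val);   /* tail call */
    }

    int main(void)
    {
        printf("done=%d\n", fold_const(0, 42));
        return 0;
    }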
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 19c01687b4..066e635f73 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -180,7 +180,7 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
-static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
+static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
TCGTemp *dst_ts = arg_temp(dst);
TCGTemp *src_ts = arg_temp(src);
@@ -192,7 +192,7 @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
if (ts_are_copies(dst_ts, src_ts)) {
tcg_op_remove(ctx->tcg, op);
- return;
+ return true;
}
reset_ts(dst_ts);
@@ -228,9 +228,10 @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
di->is_const = si->is_const;
di->val = si->val;
}
+ return true;
}
-static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
+static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
TCGArg dst, uint64_t val)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
@@ -248,7 +249,7 @@ static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
/* Convert movi to mov with constant temp. */
tv = tcg_constant_internal(type, val);
init_ts_info(ctx, tv);
- tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
+ return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
--
2.25.1
* [PULL 17/56] tcg/optimize: Split out finish_folding
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (15 preceding siblings ...)
2021-10-28 2:40 ` [PULL 16/56] tcg/optimize: Return true from tcg_opt_gen_{mov,movi} Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 18/56] tcg/optimize: Use a boolean to avoid a mass of continues Richard Henderson
` (39 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
Copy z_mask into OptContext, for writeback to the
first output within the new function.
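A rough sketch of the hand-off, with a stand-in for the per-temp
info (the real code writes arg_info(op->args[0])->z_mask):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct OptContext {
        uint64_t z_mask;   /* in-flight value from optimization */
    } OptContext;

    static uint64_t first_output_z_mask;   /* stand-in for arg_info */

    static void finish_folding(OptContext *ctx)
    {
        /* Write back the known-zero bits mask for the first output. */
        first_output_z_mask = ctx->z_mask;
    }

    int main(void)
    {
        OptContext ctx = { .z_mask = 0xffffffffu };  /* e.g. a 32-bit op */

        finish_folding(&ctx);
        printf("z_mask = %#llx\n",
               (unsigned long long)first_output_z_mask);
        return 0;
    }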
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 49 +++++++++++++++++++++++++++++++++----------------
1 file changed, 33 insertions(+), 16 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 066e635f73..368457f4a2 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -48,6 +48,9 @@ typedef struct OptContext {
TCGContext *tcg;
TCGOp *prev_mb;
TCGTempSet temps_used;
+
+ /* In flight values from optimization. */
+ uint64_t z_mask;
} OptContext;
static inline TempOptInfo *ts_info(TCGTemp *ts)
@@ -629,6 +632,34 @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
}
}
+static void finish_folding(OptContext *ctx, TCGOp *op)
+{
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
+ int i, nb_oargs;
+
+ /*
+ * For an opcode that ends a BB, reset all temp data.
+ * We do no cross-BB optimization.
+ */
+ if (def->flags & TCG_OPF_BB_END) {
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ ctx->prev_mb = NULL;
+ return;
+ }
+
+ nb_oargs = def->nb_oargs;
+ for (i = 0; i < nb_oargs; i++) {
+ reset_temp(op->args[i]);
+ /*
+ * Save the corresponding known-zero bits mask for the
+ * first output argument (only one supported so far).
+ */
+ if (i == 0) {
+ arg_info(op->args[i])->z_mask = ctx->z_mask;
+ }
+ }
+}
+
static bool fold_call(OptContext *ctx, TCGOp *op)
{
TCGContext *s = ctx->tcg;
@@ -1122,6 +1153,7 @@ void tcg_optimize(TCGContext *s)
partmask &= 0xffffffffu;
affected &= 0xffffffffu;
}
+ ctx.z_mask = z_mask;
if (partmask == 0) {
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
@@ -1570,22 +1602,7 @@ void tcg_optimize(TCGContext *s)
break;
}
- /* Some of the folding above can change opc. */
- opc = op->opc;
- def = &tcg_op_defs[opc];
- if (def->flags & TCG_OPF_BB_END) {
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
- } else {
- int nb_oargs = def->nb_oargs;
- for (i = 0; i < nb_oargs; i++) {
- reset_temp(op->args[i]);
- /* Save the corresponding known-zero bits mask for the
- first output argument (only one supported so far). */
- if (i == 0) {
- arg_info(op->args[i])->z_mask = z_mask;
- }
- }
- }
+ finish_folding(&ctx, op);
/* Eliminate duplicate and redundant fence instructions. */
if (ctx.prev_mb) {
--
2.25.1
* [PULL 18/56] tcg/optimize: Use a boolean to avoid a mass of continues
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (16 preceding siblings ...)
2021-10-28 2:40 ` [PULL 17/56] tcg/optimize: Split out finish_folding Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 19/56] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st} Richard Henderson
` (38 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 368457f4a2..699476e2f1 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -713,6 +713,7 @@ void tcg_optimize(TCGContext *s)
uint64_t z_mask, partmask, affected, tmp;
TCGOpcode opc = op->opc;
const TCGOpDef *def;
+ bool done = false;
/* Calls are special. */
if (opc == INDEX_op_call) {
@@ -1212,8 +1213,8 @@ void tcg_optimize(TCGContext *s)
allocator where needed and possible. Also detect copies. */
switch (opc) {
CASE_OP_32_64_VEC(mov):
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
- continue;
+ done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
+ break;
case INDEX_op_dup_vec:
if (arg_is_const(op->args[1])) {
@@ -1602,7 +1603,9 @@ void tcg_optimize(TCGContext *s)
break;
}
- finish_folding(&ctx, op);
+ if (!done) {
+ finish_folding(&ctx, op);
+ }
/* Eliminate duplicate and redundant fence instructions. */
if (ctx.prev_mb) {
--
2.25.1
* [PULL 19/56] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (17 preceding siblings ...)
2021-10-28 2:40 ` [PULL 18/56] tcg/optimize: Use a boolean to avoid a mass of continues Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 20/56] tcg/optimize: Split out fold_const{1,2} Richard Henderson
` (37 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
This puts the separate mb optimization into the same framework
as the others. While fold_qemu_{ld,st} are currently identical,
that won't last as more code gets moved.
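The barrier merge itself is just an OR of the type bits; a tiny
sketch with assumed bit assignments:

    #include <stdio.h>

    int main(void)
    {
        unsigned prev_mb_arg = 1u << 0;   /* assumed: one barrier kind */
        unsigned next_mb_arg = 1u << 2;   /* assumed: another kind */

        /* mb X; mb Y => mb X|Y: strengthen the earlier barrier,
         * then the later one can be removed. */
        prev_mb_arg |= next_mb_arg;
        printf("merged barrier arg = %#x\n", prev_mb_arg);
        return 0;
    }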
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 89 +++++++++++++++++++++++++++++---------------------
1 file changed, 51 insertions(+), 38 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 699476e2f1..159a5a9ee5 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -692,6 +692,44 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_mb(OptContext *ctx, TCGOp *op)
+{
+ /* Eliminate duplicate and redundant fence instructions. */
+ if (ctx->prev_mb) {
+ /*
+ * Merge two barriers of the same type into one,
+ * or a weaker barrier into a stronger one,
+ * or two weaker barriers into a stronger one.
+ * mb X; mb Y => mb X|Y
+ * mb; strl => mb; st
+ * ldaq; mb => ld; mb
+ * ldaq; strl => ld; mb; st
+ * Other combinations are also merged into a strong
+ * barrier. This is stricter than specified but for
+ * the purposes of TCG is better than not optimizing.
+ */
+ ctx->prev_mb->args[0] |= op->args[0];
+ tcg_op_remove(ctx->tcg, op);
+ } else {
+ ctx->prev_mb = op;
+ }
+ return true;
+}
+
+static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
+{
+ /* Opcodes that touch guest memory stop the mb optimization. */
+ ctx->prev_mb = NULL;
+ return false;
+}
+
+static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
+{
+ /* Opcodes that touch guest memory stop the mb optimization. */
+ ctx->prev_mb = NULL;
+ return false;
+}
+
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
@@ -1599,6 +1637,19 @@ void tcg_optimize(TCGContext *s)
}
break;
+ case INDEX_op_mb:
+ done = fold_mb(&ctx, op);
+ break;
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
+ done = fold_qemu_ld(&ctx, op);
+ break;
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st8_i32:
+ case INDEX_op_qemu_st_i64:
+ done = fold_qemu_st(&ctx, op);
+ break;
+
default:
break;
}
@@ -1606,43 +1657,5 @@ void tcg_optimize(TCGContext *s)
if (!done) {
finish_folding(&ctx, op);
}
-
- /* Eliminate duplicate and redundant fence instructions. */
- if (ctx.prev_mb) {
- switch (opc) {
- case INDEX_op_mb:
- /* Merge two barriers of the same type into one,
- * or a weaker barrier into a stronger one,
- * or two weaker barriers into a stronger one.
- * mb X; mb Y => mb X|Y
- * mb; strl => mb; st
- * ldaq; mb => ld; mb
- * ldaq; strl => ld; mb; st
- * Other combinations are also merged into a strong
- * barrier. This is stricter than specified but for
- * the purposes of TCG is better than not optimizing.
- */
- ctx.prev_mb->args[0] |= op->args[0];
- tcg_op_remove(s, op);
- break;
-
- default:
- /* Opcodes that end the block stop the optimization. */
- if ((def->flags & TCG_OPF_BB_END) == 0) {
- break;
- }
- /* fallthru */
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st8_i32:
- case INDEX_op_qemu_st_i64:
- /* Opcodes that touch guest memory stop the optimization. */
- ctx.prev_mb = NULL;
- break;
- }
- } else if (opc == INDEX_op_mb) {
- ctx.prev_mb = op;
- }
}
}
--
2.25.1
* [PULL 20/56] tcg/optimize: Split out fold_const{1,2}
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (18 preceding siblings ...)
2021-10-28 2:40 ` [PULL 19/56] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st} Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 21/56] tcg/optimize: Split out fold_setcond2 Richard Henderson
` (36 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
Split out a whole bunch of placeholder functions, which are
currently identical. That won't last as more code gets moved.
Use CASE_32_64_VEC for some logical operators that previously
missed the addition of vectors.
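A self-contained sketch of the two helpers' shape, with a toy
do_constant_folding standing in for the real one:

    #include <stdbool.h>
    #include <stdio.h>

    static long do_constant_folding(int opc, long x, long y)
    {
        return opc == 0 ? ~x : x + y;   /* toy ops: 0 = not, 1 = add */
    }

    /* Unary: fold when the one input is a known constant. */
    static bool fold_const1(int opc, bool known, long t, long *out)
    {
        if (known) {
            *out = do_constant_folding(opc, t, 0);
            return true;
        }
        return false;
    }

    /* Binary: fold when both inputs are known constants. */
    static bool fold_const2(int opc, bool k1, long t1,
                            bool k2, long t2, long *out)
    {
        if (k1 && k2) {
            *out = do_constant_folding(opc, t1, t2);
            return true;
        }
        return false;
    }

    int main(void)
    {
        long r;

        if (fold_const2(1, true, 40, true, 2, &r)) {
            printf("folded add: %ld\n", r);   /* 42 */
        }
        if (fold_const1(0, true, 0, &r)) {
            printf("folded not: %ld\n", r);   /* -1 */
        }
        return 0;
    }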
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 271 +++++++++++++++++++++++++++++++++++++++----------
1 file changed, 219 insertions(+), 52 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 159a5a9ee5..5c3f8e8fcd 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -660,6 +660,60 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
}
}
+/*
+ * The fold_* functions return true when processing is complete,
+ * usually by folding the operation to a constant or to a copy,
+ * and calling tcg_opt_gen_{mov,movi}. They may do other things,
+ * like collect information about the value produced, for use in
+ * optimizing a subsequent operation.
+ *
+ * These first fold_* functions are all helpers, used by other
+ * folders for more specific operations.
+ */
+
+static bool fold_const1(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1])) {
+ uint64_t t;
+
+ t = arg_info(op->args[1])->val;
+ t = do_constant_folding(op->opc, t, 0);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ }
+ return false;
+}
+
+static bool fold_const2(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+ uint64_t t1 = arg_info(op->args[1])->val;
+ uint64_t t2 = arg_info(op->args[2])->val;
+
+ t1 = do_constant_folding(op->opc, t1, t2);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+ }
+ return false;
+}
+
+/*
+ * These outermost fold_<op> functions are sorted alphabetically.
+ */
+
+static bool fold_add(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_and(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_andc(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
static bool fold_call(OptContext *ctx, TCGOp *op)
{
TCGContext *s = ctx->tcg;
@@ -692,6 +746,31 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
+{
+ return fold_const1(ctx, op);
+}
+
+static bool fold_divide(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_exts(OptContext *ctx, TCGOp *op)
+{
+ return fold_const1(ctx, op);
+}
+
+static bool fold_extu(OptContext *ctx, TCGOp *op)
+{
+ return fold_const1(ctx, op);
+}
+
static bool fold_mb(OptContext *ctx, TCGOp *op)
{
/* Eliminate duplicate and redundant fence instructions. */
@@ -716,6 +795,46 @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_mul(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_nand(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_neg(OptContext *ctx, TCGOp *op)
+{
+ return fold_const1(ctx, op);
+}
+
+static bool fold_nor(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_not(OptContext *ctx, TCGOp *op)
+{
+ return fold_const1(ctx, op);
+}
+
+static bool fold_or(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_orc(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
/* Opcodes that touch guest memory stop the mb optimization. */
@@ -730,6 +849,26 @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_remainder(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_shift(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
+static bool fold_xor(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op);
+}
+
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
@@ -1276,26 +1415,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(not):
- CASE_OP_32_64(neg):
- CASE_OP_32_64(ext8s):
- CASE_OP_32_64(ext8u):
- CASE_OP_32_64(ext16s):
- CASE_OP_32_64(ext16u):
- CASE_OP_32_64(ctpop):
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
- if (arg_is_const(op->args[1])) {
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- continue;
- }
- break;
-
CASE_OP_32_64(bswap16):
CASE_OP_32_64(bswap32):
case INDEX_op_bswap64_i64:
@@ -1307,36 +1426,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(add):
- CASE_OP_32_64(sub):
- CASE_OP_32_64(mul):
- CASE_OP_32_64(or):
- CASE_OP_32_64(and):
- CASE_OP_32_64(xor):
- CASE_OP_32_64(shl):
- CASE_OP_32_64(shr):
- CASE_OP_32_64(sar):
- CASE_OP_32_64(rotl):
- CASE_OP_32_64(rotr):
- CASE_OP_32_64(andc):
- CASE_OP_32_64(orc):
- CASE_OP_32_64(eqv):
- CASE_OP_32_64(nand):
- CASE_OP_32_64(nor):
- CASE_OP_32_64(muluh):
- CASE_OP_32_64(mulsh):
- CASE_OP_32_64(div):
- CASE_OP_32_64(divu):
- CASE_OP_32_64(rem):
- CASE_OP_32_64(remu):
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
- arg_info(op->args[2])->val);
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- continue;
- }
- break;
-
CASE_OP_32_64(clz):
CASE_OP_32_64(ctz):
if (arg_is_const(op->args[1])) {
@@ -1637,9 +1726,73 @@ void tcg_optimize(TCGContext *s)
}
break;
+ default:
+ break;
+
+ /* ---------------------------------------------------------- */
+ /* Sorted alphabetically by opcode as much as possible. */
+
+ CASE_OP_32_64_VEC(add):
+ done = fold_add(&ctx, op);
+ break;
+ CASE_OP_32_64_VEC(and):
+ done = fold_and(&ctx, op);
+ break;
+ CASE_OP_32_64_VEC(andc):
+ done = fold_andc(&ctx, op);
+ break;
+ CASE_OP_32_64(ctpop):
+ done = fold_ctpop(&ctx, op);
+ break;
+ CASE_OP_32_64(div):
+ CASE_OP_32_64(divu):
+ done = fold_divide(&ctx, op);
+ break;
+ CASE_OP_32_64(eqv):
+ done = fold_eqv(&ctx, op);
+ break;
+ CASE_OP_32_64(ext8s):
+ CASE_OP_32_64(ext16s):
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext_i32_i64:
+ done = fold_exts(&ctx, op);
+ break;
+ CASE_OP_32_64(ext8u):
+ CASE_OP_32_64(ext16u):
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_extrh_i64_i32:
+ done = fold_extu(&ctx, op);
+ break;
case INDEX_op_mb:
done = fold_mb(&ctx, op);
break;
+ CASE_OP_32_64(mul):
+ done = fold_mul(&ctx, op);
+ break;
+ CASE_OP_32_64(mulsh):
+ CASE_OP_32_64(muluh):
+ done = fold_mul_highpart(&ctx, op);
+ break;
+ CASE_OP_32_64(nand):
+ done = fold_nand(&ctx, op);
+ break;
+ CASE_OP_32_64(neg):
+ done = fold_neg(&ctx, op);
+ break;
+ CASE_OP_32_64(nor):
+ done = fold_nor(&ctx, op);
+ break;
+ CASE_OP_32_64_VEC(not):
+ done = fold_not(&ctx, op);
+ break;
+ CASE_OP_32_64_VEC(or):
+ done = fold_or(&ctx, op);
+ break;
+ CASE_OP_32_64_VEC(orc):
+ done = fold_orc(&ctx, op);
+ break;
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
done = fold_qemu_ld(&ctx, op);
@@ -1649,8 +1802,22 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_qemu_st_i64:
done = fold_qemu_st(&ctx, op);
break;
-
- default:
+ CASE_OP_32_64(rem):
+ CASE_OP_32_64(remu):
+ done = fold_remainder(&ctx, op);
+ break;
+ CASE_OP_32_64(rotl):
+ CASE_OP_32_64(rotr):
+ CASE_OP_32_64(sar):
+ CASE_OP_32_64(shl):
+ CASE_OP_32_64(shr):
+ done = fold_shift(&ctx, op);
+ break;
+ CASE_OP_32_64_VEC(sub):
+ done = fold_sub(&ctx, op);
+ break;
+ CASE_OP_32_64_VEC(xor):
+ done = fold_xor(&ctx, op);
break;
}
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 21/56] tcg/optimize: Split out fold_setcond2
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (19 preceding siblings ...)
2021-10-28 2:40 ` [PULL 20/56] tcg/optimize: Split out fold_const{1,2} Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 22/56] tcg/optimize: Split out fold_brcond2 Richard Henderson
` (35 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
Reduce some code duplication by folding the NE and EQ cases.
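The merge works by biasing the comparison result: do_constant_folding_cond()
returns -1/0/1, and for NE the inv flag remaps that onto the EQ handling
via i ^ inv. A standalone sketch (not QEMU code) of the remapping:

#include <stdio.h>

int main(void)
{
    /* i is the partial-comparison result (-1 unknown, 0 false, 1 true);
     * inv is 1 for TCG_COND_NE, 0 for TCG_COND_EQ.  i ^ inv maps the
     * NE outcomes onto the EQ cases, while -1 stays negative and so
     * matches neither the 0 nor the 1 switch case. */
    for (int inv = 0; inv <= 1; inv++) {
        for (int i = -1; i <= 1; i++) {
            printf("inv=%d i=%2d -> i^inv=%2d\n", inv, i, i ^ inv);
        }
    }
    return 0;
}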
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 145 ++++++++++++++++++++++++-------------------------
1 file changed, 72 insertions(+), 73 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 5c3f8e8fcd..80e43deb8e 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -854,6 +854,75 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_setcond2(OptContext *ctx, TCGOp *op)
+{
+ TCGCond cond = op->args[5];
+ int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
+ int inv = 0;
+
+ if (i >= 0) {
+ goto do_setcond_const;
+ }
+
+ switch (cond) {
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ /*
+ * Simplify LT/GE comparisons vs zero to a single compare
+ * vs the high word of the input.
+ */
+ if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
+ arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
+ goto do_setcond_high;
+ }
+ break;
+
+ case TCG_COND_NE:
+ inv = 1;
+ QEMU_FALLTHROUGH;
+ case TCG_COND_EQ:
+ /*
+ * Simplify EQ/NE comparisons where one of the pairs
+ * can be simplified.
+ */
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
+ op->args[3], cond);
+ switch (i ^ inv) {
+ case 0:
+ goto do_setcond_const;
+ case 1:
+ goto do_setcond_high;
+ }
+
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
+ op->args[4], cond);
+ switch (i ^ inv) {
+ case 0:
+ goto do_setcond_const;
+ case 1:
+ op->args[2] = op->args[3];
+ op->args[3] = cond;
+ op->opc = INDEX_op_setcond_i32;
+ break;
+ }
+ break;
+
+ default:
+ break;
+
+ do_setcond_high:
+ op->args[1] = op->args[2];
+ op->args[2] = op->args[4];
+ op->args[3] = cond;
+ op->opc = INDEX_op_setcond_i32;
+ break;
+ }
+ return false;
+
+ do_setcond_const:
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+}
+
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
return fold_const2(ctx, op);
@@ -1653,79 +1722,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- case INDEX_op_setcond2_i32:
- i = do_constant_folding_cond2(&op->args[1], &op->args[3],
- op->args[5]);
- if (i >= 0) {
- do_setcond_const:
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
- continue;
- }
- if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
- && arg_is_const(op->args[3])
- && arg_info(op->args[3])->val == 0
- && arg_is_const(op->args[4])
- && arg_info(op->args[4])->val == 0) {
- /* Simplify LT/GE comparisons vs zero to a single compare
- vs the high word of the input. */
- do_setcond_high:
- reset_temp(op->args[0]);
- arg_info(op->args[0])->z_mask = 1;
- op->opc = INDEX_op_setcond_i32;
- op->args[1] = op->args[2];
- op->args[2] = op->args[4];
- op->args[3] = op->args[5];
- break;
- }
- if (op->args[5] == TCG_COND_EQ) {
- /* Simplify EQ comparisons where one of the pairs
- can be simplified. */
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[1], op->args[3],
- TCG_COND_EQ);
- if (i == 0) {
- goto do_setcond_const;
- } else if (i > 0) {
- goto do_setcond_high;
- }
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[2], op->args[4],
- TCG_COND_EQ);
- if (i == 0) {
- goto do_setcond_high;
- } else if (i < 0) {
- break;
- }
- do_setcond_low:
- reset_temp(op->args[0]);
- arg_info(op->args[0])->z_mask = 1;
- op->opc = INDEX_op_setcond_i32;
- op->args[2] = op->args[3];
- op->args[3] = op->args[5];
- break;
- }
- if (op->args[5] == TCG_COND_NE) {
- /* Simplify NE comparisons where one of the pairs
- can be simplified. */
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[1], op->args[3],
- TCG_COND_NE);
- if (i == 0) {
- goto do_setcond_high;
- } else if (i > 0) {
- goto do_setcond_const;
- }
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[2], op->args[4],
- TCG_COND_NE);
- if (i == 0) {
- goto do_setcond_low;
- } else if (i > 0) {
- goto do_setcond_const;
- }
- }
- break;
-
default:
break;
@@ -1813,6 +1809,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(shr):
done = fold_shift(&ctx, op);
break;
+ case INDEX_op_setcond2_i32:
+ done = fold_setcond2(&ctx, op);
+ break;
CASE_OP_32_64_VEC(sub):
done = fold_sub(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 22/56] tcg/optimize: Split out fold_brcond2
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (20 preceding siblings ...)
2021-10-28 2:40 ` [PULL 21/56] tcg/optimize: Split out fold_setcond2 Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 23/56] tcg/optimize: Split out fold_brcond Richard Henderson
` (34 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires
Reduce some code duplication by folding the NE and EQ cases.
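Besides the NE/EQ merge, the patch preserves the LT/GE-vs-zero shortcut
(do_brcond_high): the sign of a double-word value lives entirely in its
high half, so only the high word needs comparing. A standalone
demonstration (not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Compare a 64-bit value, split into 32-bit halves, against zero. */
static int lt_zero_full(uint32_t lo, uint32_t hi)
{
    return (int64_t)(((uint64_t)hi << 32) | lo) < 0;
}

/* The same test using only the high word, as do_brcond_high does. */
static int lt_zero_high(uint32_t hi)
{
    return (int32_t)hi < 0;
}

int main(void)
{
    static const uint32_t tests[][2] = {
        { 0x00000000, 0x00000000 },
        { 0xffffffff, 0x7fffffff },
        { 0x00000000, 0x80000000 },
        { 0x12345678, 0xffffffff },
    };
    for (unsigned k = 0; k < 4; k++) {
        uint32_t lo = tests[k][0], hi = tests[k][1];
        printf("hi=%08x lo=%08x: full=%d high-only=%d\n",
               hi, lo, lt_zero_full(lo, hi), lt_zero_high(hi));
    }
    return 0;
}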
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 159 +++++++++++++++++++++++++------------------------
1 file changed, 81 insertions(+), 78 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 80e43deb8e..c9db14f1d0 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -714,6 +714,84 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_brcond2(OptContext *ctx, TCGOp *op)
+{
+ TCGCond cond = op->args[4];
+ int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
+ TCGArg label = op->args[5];
+ int inv = 0;
+
+ if (i >= 0) {
+ goto do_brcond_const;
+ }
+
+ switch (cond) {
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ /*
+ * Simplify LT/GE comparisons vs zero to a single compare
+ * vs the high word of the input.
+ */
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
+ arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
+ goto do_brcond_high;
+ }
+ break;
+
+ case TCG_COND_NE:
+ inv = 1;
+ QEMU_FALLTHROUGH;
+ case TCG_COND_EQ:
+ /*
+ * Simplify EQ/NE comparisons where one of the pairs
+ * can be simplified.
+ */
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
+ op->args[2], cond);
+ switch (i ^ inv) {
+ case 0:
+ goto do_brcond_const;
+ case 1:
+ goto do_brcond_high;
+ }
+
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
+ op->args[3], cond);
+ switch (i ^ inv) {
+ case 0:
+ goto do_brcond_const;
+ case 1:
+ op->opc = INDEX_op_brcond_i32;
+ op->args[1] = op->args[2];
+ op->args[2] = cond;
+ op->args[3] = label;
+ break;
+ }
+ break;
+
+ default:
+ break;
+
+ do_brcond_high:
+ op->opc = INDEX_op_brcond_i32;
+ op->args[0] = op->args[1];
+ op->args[1] = op->args[3];
+ op->args[2] = cond;
+ op->args[3] = label;
+ break;
+
+ do_brcond_const:
+ if (i == 0) {
+ tcg_op_remove(ctx->tcg, op);
+ return true;
+ }
+ op->opc = INDEX_op_br;
+ op->args[0] = label;
+ break;
+ }
+ return false;
+}
+
static bool fold_call(OptContext *ctx, TCGOp *op)
{
TCGContext *s = ctx->tcg;
@@ -1644,84 +1722,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- case INDEX_op_brcond2_i32:
- i = do_constant_folding_cond2(&op->args[0], &op->args[2],
- op->args[4]);
- if (i == 0) {
- do_brcond_false:
- tcg_op_remove(s, op);
- continue;
- }
- if (i > 0) {
- do_brcond_true:
- op->opc = opc = INDEX_op_br;
- op->args[0] = op->args[5];
- break;
- }
- if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
- && arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == 0
- && arg_is_const(op->args[3])
- && arg_info(op->args[3])->val == 0) {
- /* Simplify LT/GE comparisons vs zero to a single compare
- vs the high word of the input. */
- do_brcond_high:
- op->opc = opc = INDEX_op_brcond_i32;
- op->args[0] = op->args[1];
- op->args[1] = op->args[3];
- op->args[2] = op->args[4];
- op->args[3] = op->args[5];
- break;
- }
- if (op->args[4] == TCG_COND_EQ) {
- /* Simplify EQ comparisons where one of the pairs
- can be simplified. */
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[0], op->args[2],
- TCG_COND_EQ);
- if (i == 0) {
- goto do_brcond_false;
- } else if (i > 0) {
- goto do_brcond_high;
- }
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[1], op->args[3],
- TCG_COND_EQ);
- if (i == 0) {
- goto do_brcond_false;
- } else if (i < 0) {
- break;
- }
- do_brcond_low:
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
- op->opc = INDEX_op_brcond_i32;
- op->args[1] = op->args[2];
- op->args[2] = op->args[4];
- op->args[3] = op->args[5];
- break;
- }
- if (op->args[4] == TCG_COND_NE) {
- /* Simplify NE comparisons where one of the pairs
- can be simplified. */
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[0], op->args[2],
- TCG_COND_NE);
- if (i == 0) {
- goto do_brcond_high;
- } else if (i > 0) {
- goto do_brcond_true;
- }
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[1], op->args[3],
- TCG_COND_NE);
- if (i == 0) {
- goto do_brcond_low;
- } else if (i > 0) {
- goto do_brcond_true;
- }
- }
- break;
-
default:
break;
@@ -1737,6 +1737,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(andc):
done = fold_andc(&ctx, op);
break;
+ case INDEX_op_brcond2_i32:
+ done = fold_brcond2(&ctx, op);
+ break;
CASE_OP_32_64(ctpop):
done = fold_ctpop(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 23/56] tcg/optimize: Split out fold_brcond
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (21 preceding siblings ...)
2021-10-28 2:40 ` [PULL 22/56] tcg/optimize: Split out fold_brcond2 Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:40 ` [PULL 24/56] tcg/optimize: Split out fold_setcond Richard Henderson
` (33 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 33 +++++++++++++++++++--------------
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index c9db14f1d0..24ba6d2830 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -714,6 +714,22 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_brcond(OptContext *ctx, TCGOp *op)
+{
+ TCGCond cond = op->args[2];
+ int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
+
+ if (i == 0) {
+ tcg_op_remove(ctx->tcg, op);
+ return true;
+ }
+ if (i > 0) {
+ op->opc = INDEX_op_br;
+ op->args[0] = op->args[3];
+ }
+ return false;
+}
+
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[4];
@@ -1641,20 +1657,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(brcond):
- i = do_constant_folding_cond(opc, op->args[0],
- op->args[1], op->args[2]);
- if (i == 0) {
- tcg_op_remove(s, op);
- continue;
- } else if (i > 0) {
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
- op->opc = opc = INDEX_op_br;
- op->args[0] = op->args[3];
- break;
- }
- break;
-
CASE_OP_32_64(movcond):
i = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[5]);
@@ -1737,6 +1739,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(andc):
done = fold_andc(&ctx, op);
break;
+ CASE_OP_32_64(brcond):
+ done = fold_brcond(&ctx, op);
+ break;
case INDEX_op_brcond2_i32:
done = fold_brcond2(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 24/56] tcg/optimize: Split out fold_setcond
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (22 preceding siblings ...)
2021-10-28 2:40 ` [PULL 23/56] tcg/optimize: Split out fold_brcond Richard Henderson
@ 2021-10-28 2:40 ` Richard Henderson
2021-10-28 2:41 ` [PULL 25/56] tcg/optimize: Split out fold_mulu2_i32 Richard Henderson
` (32 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:40 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 24ba6d2830..f79cb44944 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -948,6 +948,17 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_setcond(OptContext *ctx, TCGOp *op)
+{
+ TCGCond cond = op->args[3];
+ int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
+
+ if (i >= 0) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+ }
+ return false;
+}
+
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[5];
@@ -1648,15 +1659,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(setcond):
- i = do_constant_folding_cond(opc, op->args[1],
- op->args[2], op->args[3]);
- if (i >= 0) {
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
- continue;
- }
- break;
-
CASE_OP_32_64(movcond):
i = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[5]);
@@ -1817,6 +1819,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(shr):
done = fold_shift(&ctx, op);
break;
+ CASE_OP_32_64(setcond):
+ done = fold_setcond(&ctx, op);
+ break;
case INDEX_op_setcond2_i32:
done = fold_setcond2(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 25/56] tcg/optimize: Split out fold_mulu2_i32
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (23 preceding siblings ...)
2021-10-28 2:40 ` [PULL 24/56] tcg/optimize: Split out fold_setcond Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 26/56] tcg/optimize: Split out fold_addsub2_i32 Richard Henderson
` (31 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 37 +++++++++++++++++++++----------------
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index f79cb44944..805522f99d 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -899,6 +899,24 @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
+ uint32_t a = arg_info(op->args[2])->val;
+ uint32_t b = arg_info(op->args[3])->val;
+ uint64_t r = (uint64_t)a * b;
+ TCGArg rl, rh;
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+
+ rl = op->args[0];
+ rh = op->args[1];
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
+ return true;
+ }
+ return false;
+}
+
static bool fold_nand(OptContext *ctx, TCGOp *op)
{
return fold_const2(ctx, op);
@@ -1710,22 +1728,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- case INDEX_op_mulu2_i32:
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
- uint32_t a = arg_info(op->args[2])->val;
- uint32_t b = arg_info(op->args[3])->val;
- uint64_t r = (uint64_t)a * b;
- TCGArg rl, rh;
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
-
- rl = op->args[0];
- rh = op->args[1];
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
- continue;
- }
- break;
-
default:
break;
@@ -1781,6 +1783,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(muluh):
done = fold_mul_highpart(&ctx, op);
break;
+ case INDEX_op_mulu2_i32:
+ done = fold_mulu2_i32(&ctx, op);
+ break;
CASE_OP_32_64(nand):
done = fold_nand(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 26/56] tcg/optimize: Split out fold_addsub2_i32
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (24 preceding siblings ...)
2021-10-28 2:41 ` [PULL 25/56] tcg/optimize: Split out fold_mulu2_i32 Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 27/56] tcg/optimize: Split out fold_movcond Richard Henderson
` (30 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Add two additional helpers, fold_add2_i32 and fold_sub2_i32,
which will not remain simple wrappers forever.
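The folding itself is plain double-word arithmetic on the four constant
halves; a standalone sketch (not QEMU code) of the reassemble-operate-split
dance, including a carry across the halves:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* a = 0x1_ffff_ffff, b = 1: the add carries into the high word. */
    uint32_t al = 0xffffffff, ah = 0x00000001;
    uint32_t bl = 0x00000001, bh = 0x00000000;
    uint64_t a = ((uint64_t)ah << 32) | al;
    uint64_t b = ((uint64_t)bh << 32) | bl;
    uint64_t r = a + b;

    /* Split back into the two results, as fold_addsub2_i32 does. */
    printf("rl=%08x rh=%08x\n", (uint32_t)r, (uint32_t)(r >> 32));
    return 0;
}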
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 70 +++++++++++++++++++++++++++++++-------------------
1 file changed, 44 insertions(+), 26 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 805522f99d..9d1d045363 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -704,6 +704,39 @@ static bool fold_add(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
+{
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
+ arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
+ uint32_t al = arg_info(op->args[2])->val;
+ uint32_t ah = arg_info(op->args[3])->val;
+ uint32_t bl = arg_info(op->args[4])->val;
+ uint32_t bh = arg_info(op->args[5])->val;
+ uint64_t a = ((uint64_t)ah << 32) | al;
+ uint64_t b = ((uint64_t)bh << 32) | bl;
+ TCGArg rl, rh;
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+
+ if (add) {
+ a += b;
+ } else {
+ a -= b;
+ }
+
+ rl = op->args[0];
+ rh = op->args[1];
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
+ return true;
+ }
+ return false;
+}
+
+static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
+{
+ return fold_addsub2_i32(ctx, op, true);
+}
+
static bool fold_and(OptContext *ctx, TCGOp *op)
{
return fold_const2(ctx, op);
@@ -1056,6 +1089,11 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
+{
+ return fold_addsub2_i32(ctx, op, false);
+}
+
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
return fold_const2(ctx, op);
@@ -1701,32 +1739,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- case INDEX_op_add2_i32:
- case INDEX_op_sub2_i32:
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
- && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
- uint32_t al = arg_info(op->args[2])->val;
- uint32_t ah = arg_info(op->args[3])->val;
- uint32_t bl = arg_info(op->args[4])->val;
- uint32_t bh = arg_info(op->args[5])->val;
- uint64_t a = ((uint64_t)ah << 32) | al;
- uint64_t b = ((uint64_t)bh << 32) | bl;
- TCGArg rl, rh;
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
-
- if (opc == INDEX_op_add2_i32) {
- a += b;
- } else {
- a -= b;
- }
-
- rl = op->args[0];
- rh = op->args[1];
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
- continue;
- }
- break;
default:
break;
@@ -1737,6 +1749,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(add):
done = fold_add(&ctx, op);
break;
+ case INDEX_op_add2_i32:
+ done = fold_add2_i32(&ctx, op);
+ break;
CASE_OP_32_64_VEC(and):
done = fold_and(&ctx, op);
break;
@@ -1833,6 +1848,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(sub):
done = fold_sub(&ctx, op);
break;
+ case INDEX_op_sub2_i32:
+ done = fold_sub2_i32(&ctx, op);
+ break;
CASE_OP_32_64_VEC(xor):
done = fold_xor(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 27/56] tcg/optimize: Split out fold_movcond
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (25 preceding siblings ...)
2021-10-28 2:41 ` [PULL 26/56] tcg/optimize: Split out fold_addsub2_i32 Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 28/56] tcg/optimize: Split out fold_extract2 Richard Henderson
` (29 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 56 ++++++++++++++++++++++++++++----------------------
1 file changed, 31 insertions(+), 25 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 9d1d045363..110b3d1cc2 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -922,6 +922,34 @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_movcond(OptContext *ctx, TCGOp *op)
+{
+ TCGOpcode opc = op->opc;
+ TCGCond cond = op->args[5];
+ int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
+
+ if (i >= 0) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
+ }
+
+ if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
+ uint64_t tv = arg_info(op->args[3])->val;
+ uint64_t fv = arg_info(op->args[4])->val;
+
+ opc = (opc == INDEX_op_movcond_i32
+ ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
+
+ if (tv == 1 && fv == 0) {
+ op->opc = opc;
+ op->args[3] = cond;
+ } else if (fv == 1 && tv == 0) {
+ op->opc = opc;
+ op->args[3] = tcg_invert_cond(cond);
+ }
+ }
+ return false;
+}
+
static bool fold_mul(OptContext *ctx, TCGOp *op)
{
return fold_const2(ctx, op);
@@ -1715,31 +1743,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(movcond):
- i = do_constant_folding_cond(opc, op->args[1],
- op->args[2], op->args[5]);
- if (i >= 0) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
- continue;
- }
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
- uint64_t tv = arg_info(op->args[3])->val;
- uint64_t fv = arg_info(op->args[4])->val;
- TCGCond cond = op->args[5];
-
- if (fv == 1 && tv == 0) {
- cond = tcg_invert_cond(cond);
- } else if (!(tv == 1 && fv == 0)) {
- break;
- }
- op->args[3] = cond;
- op->opc = opc = (opc == INDEX_op_movcond_i32
- ? INDEX_op_setcond_i32
- : INDEX_op_setcond_i64);
- }
- break;
-
-
default:
break;
@@ -1791,6 +1794,9 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_mb:
done = fold_mb(&ctx, op);
break;
+ CASE_OP_32_64(movcond):
+ done = fold_movcond(&ctx, op);
+ break;
CASE_OP_32_64(mul):
done = fold_mul(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 28/56] tcg/optimize: Split out fold_extract2
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (26 preceding siblings ...)
2021-10-28 2:41 ` [PULL 27/56] tcg/optimize: Split out fold_movcond Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 29/56] tcg/optimize: Split out fold_extract, fold_sextract Richard Henderson
` (28 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 39 ++++++++++++++++++++++-----------------
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 110b3d1cc2..faedbdbfb8 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -888,6 +888,25 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_extract2(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+ uint64_t v1 = arg_info(op->args[1])->val;
+ uint64_t v2 = arg_info(op->args[2])->val;
+ int shr = op->args[3];
+
+ if (op->opc == INDEX_op_extract2_i64) {
+ v1 >>= shr;
+ v2 <<= 64 - shr;
+ } else {
+ v1 = (uint32_t)v1 >> shr;
+ v2 = (int32_t)v2 << (32 - shr);
+ }
+ return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
+ }
+ return false;
+}
+
static bool fold_exts(OptContext *ctx, TCGOp *op)
{
return fold_const1(ctx, op);
@@ -1726,23 +1745,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(extract2):
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- uint64_t v1 = arg_info(op->args[1])->val;
- uint64_t v2 = arg_info(op->args[2])->val;
- int shr = op->args[3];
-
- if (opc == INDEX_op_extract2_i64) {
- tmp = (v1 >> shr) | (v2 << (64 - shr));
- } else {
- tmp = (int32_t)(((uint32_t)v1 >> shr) |
- ((uint32_t)v2 << (32 - shr)));
- }
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- continue;
- }
- break;
-
default:
break;
@@ -1777,6 +1779,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(eqv):
done = fold_eqv(&ctx, op);
break;
+ CASE_OP_32_64(extract2):
+ done = fold_extract2(&ctx, op);
+ break;
CASE_OP_32_64(ext8s):
CASE_OP_32_64(ext16s):
case INDEX_op_ext32s_i64:
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 29/56] tcg/optimize: Split out fold_extract, fold_sextract
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (27 preceding siblings ...)
2021-10-28 2:41 ` [PULL 28/56] tcg/optimize: Split out fold_extract2 Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 30/56] tcg/optimize: Split out fold_deposit Richard Henderson
` (27 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 48 ++++++++++++++++++++++++++++++------------------
1 file changed, 30 insertions(+), 18 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index faedbdbfb8..3bd5f043c8 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -888,6 +888,18 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_extract(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1])) {
+ uint64_t t;
+
+ t = arg_info(op->args[1])->val;
+ t = extract64(t, op->args[2], op->args[3]);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ }
+ return false;
+}
+
static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
@@ -1126,6 +1138,18 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
+static bool fold_sextract(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1])) {
+ uint64_t t;
+
+ t = arg_info(op->args[1])->val;
+ t = sextract64(t, op->args[2], op->args[3]);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ }
+ return false;
+}
+
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
return fold_const2(ctx, op);
@@ -1727,24 +1751,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(extract):
- if (arg_is_const(op->args[1])) {
- tmp = extract64(arg_info(op->args[1])->val,
- op->args[2], op->args[3]);
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- continue;
- }
- break;
-
- CASE_OP_32_64(sextract):
- if (arg_is_const(op->args[1])) {
- tmp = sextract64(arg_info(op->args[1])->val,
- op->args[2], op->args[3]);
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- continue;
- }
- break;
-
default:
break;
@@ -1779,6 +1785,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(eqv):
done = fold_eqv(&ctx, op);
break;
+ CASE_OP_32_64(extract):
+ done = fold_extract(&ctx, op);
+ break;
CASE_OP_32_64(extract2):
done = fold_extract2(&ctx, op);
break;
@@ -1856,6 +1865,9 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_setcond2_i32:
done = fold_setcond2(&ctx, op);
break;
+ CASE_OP_32_64(sextract):
+ done = fold_sextract(&ctx, op);
+ break;
CASE_OP_32_64_VEC(sub):
done = fold_sub(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 30/56] tcg/optimize: Split out fold_deposit
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (28 preceding siblings ...)
2021-10-28 2:41 ` [PULL 29/56] tcg/optimize: Split out fold_extract, fold_sextract Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 31/56] tcg/optimize: Split out fold_count_zeros Richard Henderson
` (26 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 3bd5f043c8..2c57d08760 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -878,6 +878,18 @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
return fold_const1(ctx, op);
}
+static bool fold_deposit(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+ uint64_t t1 = arg_info(op->args[1])->val;
+ uint64_t t2 = arg_info(op->args[2])->val;
+
+ t1 = deposit64(t1, op->args[3], op->args[4], t2);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+ }
+ return false;
+}
+
static bool fold_divide(OptContext *ctx, TCGOp *op)
{
return fold_const2(ctx, op);
@@ -1741,16 +1753,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(deposit):
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- tmp = deposit64(arg_info(op->args[1])->val,
- op->args[3], op->args[4],
- arg_info(op->args[2])->val);
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- continue;
- }
- break;
-
default:
break;
@@ -1778,6 +1780,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(ctpop):
done = fold_ctpop(&ctx, op);
break;
+ CASE_OP_32_64(deposit):
+ done = fold_deposit(&ctx, op);
+ break;
CASE_OP_32_64(div):
CASE_OP_32_64(divu):
done = fold_divide(&ctx, op);
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 31/56] tcg/optimize: Split out fold_count_zeros
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (29 preceding siblings ...)
2021-10-28 2:41 ` [PULL 30/56] tcg/optimize: Split out fold_deposit Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 32/56] tcg/optimize: Split out fold_bswap Richard Henderson
` (25 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 32 ++++++++++++++++++--------------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2c57d08760..dd65f1afcd 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -873,6 +873,20 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1])) {
+ uint64_t t = arg_info(op->args[1])->val;
+
+ if (t != 0) {
+ t = do_constant_folding(op->opc, t, 0);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ }
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
+ }
+ return false;
+}
+
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
return fold_const1(ctx, op);
@@ -1739,20 +1753,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(clz):
- CASE_OP_32_64(ctz):
- if (arg_is_const(op->args[1])) {
- TCGArg v = arg_info(op->args[1])->val;
- if (v != 0) {
- tmp = do_constant_folding(opc, v, 0);
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- } else {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
- }
- continue;
- }
- break;
-
default:
break;
@@ -1777,6 +1777,10 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_brcond2_i32:
done = fold_brcond2(&ctx, op);
break;
+ CASE_OP_32_64(clz):
+ CASE_OP_32_64(ctz):
+ done = fold_count_zeros(&ctx, op);
+ break;
CASE_OP_32_64(ctpop):
done = fold_ctpop(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 32/56] tcg/optimize: Split out fold_bswap
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (30 preceding siblings ...)
2021-10-28 2:41 ` [PULL 31/56] tcg/optimize: Split out fold_count_zeros Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 33/56] tcg/optimize: Split out fold_dup, fold_dup2 Richard Henderson
` (24 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index dd65f1afcd..5374c230da 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -841,6 +841,17 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_bswap(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1])) {
+ uint64_t t = arg_info(op->args[1])->val;
+
+ t = do_constant_folding(op->opc, t, op->args[2]);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ }
+ return false;
+}
+
static bool fold_call(OptContext *ctx, TCGOp *op)
{
TCGContext *s = ctx->tcg;
@@ -1742,17 +1753,6 @@ void tcg_optimize(TCGContext *s)
}
break;
- CASE_OP_32_64(bswap16):
- CASE_OP_32_64(bswap32):
- case INDEX_op_bswap64_i64:
- if (arg_is_const(op->args[1])) {
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
- op->args[2]);
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- continue;
- }
- break;
-
default:
break;
@@ -1777,6 +1777,11 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_brcond2_i32:
done = fold_brcond2(&ctx, op);
break;
+ CASE_OP_32_64(bswap16):
+ CASE_OP_32_64(bswap32):
+ case INDEX_op_bswap64_i64:
+ done = fold_bswap(&ctx, op);
+ break;
CASE_OP_32_64(clz):
CASE_OP_32_64(ctz):
done = fold_count_zeros(&ctx, op);
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 33/56] tcg/optimize: Split out fold_dup, fold_dup2
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (31 preceding siblings ...)
2021-10-28 2:41 ` [PULL 32/56] tcg/optimize: Split out fold_bswap Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 34/56] tcg/optimize: Split out fold_mov Richard Henderson
` (23 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 53 +++++++++++++++++++++++++++++---------------------
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 5374c230da..8524fe1f8a 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -920,6 +920,31 @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_dup(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1])) {
+ uint64_t t = arg_info(op->args[1])->val;
+ t = dup_const(TCGOP_VECE(op), t);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ }
+ return false;
+}
+
+static bool fold_dup2(OptContext *ctx, TCGOp *op)
+{
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+ uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
+ arg_info(op->args[2])->val);
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ }
+
+ if (args_are_copies(op->args[1], op->args[2])) {
+ op->opc = INDEX_op_dup_vec;
+ TCGOP_VECE(op) = MO_32;
+ }
+ return false;
+}
+
static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
return fold_const2(ctx, op);
@@ -1731,28 +1756,6 @@ void tcg_optimize(TCGContext *s)
done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
break;
- case INDEX_op_dup_vec:
- if (arg_is_const(op->args[1])) {
- tmp = arg_info(op->args[1])->val;
- tmp = dup_const(TCGOP_VECE(op), tmp);
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
- continue;
- }
- break;
-
- case INDEX_op_dup2_vec:
- assert(TCG_TARGET_REG_BITS == 32);
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- tcg_opt_gen_movi(&ctx, op, op->args[0],
- deposit64(arg_info(op->args[1])->val, 32, 32,
- arg_info(op->args[2])->val));
- continue;
- } else if (args_are_copies(op->args[1], op->args[2])) {
- op->opc = INDEX_op_dup_vec;
- TCGOP_VECE(op) = MO_32;
- }
- break;
-
default:
break;
@@ -1796,6 +1799,12 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(divu):
done = fold_divide(&ctx, op);
break;
+ case INDEX_op_dup_vec:
+ done = fold_dup(&ctx, op);
+ break;
+ case INDEX_op_dup2_vec:
+ done = fold_dup2(&ctx, op);
+ break;
CASE_OP_32_64(eqv):
done = fold_eqv(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 34/56] tcg/optimize: Split out fold_mov
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (32 preceding siblings ...)
2021-10-28 2:41 ` [PULL 33/56] tcg/optimize: Split out fold_dup, fold_dup2 Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 35/56] tcg/optimize: Split out fold_xx_to_i Richard Henderson
` (22 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
This is the final entry in the main switch that was still in its
old form. After this, we have the option to convert the switch
into a function dispatch table.
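No dispatch table is introduced here; as a purely hypothetical sketch
(every name below is invented for illustration, none of it is QEMU
code), such a table could take this shape:

#include <stdbool.h>
#include <stdio.h>

/* Miniature per-opcode dispatch table replacing a big switch. */
enum { OP_ADD, OP_SUB, NB_DEMO_OPS };

typedef bool (*FoldFn)(int *args);

static bool demo_fold_add(int *args) { args[0] = args[1] + args[2]; return true; }
static bool demo_fold_sub(int *args) { args[0] = args[1] - args[2]; return true; }

static const FoldFn fold_table[NB_DEMO_OPS] = {
    [OP_ADD] = demo_fold_add,
    [OP_SUB] = demo_fold_sub,
};

int main(void)
{
    int args[3] = { 0, 7, 5 };
    bool done = fold_table[OP_ADD] && fold_table[OP_ADD](args);
    printf("done=%d result=%d\n", done, args[0]);   /* done=1 result=12 */
    return 0;
}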
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 8524fe1f8a..5f1bd7cd78 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1015,6 +1015,11 @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_mov(OptContext *ctx, TCGOp *op)
+{
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+}
+
static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
TCGOpcode opc = op->opc;
@@ -1748,20 +1753,11 @@ void tcg_optimize(TCGContext *s)
break;
}
- /* Propagate constants through copy operations and do constant
- folding. Constants will be substituted to arguments by register
- allocator where needed and possible. Also detect copies. */
+ /*
+ * Process each opcode.
+ * Sorted alphabetically by opcode as much as possible.
+ */
switch (opc) {
- CASE_OP_32_64_VEC(mov):
- done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
- break;
-
- default:
- break;
-
- /* ---------------------------------------------------------- */
- /* Sorted alphabetically by opcode as much as possible. */
-
CASE_OP_32_64_VEC(add):
done = fold_add(&ctx, op);
break;
@@ -1831,6 +1827,9 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_mb:
done = fold_mb(&ctx, op);
break;
+ CASE_OP_32_64_VEC(mov):
+ done = fold_mov(&ctx, op);
+ break;
CASE_OP_32_64(movcond):
done = fold_movcond(&ctx, op);
break;
@@ -1900,6 +1899,8 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(xor):
done = fold_xor(&ctx, op);
break;
+ default:
+ break;
}
if (!done) {
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 35/56] tcg/optimize: Split out fold_xx_to_i
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (33 preceding siblings ...)
2021-10-28 2:41 ` [PULL 34/56] tcg/optimize: Split out fold_mov Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 36/56] tcg/optimize: Split out fold_xx_to_x Richard Henderson
` (21 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Pull the "op r, a, a => movi r, 0" optimization into a function,
and use it in the outer opcode fold functions.
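The identities being centralized are easy to sanity-check in isolation
(standalone, not QEMU code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0xdeadbeefcafebabeull;

    assert(a - a == 0);      /* sub  r, a, a => movi r, 0 */
    assert((a ^ a) == 0);    /* xor  r, a, a => movi r, 0 */
    assert((a & ~a) == 0);   /* andc r, a, a => movi r, 0 */
    return 0;
}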
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 5f1bd7cd78..2f55dc56c0 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -695,6 +695,15 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
return false;
}
+/* If the binary operation has both arguments equal, fold to @i. */
+static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+ if (args_are_copies(op->args[1], op->args[2])) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+ }
+ return false;
+}
+
/*
* These outermost fold_<op> functions are sorted alphabetically.
*/
@@ -744,7 +753,11 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xx_to_i(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_brcond(OptContext *ctx, TCGOp *op)
@@ -1224,7 +1237,11 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
static bool fold_sub(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xx_to_i(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
@@ -1234,7 +1251,11 @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xx_to_i(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
/* Propagate constants and copies, fold constant expressions. */
@@ -1739,20 +1760,6 @@ void tcg_optimize(TCGContext *s)
break;
}
- /* Simplify expression for "op r, a, a => movi r, 0" cases */
- switch (opc) {
- CASE_OP_32_64_VEC(andc):
- CASE_OP_32_64_VEC(sub):
- CASE_OP_32_64_VEC(xor):
- if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
- continue;
- }
- break;
- default:
- break;
- }
-
/*
* Process each opcode.
* Sorted alphabetically by opcode as much as possible.
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 36/56] tcg/optimize: Split out fold_xx_to_x
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (34 preceding siblings ...)
2021-10-28 2:41 ` [PULL 35/56] tcg/optimize: Split out fold_xx_to_i Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 37/56] tcg/optimize: Split out fold_xi_to_i Richard Henderson
` (20 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Pull the "op r, a, a => mov r, a" optimization into a function,
and use it in the outer opcode fold functions.
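Likewise for the identity cases centralized here (standalone, not QEMU
code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0x0123456789abcdefull;

    assert((a & a) == a);    /* and r, a, a => mov r, a */
    assert((a | a) == a);    /* or  r, a, a => mov r, a */
    return 0;
}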
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 39 ++++++++++++++++++++++++---------------
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2f55dc56c0..ab96849edf 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -704,8 +704,22 @@ static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
return false;
}
+/* If the binary operation has both arguments equal, fold to identity. */
+static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
+{
+ if (args_are_copies(op->args[1], op->args[2])) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+ }
+ return false;
+}
+
/*
* These outermost fold_<op> functions are sorted alphabetically.
+ *
+ * The ordering of the transformations should be:
+ * 1) those that produce a constant
+ * 2) those that produce a copy
+ * 3) those that produce information about the result value.
*/
static bool fold_add(OptContext *ctx, TCGOp *op)
@@ -748,7 +762,11 @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
static bool fold_and(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xx_to_x(ctx, op)) {
+ return true;
+ }
+ return false;
}
static bool fold_andc(OptContext *ctx, TCGOp *op)
@@ -1111,7 +1129,11 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
static bool fold_or(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xx_to_x(ctx, op)) {
+ return true;
+ }
+ return false;
}
static bool fold_orc(OptContext *ctx, TCGOp *op)
@@ -1747,19 +1769,6 @@ void tcg_optimize(TCGContext *s)
break;
}
- /* Simplify expression for "op r, a, a => mov r, a" cases */
- switch (opc) {
- CASE_OP_32_64_VEC(or):
- CASE_OP_32_64_VEC(and):
- if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
- continue;
- }
- break;
- default:
- break;
- }
-
/*
* Process each opcode.
* Sorted alphabetically by opcode as much as possible.
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 37/56] tcg/optimize: Split out fold_xi_to_i
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (35 preceding siblings ...)
2021-10-28 2:41 ` [PULL 36/56] tcg/optimize: Split out fold_xx_to_x Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 38/56] tcg/optimize: Add type to OptContext Richard Henderson
` (19 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Pull the "op r, a, 0 => movi r, 0" optimization into a function,
and use it in the outer opcode fold functions.
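And the absorbing-element cases handled by fold_xi_to_i (standalone,
not QEMU code; the high-part multiply check assumes a compiler that
provides __uint128_t):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0xfedcba9876543210ull;

    assert((a & 0) == 0);    /* and r, a, 0   => movi r, 0 */
    assert(a * 0 == 0);      /* mul r, a, 0   => movi r, 0 */
    assert((uint64_t)(((__uint128_t)a * 0) >> 64) == 0);
                             /* muluh r, a, 0 => movi r, 0 */
    return 0;
}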
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 38 ++++++++++++++++++++------------------
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index ab96849edf..cfdc53c964 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -695,6 +695,15 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
return false;
}
+/* If the binary operation has second argument @i, fold to @i. */
+static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+ }
+ return false;
+}
+
/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
@@ -763,6 +772,7 @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
static bool fold_and(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_xi_to_i(ctx, op, 0) ||
fold_xx_to_x(ctx, op)) {
return true;
}
@@ -1081,12 +1091,20 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
static bool fold_mul(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_i(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_i(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
@@ -1753,22 +1771,6 @@ void tcg_optimize(TCGContext *s)
continue;
}
- /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
- switch (opc) {
- CASE_OP_32_64_VEC(and):
- CASE_OP_32_64_VEC(mul):
- CASE_OP_32_64(muluh):
- CASE_OP_32_64(mulsh):
- if (arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
- continue;
- }
- break;
- default:
- break;
- }
-
/*
* Process each opcode.
* Sorted alphabetically by opcode as much as possible.
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 38/56] tcg/optimize: Add type to OptContext
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (36 preceding siblings ...)
2021-10-28 2:41 ` [PULL 37/56] tcg/optimize: Split out fold_xi_to_i Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 39/56] tcg/optimize: Split out fold_to_not Richard Henderson
` (18 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires
Compute the type of the operation early.
There are at least four places that used a def->flags ladder
to determine the type of the operation being optimized. Two of
them assumed that !TCG_OPF_64BIT implies TCG_TYPE_I32, and so
could compute incorrect results for vector operations.
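The hunks quoted below consume ctx->type; the initialization itself is
not shown in this excerpt, but presumably amounts to one flags test per
op in tcg_optimize(), along these lines (a sketch, not the exact hunk):

/* Sketch: derive the operation's type once, before the fold_*
 * dispatch, instead of re-testing def->flags at every use. */
if (def->flags & TCG_OPF_VECTOR) {
    ctx.type = TCGOP_VECL(op) + TCG_TYPE_V64;
} else if (def->flags & TCG_OPF_64BIT) {
    ctx.type = TCG_TYPE_I64;
} else {
    ctx.type = TCG_TYPE_I32;
}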
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 149 +++++++++++++++++++++++++++++--------------------
1 file changed, 89 insertions(+), 60 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index cfdc53c964..e869fa7e78 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -51,6 +51,7 @@ typedef struct OptContext {
/* In flight values from optimization. */
uint64_t z_mask;
+ TCGType type;
} OptContext;
static inline TempOptInfo *ts_info(TCGTemp *ts)
@@ -187,7 +188,6 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
TCGTemp *dst_ts = arg_temp(dst);
TCGTemp *src_ts = arg_temp(src);
- const TCGOpDef *def;
TempOptInfo *di;
TempOptInfo *si;
uint64_t z_mask;
@@ -201,16 +201,24 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
reset_ts(dst_ts);
di = ts_info(dst_ts);
si = ts_info(src_ts);
- def = &tcg_op_defs[op->opc];
- if (def->flags & TCG_OPF_VECTOR) {
- new_op = INDEX_op_mov_vec;
- } else if (def->flags & TCG_OPF_64BIT) {
- new_op = INDEX_op_mov_i64;
- } else {
+
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
new_op = INDEX_op_mov_i32;
+ break;
+ case TCG_TYPE_I64:
+ new_op = INDEX_op_mov_i64;
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
+ new_op = INDEX_op_mov_vec;
+ break;
+ default:
+ g_assert_not_reached();
}
op->opc = new_op;
- /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
op->args[0] = dst;
op->args[1] = src;
@@ -237,20 +245,9 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
TCGArg dst, uint64_t val)
{
- const TCGOpDef *def = &tcg_op_defs[op->opc];
- TCGType type;
- TCGTemp *tv;
-
- if (def->flags & TCG_OPF_VECTOR) {
- type = TCGOP_VECL(op) + TCG_TYPE_V64;
- } else if (def->flags & TCG_OPF_64BIT) {
- type = TCG_TYPE_I64;
- } else {
- type = TCG_TYPE_I32;
- }
-
/* Convert movi to mov with constant temp. */
- tv = tcg_constant_internal(type, val);
+ TCGTemp *tv = tcg_constant_internal(ctx->type, val);
+
init_ts_info(ctx, tv);
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}
@@ -420,11 +417,11 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
}
}
-static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
+static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
+ uint64_t x, uint64_t y)
{
- const TCGOpDef *def = &tcg_op_defs[op];
uint64_t res = do_constant_folding_2(op, x, y);
- if (!(def->flags & TCG_OPF_64BIT)) {
+ if (type == TCG_TYPE_I32) {
res = (int32_t)res;
}
return res;
@@ -510,19 +507,21 @@ static bool do_constant_folding_cond_eq(TCGCond c)
* Return -1 if the condition can't be simplified,
* and the result of the condition (0 or 1) if it can.
*/
-static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
+static int do_constant_folding_cond(TCGType type, TCGArg x,
TCGArg y, TCGCond c)
{
uint64_t xv = arg_info(x)->val;
uint64_t yv = arg_info(y)->val;
if (arg_is_const(x) && arg_is_const(y)) {
- const TCGOpDef *def = &tcg_op_defs[op];
- tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
- if (def->flags & TCG_OPF_64BIT) {
- return do_constant_folding_cond_64(xv, yv, c);
- } else {
+ switch (type) {
+ case TCG_TYPE_I32:
return do_constant_folding_cond_32(xv, yv, c);
+ case TCG_TYPE_I64:
+ return do_constant_folding_cond_64(xv, yv, c);
+ default:
+ /* Only scalar comparisons are optimizable */
+ return -1;
}
} else if (args_are_copies(x, y)) {
return do_constant_folding_cond_eq(c);
@@ -677,7 +676,7 @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
uint64_t t;
t = arg_info(op->args[1])->val;
- t = do_constant_folding(op->opc, t, 0);
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
return false;
@@ -689,7 +688,7 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
uint64_t t1 = arg_info(op->args[1])->val;
uint64_t t2 = arg_info(op->args[2])->val;
- t1 = do_constant_folding(op->opc, t1, t2);
+ t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
}
return false;
@@ -791,7 +790,7 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[2];
- int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
+ int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
if (i == 0) {
tcg_op_remove(ctx->tcg, op);
@@ -836,7 +835,7 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
* Simplify EQ/NE comparisons where one of the pairs
* can be simplified.
*/
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
op->args[2], cond);
switch (i ^ inv) {
case 0:
@@ -845,7 +844,7 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
goto do_brcond_high;
}
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
op->args[3], cond);
switch (i ^ inv) {
case 0:
@@ -887,7 +886,7 @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
if (arg_is_const(op->args[1])) {
uint64_t t = arg_info(op->args[1])->val;
- t = do_constant_folding(op->opc, t, op->args[2]);
+ t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
return false;
@@ -931,7 +930,7 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
uint64_t t = arg_info(op->args[1])->val;
if (t != 0) {
- t = do_constant_folding(op->opc, t, 0);
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
@@ -1063,9 +1062,8 @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
- TCGOpcode opc = op->opc;
TCGCond cond = op->args[5];
- int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
if (i >= 0) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
@@ -1074,9 +1072,18 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
uint64_t tv = arg_info(op->args[3])->val;
uint64_t fv = arg_info(op->args[4])->val;
+ TCGOpcode opc;
- opc = (opc == INDEX_op_movcond_i32
- ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ opc = INDEX_op_setcond_i32;
+ break;
+ case TCG_TYPE_I64:
+ opc = INDEX_op_setcond_i64;
+ break;
+ default:
+ g_assert_not_reached();
+ }
if (tv == 1 && fv == 0) {
op->opc = opc;
@@ -1181,7 +1188,7 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[3];
- int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
if (i >= 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
@@ -1220,7 +1227,7 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
* Simplify EQ/NE comparisons where one of the pairs
* can be simplified.
*/
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
op->args[3], cond);
switch (i ^ inv) {
case 0:
@@ -1229,7 +1236,7 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
goto do_setcond_high;
}
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
op->args[4], cond);
switch (i ^ inv) {
case 0:
@@ -1331,6 +1338,15 @@ void tcg_optimize(TCGContext *s)
init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
+ /* Pre-compute the type of the operation. */
+ if (def->flags & TCG_OPF_VECTOR) {
+ ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
+ } else if (def->flags & TCG_OPF_64BIT) {
+ ctx.type = TCG_TYPE_I64;
+ } else {
+ ctx.type = TCG_TYPE_I32;
+ }
+
/* For commutative operations make constant second argument */
switch (opc) {
CASE_OP_32_64_VEC(add):
@@ -1411,19 +1427,24 @@ void tcg_optimize(TCGContext *s)
/* Proceed with possible constant folding. */
break;
}
- if (opc == INDEX_op_sub_i32) {
+ switch (ctx.type) {
+ case TCG_TYPE_I32:
neg_op = INDEX_op_neg_i32;
have_neg = TCG_TARGET_HAS_neg_i32;
- } else if (opc == INDEX_op_sub_i64) {
+ break;
+ case TCG_TYPE_I64:
neg_op = INDEX_op_neg_i64;
have_neg = TCG_TARGET_HAS_neg_i64;
- } else if (TCG_TARGET_HAS_neg_vec) {
- TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
- unsigned vece = TCGOP_VECE(op);
- neg_op = INDEX_op_neg_vec;
- have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
- } else {
break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ neg_op = INDEX_op_neg_vec;
+ have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
+ TCGOP_VECE(op)) > 0;
+ break;
+ default:
+ g_assert_not_reached();
}
if (!have_neg) {
break;
@@ -1476,15 +1497,23 @@ void tcg_optimize(TCGContext *s)
TCGOpcode not_op;
bool have_not;
- if (def->flags & TCG_OPF_VECTOR) {
- not_op = INDEX_op_not_vec;
- have_not = TCG_TARGET_HAS_not_vec;
- } else if (def->flags & TCG_OPF_64BIT) {
- not_op = INDEX_op_not_i64;
- have_not = TCG_TARGET_HAS_not_i64;
- } else {
+ switch (ctx.type) {
+ case TCG_TYPE_I32:
not_op = INDEX_op_not_i32;
have_not = TCG_TARGET_HAS_not_i32;
+ break;
+ case TCG_TYPE_I64:
+ not_op = INDEX_op_not_i64;
+ have_not = TCG_TARGET_HAS_not_i64;
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ not_op = INDEX_op_not_vec;
+ have_not = TCG_TARGET_HAS_not_vec;
+ break;
+ default:
+ g_assert_not_reached();
}
if (!have_not) {
break;
@@ -1755,7 +1784,7 @@ void tcg_optimize(TCGContext *s)
below, we can ignore high bits, but for further optimizations we
need to record that the high bits contain garbage. */
partmask = z_mask;
- if (!(def->flags & TCG_OPF_64BIT)) {
+ if (ctx.type == TCG_TYPE_I32) {
z_mask |= ~(tcg_target_ulong)0xffffffffu;
partmask &= 0xffffffffu;
affected &= 0xffffffffu;
--
2.25.1
* [PULL 39/56] tcg/optimize: Split out fold_to_not
From: Richard Henderson @ 2021-10-28 2:41 UTC
To: qemu-devel; +Cc: Luis Pires
Split out the conditional conversion from a more complex logical
operation to a simple NOT. Create a couple more helpers to make
this easy for the outer-most logical operations.
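The conversions rest on simple bitwise identities; a standalone sanity
check of the constants passed to fold_ix_to_not/fold_xi_to_not below
(ordinary C, nothing QEMU-specific):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t a = 0x12345678u, m1 = ~0u;

    assert((m1 & ~a) == ~a);    /* andc -1, a -> not a (fold_ix_to_not, -1) */
    assert(~(a ^ 0) == ~a);     /* eqv  a, 0  -> not a (fold_xi_to_not,  0) */
    assert(~(a & m1) == ~a);    /* nand a, -1 -> not a (fold_xi_to_not, -1) */
    assert(~(a | 0) == ~a);     /* nor  a, 0  -> not a (fold_xi_to_not,  0) */
    assert((0 | ~a) == ~a);     /* orc  0, a  -> not a (fold_ix_to_not,  0) */
    assert((a ^ m1) == ~a);     /* xor  a, -1 -> not a (fold_xi_to_not, -1) */
    return 0;
}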
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
1 file changed, 86 insertions(+), 72 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index e869fa7e78..21f4251b4f 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -694,6 +694,52 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
return false;
}
+/*
+ * Convert @op to NOT, if NOT is supported by the host.
+ * Return true if the conversion is successful, which will still
+ * indicate that the processing is complete.
+ */
+static bool fold_not(OptContext *ctx, TCGOp *op);
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
+{
+ TCGOpcode not_op;
+ bool have_not;
+
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ not_op = INDEX_op_not_i32;
+ have_not = TCG_TARGET_HAS_not_i32;
+ break;
+ case TCG_TYPE_I64:
+ not_op = INDEX_op_not_i64;
+ have_not = TCG_TARGET_HAS_not_i64;
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ not_op = INDEX_op_not_vec;
+ have_not = TCG_TARGET_HAS_not_vec;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (have_not) {
+ op->opc = not_op;
+ op->args[1] = op->args[idx];
+ return fold_not(ctx, op);
+ }
+ return false;
+}
+
+/* If the binary operation has first argument @i, fold to NOT. */
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+ return fold_to_not(ctx, op, 2);
+ }
+ return false;
+}
+
/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
@@ -703,6 +749,15 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
return false;
}
+/* If the binary operation has second argument @i, fold to NOT. */
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+ return fold_to_not(ctx, op, 1);
+ }
+ return false;
+}
+
/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
@@ -781,7 +836,8 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
- fold_xx_to_i(ctx, op, 0)) {
+ fold_xx_to_i(ctx, op, 0) ||
+ fold_ix_to_not(ctx, op, -1)) {
return true;
}
return false;
@@ -987,7 +1043,11 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_not(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_extract(OptContext *ctx, TCGOp *op)
@@ -1134,7 +1194,11 @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
static bool fold_nand(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_not(ctx, op, -1)) {
+ return true;
+ }
+ return false;
}
static bool fold_neg(OptContext *ctx, TCGOp *op)
@@ -1144,12 +1208,22 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
static bool fold_nor(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_not(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_not(OptContext *ctx, TCGOp *op)
{
- return fold_const1(ctx, op);
+ if (fold_const1(ctx, op)) {
+ return true;
+ }
+
+ /* Because of fold_to_not, we want to always return true, via finish_folding. */
+ finish_folding(ctx, op);
+ return true;
}
static bool fold_or(OptContext *ctx, TCGOp *op)
@@ -1163,7 +1237,11 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
static bool fold_orc(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_ix_to_not(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
@@ -1299,7 +1377,8 @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
- fold_xx_to_i(ctx, op, 0)) {
+ fold_xx_to_i(ctx, op, 0) ||
+ fold_xi_to_not(ctx, op, -1)) {
return true;
}
return false;
@@ -1458,71 +1537,6 @@ void tcg_optimize(TCGContext *s)
}
}
break;
- CASE_OP_32_64_VEC(xor):
- CASE_OP_32_64(nand):
- if (!arg_is_const(op->args[1])
- && arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == -1) {
- i = 1;
- goto try_not;
- }
- break;
- CASE_OP_32_64(nor):
- if (!arg_is_const(op->args[1])
- && arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == 0) {
- i = 1;
- goto try_not;
- }
- break;
- CASE_OP_32_64_VEC(andc):
- if (!arg_is_const(op->args[2])
- && arg_is_const(op->args[1])
- && arg_info(op->args[1])->val == -1) {
- i = 2;
- goto try_not;
- }
- break;
- CASE_OP_32_64_VEC(orc):
- CASE_OP_32_64(eqv):
- if (!arg_is_const(op->args[2])
- && arg_is_const(op->args[1])
- && arg_info(op->args[1])->val == 0) {
- i = 2;
- goto try_not;
- }
- break;
- try_not:
- {
- TCGOpcode not_op;
- bool have_not;
-
- switch (ctx.type) {
- case TCG_TYPE_I32:
- not_op = INDEX_op_not_i32;
- have_not = TCG_TARGET_HAS_not_i32;
- break;
- case TCG_TYPE_I64:
- not_op = INDEX_op_not_i64;
- have_not = TCG_TARGET_HAS_not_i64;
- break;
- case TCG_TYPE_V64:
- case TCG_TYPE_V128:
- case TCG_TYPE_V256:
- not_op = INDEX_op_not_vec;
- have_not = TCG_TARGET_HAS_not_vec;
- break;
- default:
- g_assert_not_reached();
- }
- if (!have_not) {
- break;
- }
- op->opc = not_op;
- reset_temp(op->args[0]);
- op->args[1] = op->args[i];
- continue;
- }
default:
break;
}
--
2.25.1
* [PULL 40/56] tcg/optimize: Split out fold_sub_to_neg
From: Richard Henderson @ 2021-10-28 2:41 UTC
To: qemu-devel; +Cc: Luis Pires
Even though there is only one user, place this more complex
conversion into its own helper.
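The conversion relies on 0 - b being exactly two's-complement negation;
a minimal standalone check, including the wrap-around case (plain C,
not QEMU code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t b = 0x12345678u;

    assert(0u - b == ~b + 1u);  /* sub r, 0, b -> neg r, b */
    b = 0x80000000u;            /* negation wraps back to itself */
    assert(0u - b == ~b + 1u);
    return 0;
}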
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 89 ++++++++++++++++++++++++++------------------------
1 file changed, 47 insertions(+), 42 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 21f4251b4f..e0d850ffe4 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1203,7 +1203,15 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
static bool fold_neg(OptContext *ctx, TCGOp *op)
{
- return fold_const1(ctx, op);
+ if (fold_const1(ctx, op)) {
+ return true;
+ }
+ /*
+ * Because of fold_sub_to_neg, we want to always return true,
+ * via finish_folding.
+ */
+ finish_folding(ctx, op);
+ return true;
}
static bool fold_nor(OptContext *ctx, TCGOp *op)
@@ -1360,10 +1368,47 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}
+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
+{
+ TCGOpcode neg_op;
+ bool have_neg;
+
+ if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
+ return false;
+ }
+
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ neg_op = INDEX_op_neg_i32;
+ have_neg = TCG_TARGET_HAS_neg_i32;
+ break;
+ case TCG_TYPE_I64:
+ neg_op = INDEX_op_neg_i64;
+ have_neg = TCG_TARGET_HAS_neg_i64;
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ neg_op = INDEX_op_neg_vec;
+ have_neg = (TCG_TARGET_HAS_neg_vec &&
+ tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (have_neg) {
+ op->opc = neg_op;
+ op->args[1] = op->args[2];
+ return fold_neg(ctx, op);
+ }
+ return false;
+}
+
static bool fold_sub(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
- fold_xx_to_i(ctx, op, 0)) {
+ fold_xx_to_i(ctx, op, 0) ||
+ fold_sub_to_neg(ctx, op)) {
return true;
}
return false;
@@ -1497,46 +1542,6 @@ void tcg_optimize(TCGContext *s)
continue;
}
break;
- CASE_OP_32_64_VEC(sub):
- {
- TCGOpcode neg_op;
- bool have_neg;
-
- if (arg_is_const(op->args[2])) {
- /* Proceed with possible constant folding. */
- break;
- }
- switch (ctx.type) {
- case TCG_TYPE_I32:
- neg_op = INDEX_op_neg_i32;
- have_neg = TCG_TARGET_HAS_neg_i32;
- break;
- case TCG_TYPE_I64:
- neg_op = INDEX_op_neg_i64;
- have_neg = TCG_TARGET_HAS_neg_i64;
- break;
- case TCG_TYPE_V64:
- case TCG_TYPE_V128:
- case TCG_TYPE_V256:
- neg_op = INDEX_op_neg_vec;
- have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
- TCGOP_VECE(op)) > 0;
- break;
- default:
- g_assert_not_reached();
- }
- if (!have_neg) {
- break;
- }
- if (arg_is_const(op->args[1])
- && arg_info(op->args[1])->val == 0) {
- op->opc = neg_op;
- reset_temp(op->args[0]);
- op->args[1] = op->args[2];
- continue;
- }
- }
- break;
default:
break;
}
--
2.25.1
* [PULL 41/56] tcg/optimize: Split out fold_xi_to_x
From: Richard Henderson @ 2021-10-28 2:41 UTC
To: qemu-devel; +Cc: Luis Pires
Pull the "op r, a, i => mov r, a" optimization into a function,
and use it in the outer-most logical operations.
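Each call site added below corresponds to one algebraic identity; a
standalone check (ordinary C, nothing QEMU-specific), with the
shift/rotate cases collapsed into one line:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t a = 0x12345678u;

    assert(a + 0 == a);                     /* add  a, 0          */
    assert(a - 0 == a);                     /* sub  a, 0          */
    assert((a | 0) == a);                   /* or   a, 0          */
    assert((a ^ 0) == a);                   /* xor  a, 0          */
    assert((a & ~0u) == a);                 /* and  a, -1         */
    assert((a & ~0u) == a);                 /* andc a, 0 (a & ~0) */
    assert(~(a ^ ~0u) == a);                /* eqv  a, -1         */
    assert((a | ~(~0u)) == a);              /* orc  a, -1 (a | 0) */
    assert((a << 0) == a && (a >> 0) == a); /* shl/shr/sar/rot by 0 */
    return 0;
}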
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 61 +++++++++++++++++++++-----------------------------
1 file changed, 26 insertions(+), 35 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index e0d850ffe4..f5ab0500b7 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -749,6 +749,15 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
return false;
}
+/* If the binary operation has second argument @i, fold to identity. */
+static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+ }
+ return false;
+}
+
/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
@@ -787,7 +796,11 @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
static bool fold_add(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
@@ -827,6 +840,7 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
fold_xi_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, -1) ||
fold_xx_to_x(ctx, op)) {
return true;
}
@@ -837,6 +851,7 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, 0) ||
fold_ix_to_not(ctx, op, -1)) {
return true;
}
@@ -1044,6 +1059,7 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, -1) ||
fold_xi_to_not(ctx, op, 0)) {
return true;
}
@@ -1237,6 +1253,7 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
static bool fold_or(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0) ||
fold_xx_to_x(ctx, op)) {
return true;
}
@@ -1246,6 +1263,7 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
static bool fold_orc(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, -1) ||
fold_ix_to_not(ctx, op, 0)) {
return true;
}
@@ -1365,7 +1383,11 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
@@ -1408,6 +1430,7 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, 0) ||
fold_sub_to_neg(ctx, op)) {
return true;
}
@@ -1423,6 +1446,7 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, 0) ||
fold_xi_to_not(ctx, op, -1)) {
return true;
}
@@ -1546,39 +1570,6 @@ void tcg_optimize(TCGContext *s)
break;
}
- /* Simplify expression for "op r, a, const => mov r, a" cases */
- switch (opc) {
- CASE_OP_32_64_VEC(add):
- CASE_OP_32_64_VEC(sub):
- CASE_OP_32_64_VEC(or):
- CASE_OP_32_64_VEC(xor):
- CASE_OP_32_64_VEC(andc):
- CASE_OP_32_64(shl):
- CASE_OP_32_64(shr):
- CASE_OP_32_64(sar):
- CASE_OP_32_64(rotl):
- CASE_OP_32_64(rotr):
- if (!arg_is_const(op->args[1])
- && arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
- continue;
- }
- break;
- CASE_OP_32_64_VEC(and):
- CASE_OP_32_64_VEC(orc):
- CASE_OP_32_64(eqv):
- if (!arg_is_const(op->args[1])
- && arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == -1) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
- continue;
- }
- break;
- default:
- break;
- }
-
/* Simplify using known-zero bits. Currently only ops with a single
output argument is supported. */
z_mask = -1;
--
2.25.1
* [PULL 42/56] tcg/optimize: Split out fold_ix_to_i
From: Richard Henderson @ 2021-10-28 2:41 UTC
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Pull the "op r, 0, b => movi r, 0" optimization into a function,
and use it in fold_shift.
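The identity here is that a zero first operand stays zero for any shift
or rotate count; a standalone check (plain C; rotl32 is a hand-rolled
helper for the sketch, not a QEMU API):

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << (n & 31)) | (x >> (-n & 31));
}

int main(void)
{
    for (unsigned n = 0; n < 32; n++) {
        assert((0u << n) == 0);     /* shl 0, n      */
        assert((0u >> n) == 0);     /* shr/sar 0, n  */
        assert(rotl32(0, n) == 0);  /* rotl/rotr 0, n */
    }
    return 0;
}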
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 28 ++++++++++------------------
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index f5ab0500b7..bf74b77355 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -731,6 +731,15 @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
return false;
}
+/* If the binary operation has first argument @i, fold to @i. */
+static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+ }
+ return false;
+}
+
/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
@@ -1384,6 +1393,7 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_ix_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, 0)) {
return true;
}
@@ -1552,24 +1562,6 @@ void tcg_optimize(TCGContext *s)
break;
}
- /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
- and "sub r, 0, a => neg r, a" case. */
- switch (opc) {
- CASE_OP_32_64(shl):
- CASE_OP_32_64(shr):
- CASE_OP_32_64(sar):
- CASE_OP_32_64(rotl):
- CASE_OP_32_64(rotr):
- if (arg_is_const(op->args[1])
- && arg_info(op->args[1])->val == 0) {
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
- continue;
- }
- break;
- default:
- break;
- }
-
/* Simplify using known-zero bits. Currently only ops with a single
output argument is supported. */
z_mask = -1;
--
2.25.1
* [PULL 43/56] tcg/optimize: Split out fold_masks
From: Richard Henderson @ 2021-10-28 2:41 UTC
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
Move all of the known-zero optimizations into the per-opcode
functions. Use fold_masks when there is a possibility of the
result being determined, and simply set ctx->z_mask otherwise.
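As a standalone model of the two masks (invented variable names, not
QEMU code): a z_mask bit of 0 means the result bit is known zero, and an
a_mask bit of 0 means the result bit is known to equal the first input,
so fold_masks can fold to "movi r, 0" or to a plain "mov r, a". The AND
case from fold_and:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* and r, a, 0x0f0f with nothing known about a: */
    uint64_t z1 = ~0ull;        /* z_mask of a: every bit may be set   */
    uint64_t z2 = 0x0f0f;       /* z_mask of the constant operand      */
    uint64_t z_mask = z1 & z2;  /* result: only 0x0f0f bits may be set */
    uint64_t a_mask = z1 & ~z2; /* bits where result may differ from a */

    assert(z_mask == 0x0f0f);
    assert(a_mask == ~0x0f0full);

    /* If a is already known to fit in 0x0f0f, the AND is a no-op: */
    z1 = 0x0f0f;
    a_mask = z1 & ~z2;          /* == 0: fold to mov r, a */
    assert(a_mask == 0);
    return 0;
}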
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 545 ++++++++++++++++++++++++++-----------------------
1 file changed, 294 insertions(+), 251 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index bf74b77355..e84d10be53 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -50,7 +50,8 @@ typedef struct OptContext {
TCGTempSet temps_used;
/* In flight values from optimization. */
- uint64_t z_mask;
+ uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
+ uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
TCGType type;
} OptContext;
@@ -694,6 +695,31 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+ uint64_t a_mask = ctx->a_mask;
+ uint64_t z_mask = ctx->z_mask;
+
+ /*
+ * 32-bit ops generate 32-bit results. For the result-is-zero test
+ * below, we can ignore high bits, but for further optimizations we
+ * need to record that the high bits contain garbage.
+ */
+ if (ctx->type == TCG_TYPE_I32) {
+ ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
+ a_mask &= MAKE_64BIT_MASK(0, 32);
+ z_mask &= MAKE_64BIT_MASK(0, 32);
+ }
+
+ if (z_mask == 0) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
+ }
+ if (a_mask == 0) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+ }
+ return false;
+}
+
/*
* Convert @op to NOT, if NOT is supported by the host.
* Return true if the conversion is successful, which will still
@@ -847,24 +873,55 @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
static bool fold_and(OptContext *ctx, TCGOp *op)
{
+ uint64_t z1, z2;
+
if (fold_const2(ctx, op) ||
fold_xi_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, -1) ||
fold_xx_to_x(ctx, op)) {
return true;
}
- return false;
+
+ z1 = arg_info(op->args[1])->z_mask;
+ z2 = arg_info(op->args[2])->z_mask;
+ ctx->z_mask = z1 & z2;
+
+ /*
+ * Known-zeros does not imply known-ones. Therefore unless
+ * arg2 is constant, we can't infer affected bits from it.
+ */
+ if (arg_is_const(op->args[2])) {
+ ctx->a_mask = z1 & ~z2;
+ }
+
+ return fold_masks(ctx, op);
}
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
+ uint64_t z1;
+
if (fold_const2(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, 0) ||
fold_ix_to_not(ctx, op, -1)) {
return true;
}
- return false;
+
+ z1 = arg_info(op->args[1])->z_mask;
+
+ /*
+ * Known-zeros does not imply known-ones. Therefore unless
+ * arg2 is constant, we can't infer anything from it.
+ */
+ if (arg_is_const(op->args[2])) {
+ uint64_t z2 = ~arg_info(op->args[2])->z_mask;
+ ctx->a_mask = z1 & ~z2;
+ z1 &= z2;
+ }
+ ctx->z_mask = z1;
+
+ return fold_masks(ctx, op);
}
static bool fold_brcond(OptContext *ctx, TCGOp *op)
@@ -963,13 +1020,52 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask, sign;
+
if (arg_is_const(op->args[1])) {
uint64_t t = arg_info(op->args[1])->val;
t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
- return false;
+
+ z_mask = arg_info(op->args[1])->z_mask;
+ switch (op->opc) {
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ z_mask = bswap16(z_mask);
+ sign = INT16_MIN;
+ break;
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ z_mask = bswap32(z_mask);
+ sign = INT32_MIN;
+ break;
+ case INDEX_op_bswap64_i64:
+ z_mask = bswap64(z_mask);
+ sign = INT64_MIN;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
+ case TCG_BSWAP_OZ:
+ break;
+ case TCG_BSWAP_OS:
+ /* If the sign bit may be 1, force all the bits above to 1. */
+ if (z_mask & sign) {
+ z_mask |= sign;
+ }
+ break;
+ default:
+ /* The high bits are undefined: force all bits above the sign to 1. */
+ z_mask |= sign << 1;
+ break;
+ }
+ ctx->z_mask = z_mask;
+
+ return fold_masks(ctx, op);
}
static bool fold_call(OptContext *ctx, TCGOp *op)
@@ -1006,6 +1102,8 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask;
+
if (arg_is_const(op->args[1])) {
uint64_t t = arg_info(op->args[1])->val;
@@ -1015,12 +1113,39 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
}
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
}
+
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ z_mask = 31;
+ break;
+ case TCG_TYPE_I64:
+ z_mask = 63;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
+
return false;
}
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
- return fold_const1(ctx, op);
+ if (fold_const1(ctx, op)) {
+ return true;
+ }
+
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ ctx->z_mask = 32 | 31;
+ break;
+ case TCG_TYPE_I64:
+ ctx->z_mask = 64 | 63;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return false;
}
static bool fold_deposit(OptContext *ctx, TCGOp *op)
@@ -1032,6 +1157,10 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
t1 = deposit64(t1, op->args[3], op->args[4], t2);
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
}
+
+ ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
+ op->args[3], op->args[4],
+ arg_info(op->args[2])->z_mask);
return false;
}
@@ -1077,6 +1206,8 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
static bool fold_extract(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask_old, z_mask;
+
if (arg_is_const(op->args[1])) {
uint64_t t;
@@ -1084,7 +1215,15 @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
t = extract64(t, op->args[2], op->args[3]);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
- return false;
+
+ z_mask_old = arg_info(op->args[1])->z_mask;
+ z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
+ if (op->args[2] == 0) {
+ ctx->a_mask = z_mask_old ^ z_mask;
+ }
+ ctx->z_mask = z_mask;
+
+ return fold_masks(ctx, op);
}
static bool fold_extract2(OptContext *ctx, TCGOp *op)
@@ -1108,12 +1247,83 @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
static bool fold_exts(OptContext *ctx, TCGOp *op)
{
- return fold_const1(ctx, op);
+ uint64_t z_mask_old, z_mask, sign;
+ bool type_change = false;
+
+ if (fold_const1(ctx, op)) {
+ return true;
+ }
+
+ z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+
+ switch (op->opc) {
+ CASE_OP_32_64(ext8s):
+ sign = INT8_MIN;
+ z_mask = (uint8_t)z_mask;
+ break;
+ CASE_OP_32_64(ext16s):
+ sign = INT16_MIN;
+ z_mask = (uint16_t)z_mask;
+ break;
+ case INDEX_op_ext_i32_i64:
+ type_change = true;
+ QEMU_FALLTHROUGH;
+ case INDEX_op_ext32s_i64:
+ sign = INT32_MIN;
+ z_mask = (uint32_t)z_mask;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (z_mask & sign) {
+ z_mask |= sign;
+ } else if (!type_change) {
+ ctx->a_mask = z_mask_old ^ z_mask;
+ }
+ ctx->z_mask = z_mask;
+
+ return fold_masks(ctx, op);
}
static bool fold_extu(OptContext *ctx, TCGOp *op)
{
- return fold_const1(ctx, op);
+ uint64_t z_mask_old, z_mask;
+ bool type_change = false;
+
+ if (fold_const1(ctx, op)) {
+ return true;
+ }
+
+ z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+
+ switch (op->opc) {
+ CASE_OP_32_64(ext8u):
+ z_mask = (uint8_t)z_mask;
+ break;
+ CASE_OP_32_64(ext16u):
+ z_mask = (uint16_t)z_mask;
+ break;
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_extu_i32_i64:
+ type_change = true;
+ QEMU_FALLTHROUGH;
+ case INDEX_op_ext32u_i64:
+ z_mask = (uint32_t)z_mask;
+ break;
+ case INDEX_op_extrh_i64_i32:
+ type_change = true;
+ z_mask >>= 32;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ ctx->z_mask = z_mask;
+ if (!type_change) {
+ ctx->a_mask = z_mask_old ^ z_mask;
+ }
+ return fold_masks(ctx, op);
}
static bool fold_mb(OptContext *ctx, TCGOp *op)
@@ -1154,6 +1364,9 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
}
+ ctx->z_mask = arg_info(op->args[3])->z_mask
+ | arg_info(op->args[4])->z_mask;
+
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
uint64_t tv = arg_info(op->args[3])->val;
uint64_t fv = arg_info(op->args[4])->val;
@@ -1228,9 +1441,16 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
static bool fold_neg(OptContext *ctx, TCGOp *op)
{
+ uint64_t z_mask;
+
if (fold_const1(ctx, op)) {
return true;
}
+
+ /* Set to 1 all bits to the left of the rightmost. */
+ z_mask = arg_info(op->args[1])->z_mask;
+ ctx->z_mask = -(z_mask & -z_mask);
+
/*
* Because of fold_sub_to_neg, we want to always return true,
* via finish_folding.
@@ -1266,7 +1486,10 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
fold_xx_to_x(ctx, op)) {
return true;
}
- return false;
+
+ ctx->z_mask = arg_info(op->args[1])->z_mask
+ | arg_info(op->args[2])->z_mask;
+ return fold_masks(ctx, op);
}
static bool fold_orc(OptContext *ctx, TCGOp *op)
@@ -1281,6 +1504,15 @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
+ MemOp mop = get_memop(oi);
+ int width = 8 * memop_size(mop);
+
+ if (!(mop & MO_SIGN) && width < 64) {
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
+ }
+
/* Opcodes that touch guest memory stop the mb optimization. */
ctx->prev_mb = NULL;
return false;
@@ -1306,6 +1538,8 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
if (i >= 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
+
+ ctx->z_mask = 1;
return false;
}
@@ -1372,6 +1606,8 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
op->opc = INDEX_op_setcond_i32;
break;
}
+
+ ctx->z_mask = 1;
return false;
do_setcond_const:
@@ -1380,6 +1616,8 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
+ int64_t z_mask_old, z_mask;
+
if (arg_is_const(op->args[1])) {
uint64_t t;
@@ -1387,7 +1625,15 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
t = sextract64(t, op->args[2], op->args[3]);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
- return false;
+
+ z_mask_old = arg_info(op->args[1])->z_mask;
+ z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
+ if (op->args[2] == 0 && z_mask >= 0) {
+ ctx->a_mask = z_mask_old ^ z_mask;
+ }
+ ctx->z_mask = z_mask;
+
+ return fold_masks(ctx, op);
}
static bool fold_shift(OptContext *ctx, TCGOp *op)
@@ -1397,6 +1643,13 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 0)) {
return true;
}
+
+ if (arg_is_const(op->args[2])) {
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type,
+ arg_info(op->args[1])->z_mask,
+ arg_info(op->args[2])->val);
+ return fold_masks(ctx, op);
+ }
return false;
}
@@ -1452,6 +1705,25 @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
return fold_addsub2_i32(ctx, op, false);
}
+static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
+{
+ /* We can't do any folding with a load, but we can record bits. */
+ switch (op->opc) {
+ CASE_OP_32_64(ld8u):
+ ctx->z_mask = MAKE_64BIT_MASK(0, 8);
+ break;
+ CASE_OP_32_64(ld16u):
+ ctx->z_mask = MAKE_64BIT_MASK(0, 16);
+ break;
+ case INDEX_op_ld32u_i64:
+ ctx->z_mask = MAKE_64BIT_MASK(0, 32);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return false;
+}
+
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
@@ -1460,7 +1732,10 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
fold_xi_to_not(ctx, op, -1)) {
return true;
}
- return false;
+
+ ctx->z_mask = arg_info(op->args[1])->z_mask
+ | arg_info(op->args[2])->z_mask;
+ return fold_masks(ctx, op);
}
/* Propagate constants and copies, fold constant expressions. */
@@ -1481,7 +1756,6 @@ void tcg_optimize(TCGContext *s)
}
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
- uint64_t z_mask, partmask, affected, tmp;
TCGOpcode opc = op->opc;
const TCGOpDef *def;
bool done = false;
@@ -1562,245 +1836,9 @@ void tcg_optimize(TCGContext *s)
break;
}
- /* Simplify using known-zero bits. Currently only ops with a single
- output argument is supported. */
- z_mask = -1;
- affected = -1;
- switch (opc) {
- CASE_OP_32_64(ext8s):
- if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
- break;
- }
- QEMU_FALLTHROUGH;
- CASE_OP_32_64(ext8u):
- z_mask = 0xff;
- goto and_const;
- CASE_OP_32_64(ext16s):
- if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
- break;
- }
- QEMU_FALLTHROUGH;
- CASE_OP_32_64(ext16u):
- z_mask = 0xffff;
- goto and_const;
- case INDEX_op_ext32s_i64:
- if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
- break;
- }
- QEMU_FALLTHROUGH;
- case INDEX_op_ext32u_i64:
- z_mask = 0xffffffffU;
- goto and_const;
-
- CASE_OP_32_64(and):
- z_mask = arg_info(op->args[2])->z_mask;
- if (arg_is_const(op->args[2])) {
- and_const:
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
- }
- z_mask = arg_info(op->args[1])->z_mask & z_mask;
- break;
-
- case INDEX_op_ext_i32_i64:
- if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
- break;
- }
- QEMU_FALLTHROUGH;
- case INDEX_op_extu_i32_i64:
- /* We do not compute affected as it is a size changing op. */
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
- break;
-
- CASE_OP_32_64(andc):
- /* Known-zeros does not imply known-ones. Therefore unless
- op->args[2] is constant, we can't infer anything from it. */
- if (arg_is_const(op->args[2])) {
- z_mask = ~arg_info(op->args[2])->z_mask;
- goto and_const;
- }
- /* But we certainly know nothing outside args[1] may be set. */
- z_mask = arg_info(op->args[1])->z_mask;
- break;
-
- case INDEX_op_sar_i32:
- if (arg_is_const(op->args[2])) {
- tmp = arg_info(op->args[2])->val & 31;
- z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
- }
- break;
- case INDEX_op_sar_i64:
- if (arg_is_const(op->args[2])) {
- tmp = arg_info(op->args[2])->val & 63;
- z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
- }
- break;
-
- case INDEX_op_shr_i32:
- if (arg_is_const(op->args[2])) {
- tmp = arg_info(op->args[2])->val & 31;
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
- }
- break;
- case INDEX_op_shr_i64:
- if (arg_is_const(op->args[2])) {
- tmp = arg_info(op->args[2])->val & 63;
- z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
- }
- break;
-
- case INDEX_op_extrl_i64_i32:
- z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
- break;
- case INDEX_op_extrh_i64_i32:
- z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
- break;
-
- CASE_OP_32_64(shl):
- if (arg_is_const(op->args[2])) {
- tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
- z_mask = arg_info(op->args[1])->z_mask << tmp;
- }
- break;
-
- CASE_OP_32_64(neg):
- /* Set to 1 all bits to the left of the rightmost. */
- z_mask = -(arg_info(op->args[1])->z_mask
- & -arg_info(op->args[1])->z_mask);
- break;
-
- CASE_OP_32_64(deposit):
- z_mask = deposit64(arg_info(op->args[1])->z_mask,
- op->args[3], op->args[4],
- arg_info(op->args[2])->z_mask);
- break;
-
- CASE_OP_32_64(extract):
- z_mask = extract64(arg_info(op->args[1])->z_mask,
- op->args[2], op->args[3]);
- if (op->args[2] == 0) {
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
- }
- break;
- CASE_OP_32_64(sextract):
- z_mask = sextract64(arg_info(op->args[1])->z_mask,
- op->args[2], op->args[3]);
- if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
- affected = arg_info(op->args[1])->z_mask & ~z_mask;
- }
- break;
-
- CASE_OP_32_64(or):
- CASE_OP_32_64(xor):
- z_mask = arg_info(op->args[1])->z_mask
- | arg_info(op->args[2])->z_mask;
- break;
-
- case INDEX_op_clz_i32:
- case INDEX_op_ctz_i32:
- z_mask = arg_info(op->args[2])->z_mask | 31;
- break;
-
- case INDEX_op_clz_i64:
- case INDEX_op_ctz_i64:
- z_mask = arg_info(op->args[2])->z_mask | 63;
- break;
-
- case INDEX_op_ctpop_i32:
- z_mask = 32 | 31;
- break;
- case INDEX_op_ctpop_i64:
- z_mask = 64 | 63;
- break;
-
- CASE_OP_32_64(setcond):
- case INDEX_op_setcond2_i32:
- z_mask = 1;
- break;
-
- CASE_OP_32_64(movcond):
- z_mask = arg_info(op->args[3])->z_mask
- | arg_info(op->args[4])->z_mask;
- break;
-
- CASE_OP_32_64(ld8u):
- z_mask = 0xff;
- break;
- CASE_OP_32_64(ld16u):
- z_mask = 0xffff;
- break;
- case INDEX_op_ld32u_i64:
- z_mask = 0xffffffffu;
- break;
-
- CASE_OP_32_64(qemu_ld):
- {
- MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
- MemOp mop = get_memop(oi);
- if (!(mop & MO_SIGN)) {
- z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
- }
- }
- break;
-
- CASE_OP_32_64(bswap16):
- z_mask = arg_info(op->args[1])->z_mask;
- if (z_mask <= 0xffff) {
- op->args[2] |= TCG_BSWAP_IZ;
- }
- z_mask = bswap16(z_mask);
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
- case TCG_BSWAP_OZ:
- break;
- case TCG_BSWAP_OS:
- z_mask = (int16_t)z_mask;
- break;
- default: /* undefined high bits */
- z_mask |= MAKE_64BIT_MASK(16, 48);
- break;
- }
- break;
-
- case INDEX_op_bswap32_i64:
- z_mask = arg_info(op->args[1])->z_mask;
- if (z_mask <= 0xffffffffu) {
- op->args[2] |= TCG_BSWAP_IZ;
- }
- z_mask = bswap32(z_mask);
- switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
- case TCG_BSWAP_OZ:
- break;
- case TCG_BSWAP_OS:
- z_mask = (int32_t)z_mask;
- break;
- default: /* undefined high bits */
- z_mask |= MAKE_64BIT_MASK(32, 32);
- break;
- }
- break;
-
- default:
- break;
- }
-
- /* 32-bit ops generate 32-bit results. For the result is zero test
- below, we can ignore high bits, but for further optimizations we
- need to record that the high bits contain garbage. */
- partmask = z_mask;
- if (ctx.type == TCG_TYPE_I32) {
- z_mask |= ~(tcg_target_ulong)0xffffffffu;
- partmask &= 0xffffffffu;
- affected &= 0xffffffffu;
- }
- ctx.z_mask = z_mask;
-
- if (partmask == 0) {
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
- continue;
- }
- if (affected == 0) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
- continue;
- }
+ /* Assume all bits affected, and no bits known zero. */
+ ctx.a_mask = -1;
+ ctx.z_mask = -1;
/*
* Process each opcode.
@@ -1873,6 +1911,11 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_extrh_i64_i32:
done = fold_extu(&ctx, op);
break;
+ CASE_OP_32_64(ld8u):
+ CASE_OP_32_64(ld16u):
+ case INDEX_op_ld32u_i64:
+ done = fold_tcg_ld(&ctx, op);
+ break;
case INDEX_op_mb:
done = fold_mb(&ctx, op);
break;
--
2.25.1
* [PULL 44/56] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
From: Richard Henderson @ 2021-10-28 2:41 UTC
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
and muls2_i64.
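The 32-bit cases split one wide product into the two result words; a
standalone cross-check (plain C, relying on the usual two's-complement
int32_t/int64_t conversions, as QEMU does):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t a = 0xdeadbeefu, b = 0xcafebabeu;

    /* mulu2_i32: one 64-bit multiply yields both output words */
    uint64_t p = (uint64_t)a * b;
    uint32_t lo = (uint32_t)p, hi = (uint32_t)(p >> 32);
    assert(((uint64_t)hi << 32 | lo) == p);

    /* muls2_i32: the same split applied to the signed product */
    int64_t q = (int64_t)(int32_t)a * (int32_t)b;
    int32_t slo = (int32_t)q, shi = (int32_t)(q >> 32);
    assert(((int64_t)shi << 32 | (uint32_t)slo) == q);
    return 0;
}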
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index e84d10be53..e2ecad2884 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1412,19 +1412,44 @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
return false;
}
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
- uint32_t a = arg_info(op->args[2])->val;
- uint32_t b = arg_info(op->args[3])->val;
- uint64_t r = (uint64_t)a * b;
+ uint64_t a = arg_info(op->args[2])->val;
+ uint64_t b = arg_info(op->args[3])->val;
+ uint64_t h, l;
TCGArg rl, rh;
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+ TCGOp *op2;
+
+ switch (op->opc) {
+ case INDEX_op_mulu2_i32:
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
+ h = (int32_t)(l >> 32);
+ l = (int32_t)l;
+ break;
+ case INDEX_op_muls2_i32:
+ l = (int64_t)(int32_t)a * (int32_t)b;
+ h = l >> 32;
+ l = (int32_t)l;
+ break;
+ case INDEX_op_mulu2_i64:
+ mulu64(&l, &h, a, b);
+ break;
+ case INDEX_op_muls2_i64:
+ muls64(&l, &h, a, b);
+ break;
+ default:
+ g_assert_not_reached();
+ }
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
+
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
+
+ tcg_opt_gen_movi(ctx, op, rl, l);
+ tcg_opt_gen_movi(ctx, op2, rh, h);
return true;
}
return false;
@@ -1932,8 +1957,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(muluh):
done = fold_mul_highpart(&ctx, op);
break;
- case INDEX_op_mulu2_i32:
- done = fold_mulu2_i32(&ctx, op);
+ CASE_OP_32_64(muls2):
+ CASE_OP_32_64(mulu2):
+ done = fold_multiply2(&ctx, op);
break;
CASE_OP_32_64(nand):
done = fold_nand(&ctx, op);
--
2.25.1
* [PULL 45/56] tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops
From: Richard Henderson @ 2021-10-28 2:41 UTC
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
Rename to fold_addsub2.
Use Int128 to implement the wider operation.
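The Int128 sum is equivalent to adding the 64-bit halves with explicit
carry propagation; a standalone check (assumes a compiler with unsigned
__int128 for the reference value; QEMU's Int128 also has a fallback that
does not need it):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t al = ~0ull, ah = 1, bl = 1, bh = 2;

    uint64_t rl = al + bl;
    uint64_t rh = ah + bh + (rl < al);  /* carry out of the low word */

    unsigned __int128 a = ((unsigned __int128)ah << 64) | al;
    unsigned __int128 b = ((unsigned __int128)bh << 64) | bl;
    unsigned __int128 r = a + b;

    assert(rl == (uint64_t)r && rh == (uint64_t)(r >> 64));
    return 0;
}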
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++----------------
1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index e2ecad2884..f723deaafe 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -24,6 +24,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/int128.h"
#include "tcg/tcg-op.h"
#include "tcg-internal.h"
@@ -838,37 +839,59 @@ static bool fold_add(OptContext *ctx, TCGOp *op)
return false;
}
-static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
+static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
- uint32_t al = arg_info(op->args[2])->val;
- uint32_t ah = arg_info(op->args[3])->val;
- uint32_t bl = arg_info(op->args[4])->val;
- uint32_t bh = arg_info(op->args[5])->val;
- uint64_t a = ((uint64_t)ah << 32) | al;
- uint64_t b = ((uint64_t)bh << 32) | bl;
+ uint64_t al = arg_info(op->args[2])->val;
+ uint64_t ah = arg_info(op->args[3])->val;
+ uint64_t bl = arg_info(op->args[4])->val;
+ uint64_t bh = arg_info(op->args[5])->val;
TCGArg rl, rh;
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+ TCGOp *op2;
- if (add) {
- a += b;
+ if (ctx->type == TCG_TYPE_I32) {
+ uint64_t a = deposit64(al, 32, 32, ah);
+ uint64_t b = deposit64(bl, 32, 32, bh);
+
+ if (add) {
+ a += b;
+ } else {
+ a -= b;
+ }
+
+ al = sextract64(a, 0, 32);
+ ah = sextract64(a, 32, 32);
} else {
- a -= b;
+ Int128 a = int128_make128(al, ah);
+ Int128 b = int128_make128(bl, bh);
+
+ if (add) {
+ a = int128_add(a, b);
+ } else {
+ a = int128_sub(a, b);
+ }
+
+ al = int128_getlo(a);
+ ah = int128_gethi(a);
}
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
+
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
+
+ tcg_opt_gen_movi(ctx, op, rl, al);
+ tcg_opt_gen_movi(ctx, op2, rh, ah);
return true;
}
return false;
}
-static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_add2(OptContext *ctx, TCGOp *op)
{
- return fold_addsub2_i32(ctx, op, true);
+ return fold_addsub2(ctx, op, true);
}
static bool fold_and(OptContext *ctx, TCGOp *op)
@@ -1725,9 +1748,9 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
return false;
}
-static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
- return fold_addsub2_i32(ctx, op, false);
+ return fold_addsub2(ctx, op, false);
}
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
@@ -1873,8 +1896,8 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(add):
done = fold_add(&ctx, op);
break;
- case INDEX_op_add2_i32:
- done = fold_add2_i32(&ctx, op);
+ CASE_OP_32_64(add2):
+ done = fold_add2(&ctx, op);
break;
CASE_OP_32_64_VEC(and):
done = fold_and(&ctx, op);
@@ -2011,8 +2034,8 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(sub):
done = fold_sub(&ctx, op);
break;
- case INDEX_op_sub2_i32:
- done = fold_sub2_i32(&ctx, op);
+ CASE_OP_32_64(sub2):
+ done = fold_sub2(&ctx, op);
break;
CASE_OP_32_64_VEC(xor):
done = fold_xor(&ctx, op);
--
2.25.1
* [PULL 46/56] tcg/optimize: Sink commutative operand swapping into fold functions
From: Richard Henderson @ 2021-10-28 2:41 UTC
To: qemu-devel; +Cc: Luis Pires
Most of these are handled by creating a fold_const2_commutative
to handle all of the binary operators. The rest were already
handled on a case-by-case basis in the switch, and have their
own fold function in which to place the call.
We now have only one major switch on TCGOpcode.
Introduce NO_DEST and a block comment for swap_commutative in
order to make the handling of brcond and movcond opcodes cleaner.
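A toy model of the canonicalization (plain C sketch; the real operands
are TCGArgs, constness comes from arg_is_const(), and the real helper
additionally swaps when *p2 matches @dest): putting the constant second
means the fold_xi_* helpers only ever inspect op->args[2], and callers
such as fold_brcond compensate for a swap with tcg_swap_cond:

#include <assert.h>
#include <stdbool.h>

struct arg { int val; bool is_const; };

static bool swap_commutative(struct arg *p1, struct arg *p2)
{
    if (p1->is_const && !p2->is_const) {
        struct arg t = *p1;
        *p1 = *p2;
        *p2 = t;
        return true;  /* caller must swap/invert its condition */
    }
    return false;
}

int main(void)
{
    struct arg a = { 5, true }, b = { 0, false };
    assert(swap_commutative(&a, &b) && !a.is_const && b.is_const);
    return 0;
}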
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 142 ++++++++++++++++++++++++-------------------------
1 file changed, 70 insertions(+), 72 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index f723deaafe..e42f5a145f 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -577,6 +577,19 @@ static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
return -1;
}
+/**
+ * swap_commutative:
+ * @dest: TCGArg of the destination argument, or NO_DEST.
+ * @p1: first paired argument
+ * @p2: second paired argument
+ *
+ * If *@p1 is a constant and *@p2 is not, swap.
+ * If *@p2 matches @dest, swap.
+ * Return true if a swap was performed.
+ */
+
+#define NO_DEST temp_arg(NULL)
+
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
TCGArg a1 = *p1, a2 = *p2;
@@ -696,6 +709,12 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
+{
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+ return fold_const2(ctx, op);
+}
+
static bool fold_masks(OptContext *ctx, TCGOp *op)
{
uint64_t a_mask = ctx->a_mask;
@@ -832,7 +851,7 @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
static bool fold_add(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
+ if (fold_const2_commutative(ctx, op) ||
fold_xi_to_x(ctx, op, 0)) {
return true;
}
@@ -891,6 +910,10 @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
static bool fold_add2(OptContext *ctx, TCGOp *op)
{
+ /* Note that the high and low parts may be independently swapped. */
+ swap_commutative(op->args[0], &op->args[2], &op->args[4]);
+ swap_commutative(op->args[1], &op->args[3], &op->args[5]);
+
return fold_addsub2(ctx, op, true);
}
@@ -898,7 +921,7 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
{
uint64_t z1, z2;
- if (fold_const2(ctx, op) ||
+ if (fold_const2_commutative(ctx, op) ||
fold_xi_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, -1) ||
fold_xx_to_x(ctx, op)) {
@@ -950,8 +973,13 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[2];
- int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
+ int i;
+ if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
+ op->args[2] = cond = tcg_swap_cond(cond);
+ }
+
+ i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
if (i == 0) {
tcg_op_remove(ctx->tcg, op);
return true;
@@ -966,10 +994,14 @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[4];
- int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
TCGArg label = op->args[5];
- int inv = 0;
+ int i, inv = 0;
+ if (swap_commutative2(&op->args[0], &op->args[2])) {
+ op->args[4] = cond = tcg_swap_cond(cond);
+ }
+
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
if (i >= 0) {
goto do_brcond_const;
}
@@ -1219,7 +1251,7 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
+ if (fold_const2_commutative(ctx, op) ||
fold_xi_to_x(ctx, op, -1) ||
fold_xi_to_not(ctx, op, 0)) {
return true;
@@ -1381,8 +1413,20 @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[5];
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
+ int i;
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+ op->args[5] = cond = tcg_swap_cond(cond);
+ }
+ /*
+ * Canonicalize the "false" input reg to match the destination reg so
+ * that the tcg backend can implement a "move if true" operation.
+ */
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
+ op->args[5] = cond = tcg_invert_cond(cond);
+ }
+
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
if (i >= 0) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
}
@@ -1428,7 +1472,7 @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
+ if (fold_const2_commutative(ctx, op) ||
fold_xi_to_i(ctx, op, 0)) {
return true;
}
@@ -1437,6 +1481,8 @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
+ swap_commutative(op->args[0], &op->args[2], &op->args[3]);
+
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
uint64_t a = arg_info(op->args[2])->val;
uint64_t b = arg_info(op->args[3])->val;
@@ -1480,7 +1526,7 @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
static bool fold_nand(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
+ if (fold_const2_commutative(ctx, op) ||
fold_xi_to_not(ctx, op, -1)) {
return true;
}
@@ -1509,7 +1555,7 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
static bool fold_nor(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
+ if (fold_const2_commutative(ctx, op) ||
fold_xi_to_not(ctx, op, 0)) {
return true;
}
@@ -1529,7 +1575,7 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
static bool fold_or(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
+ if (fold_const2_commutative(ctx, op) ||
fold_xi_to_x(ctx, op, 0) ||
fold_xx_to_x(ctx, op)) {
return true;
@@ -1581,8 +1627,13 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[3];
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
+ int i;
+ if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
+ op->args[3] = cond = tcg_swap_cond(cond);
+ }
+
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
if (i >= 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
@@ -1594,9 +1645,13 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
TCGCond cond = op->args[5];
- int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
- int inv = 0;
+ int i, inv = 0;
+ if (swap_commutative2(&op->args[1], &op->args[3])) {
+ op->args[5] = cond = tcg_swap_cond(cond);
+ }
+
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
if (i >= 0) {
goto do_setcond_const;
}
@@ -1774,7 +1829,7 @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
+ if (fold_const2_commutative(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, 0) ||
fold_xi_to_not(ctx, op, -1)) {
@@ -1827,63 +1882,6 @@ void tcg_optimize(TCGContext *s)
ctx.type = TCG_TYPE_I32;
}
- /* For commutative operations make constant second argument */
- switch (opc) {
- CASE_OP_32_64_VEC(add):
- CASE_OP_32_64_VEC(mul):
- CASE_OP_32_64_VEC(and):
- CASE_OP_32_64_VEC(or):
- CASE_OP_32_64_VEC(xor):
- CASE_OP_32_64(eqv):
- CASE_OP_32_64(nand):
- CASE_OP_32_64(nor):
- CASE_OP_32_64(muluh):
- CASE_OP_32_64(mulsh):
- swap_commutative(op->args[0], &op->args[1], &op->args[2]);
- break;
- CASE_OP_32_64(brcond):
- if (swap_commutative(-1, &op->args[0], &op->args[1])) {
- op->args[2] = tcg_swap_cond(op->args[2]);
- }
- break;
- CASE_OP_32_64(setcond):
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
- op->args[3] = tcg_swap_cond(op->args[3]);
- }
- break;
- CASE_OP_32_64(movcond):
- if (swap_commutative(-1, &op->args[1], &op->args[2])) {
- op->args[5] = tcg_swap_cond(op->args[5]);
- }
- /* For movcond, we canonicalize the "false" input reg to match
- the destination reg so that the tcg backend can implement
- a "move if true" operation. */
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
- op->args[5] = tcg_invert_cond(op->args[5]);
- }
- break;
- CASE_OP_32_64(add2):
- swap_commutative(op->args[0], &op->args[2], &op->args[4]);
- swap_commutative(op->args[1], &op->args[3], &op->args[5]);
- break;
- CASE_OP_32_64(mulu2):
- CASE_OP_32_64(muls2):
- swap_commutative(op->args[0], &op->args[2], &op->args[3]);
- break;
- case INDEX_op_brcond2_i32:
- if (swap_commutative2(&op->args[0], &op->args[2])) {
- op->args[4] = tcg_swap_cond(op->args[4]);
- }
- break;
- case INDEX_op_setcond2_i32:
- if (swap_commutative2(&op->args[1], &op->args[3])) {
- op->args[5] = tcg_swap_cond(op->args[5]);
- }
- break;
- default:
- break;
- }
-
/* Assume all bits affected, and no bits known zero. */
ctx.a_mask = -1;
ctx.z_mask = -1;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 47/56] tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (45 preceding siblings ...)
2021-10-28 2:41 ` [PULL 46/56] tcg/optimize: Sink commutative operand swapping into fold functions Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 48/56] tcg/optimize: Use fold_xx_to_i for orc Richard Henderson
` (9 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
This "garbage" setting pre-dates the addition of the type
changing opcodes INDEX_op_ext_i32_i64, INDEX_op_extu_i32_i64,
and INDEX_op_extr{l,h}_i64_i32.
So now we have a definitive points at which to adjust z_mask
to eliminate such bits from the 32-bit operands.
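To see the representational change concretely, here is a minimal
standalone sketch (not part of the patch; the 16-bit z_mask value is
hypothetical) contrasting the old "garbage" high bits with the new
sign-extended canonical form:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* z_mask of a 32-bit op known to produce a 16-bit result. */
        uint64_t z_mask = 0xffff;

        /* Old scheme: the high 32 bits were forced to "garbage". */
        uint64_t z_old = z_mask | ~0xffffffffull;

        /* New scheme: sign-extend the 32-bit mask; with bit 31 clear,
           the high bits become known-zero instead of unknown. */
        uint64_t z_new = (uint64_t)(int64_t)(int32_t)z_mask;

        printf("old %016" PRIx64 " new %016" PRIx64 "\n", z_old, z_new);
        /* prints: old ffffffff0000ffff new 000000000000ffff */
        return 0;
    }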
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 35 ++++++++++++++++-------------------
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index e42f5a145f..e0abf769d0 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -124,10 +124,6 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
ti->is_const = true;
ti->val = ts->val;
ti->z_mask = ts->val;
- if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
- /* High bits of a 32-bit quantity are garbage. */
- ti->z_mask |= ~0xffffffffull;
- }
} else {
ti->is_const = false;
ti->z_mask = -1;
@@ -192,7 +188,6 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
TCGTemp *src_ts = arg_temp(src);
TempOptInfo *di;
TempOptInfo *si;
- uint64_t z_mask;
TCGOpcode new_op;
if (ts_are_copies(dst_ts, src_ts)) {
@@ -224,12 +219,7 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
op->args[0] = dst;
op->args[1] = src;
- z_mask = si->z_mask;
- if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
- /* High bits of the destination are now garbage. */
- z_mask |= ~0xffffffffull;
- }
- di->z_mask = z_mask;
+ di->z_mask = si->z_mask;
if (src_ts->type == dst_ts->type) {
TempOptInfo *ni = ts_info(si->next_copy);
@@ -247,9 +237,14 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
TCGArg dst, uint64_t val)
{
- /* Convert movi to mov with constant temp. */
- TCGTemp *tv = tcg_constant_internal(ctx->type, val);
+ TCGTemp *tv;
+ if (ctx->type == TCG_TYPE_I32) {
+ val = (int32_t)val;
+ }
+
+ /* Convert movi to mov with constant temp. */
+ tv = tcg_constant_internal(ctx->type, val);
init_ts_info(ctx, tv);
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}
@@ -721,14 +716,16 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
uint64_t z_mask = ctx->z_mask;
/*
- * 32-bit ops generate 32-bit results. For the result is zero test
- * below, we can ignore high bits, but for further optimizations we
- * need to record that the high bits contain garbage.
+ * 32-bit ops generate 32-bit results, which for the purpose of
+ * simplifying tcg are sign-extended. Certainly that's how we
+ * represent our constants elsewhere. Note that the bits will
+ * be reset properly for a 64-bit value when encountering the
+ * type changing opcodes.
*/
if (ctx->type == TCG_TYPE_I32) {
- ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
- a_mask &= MAKE_64BIT_MASK(0, 32);
- z_mask &= MAKE_64BIT_MASK(0, 32);
+ a_mask = (int32_t)a_mask;
+ z_mask = (int32_t)z_mask;
+ ctx->z_mask = z_mask;
}
if (z_mask == 0) {
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 48/56] tcg/optimize: Use fold_xx_to_i for orc
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (46 preceding siblings ...)
2021-10-28 2:41 ` [PULL 47/56] tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 49/56] tcg/optimize: Use fold_xi_to_x for mul Richard Henderson
` (8 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
Recognize the constant function for or-complement.
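The folded identity: for any x, orc(x, x) = x | ~x is all-ones, so when
both inputs are copies of the same temp the op collapses to the constant
-1. A quick standalone check (illustrative value only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0x123456789abcdef0ull;
        assert((x | ~x) == ~0ull);   /* orc(x, x) == -1 */
        return 0;
    }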
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index e0abf769d0..6d795954f2 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1586,6 +1586,7 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
static bool fold_orc(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_xx_to_i(ctx, op, -1) ||
fold_xi_to_x(ctx, op, -1) ||
fold_ix_to_not(ctx, op, 0)) {
return true;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 49/56] tcg/optimize: Use fold_xi_to_x for mul
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (47 preceding siblings ...)
2021-10-28 2:41 ` [PULL 48/56] tcg/optimize: Use fold_xx_to_i for orc Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 50/56] tcg/optimize: Use fold_xi_to_x for div Richard Henderson
` (7 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Recognize the identity function for low-part multiply.
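The identity in question is mul(x, 1) == x; with the constant
canonicalized into the second operand, fold_xi_to_x can turn the
multiply into a plain copy. A hypothetical stand-in for the fold's
effect (a sketch, not the real API):

    #include <stdint.h>

    /* Sketch: what the optimizer effectively does with a constant RHS. */
    static uint64_t fold_mul_sketch(uint64_t x, uint64_t y)
    {
        if (y == 0) {
            return 0;       /* fold_xi_to_i: mul by 0 is the constant 0 */
        }
        if (y == 1) {
            return x;       /* fold_xi_to_x: mul by 1 is a move */
        }
        return x * y;       /* otherwise the multiply remains */
    }

    int main(void)
    {
        return fold_mul_sketch(42, 1) == 42 ? 0 : 1;
    }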
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 6d795954f2..907049fb06 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1461,7 +1461,8 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
static bool fold_mul(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
- fold_xi_to_i(ctx, op, 0)) {
+ fold_xi_to_i(ctx, op, 0) ||
+ fold_xi_to_x(ctx, op, 1)) {
return true;
}
return false;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 50/56] tcg/optimize: Use fold_xi_to_x for div
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (48 preceding siblings ...)
2021-10-28 2:41 ` [PULL 49/56] tcg/optimize: Use fold_xi_to_x for mul Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 51/56] tcg/optimize: Use fold_xx_to_i for rem Richard Henderson
` (6 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Recognize the identity function for division.
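Likewise x / 1 == x for both signed and unsigned division, so a constant
divisor of 1 reduces the op to a copy via fold_xi_to_x. Quick standalone
check (illustrative values):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t u = 0x8000000000000001ull;
        int64_t  s = -42;
        assert(u / 1 == u);   /* divu identity */
        assert(s / 1 == s);   /* divs identity */
        return 0;
    }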
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 907049fb06..f8b0709157 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1218,7 +1218,11 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
static bool fold_divide(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_x(ctx, op, 1)) {
+ return true;
+ }
+ return false;
}
static bool fold_dup(OptContext *ctx, TCGOp *op)
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 51/56] tcg/optimize: Use fold_xx_to_i for rem
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (49 preceding siblings ...)
2021-10-28 2:41 ` [PULL 50/56] tcg/optimize: Use fold_xi_to_x for div Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 52/56] tcg/optimize: Optimize sign extensions Richard Henderson
` (5 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Philippe Mathieu-Daudé
Recognize the constant function for remainder.
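The constant function here is rem(x, x) == 0 whenever the remainder is
defined (x != 0; a zero divisor is outside the op's defined behavior
anyway), so fold_xx_to_i replaces a remainder of a value by itself with
the constant 0. Illustrative check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0xdeadbeefull;   /* any nonzero value */
        assert(x % x == 0);
        return 0;
    }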
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index f8b0709157..7ac63c9231 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1624,7 +1624,11 @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xx_to_i(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_setcond(OptContext *ctx, TCGOp *op)
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 52/56] tcg/optimize: Optimize sign extensions
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (50 preceding siblings ...)
2021-10-28 2:41 ` [PULL 51/56] tcg/optimize: Use fold_xx_to_i for rem Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 53/56] tcg/optimize: Propagate sign info for logical operations Richard Henderson
` (4 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
Certain targets, like riscv, produce signed 32-bit results.
This can lead to lots of redundant extensions as values are
manipulated.
Begin by tracking only the obvious sign-extensions, and
converting them to simple copies when possible.
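The new s_mask records how many leading bits are guaranteed copies of
the sign bit. Below is a standalone re-creation of the patch's
smask_from_value(), with the GCC builtin standing in for QEMU's
clrsb64() (illustrative; the test values are arbitrary):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Left-aligned mask of clrsb(value) bits, as in the patch. */
    static uint64_t smask_from_value(uint64_t value)
    {
        int rep = __builtin_clrsbll(value);   /* stand-in for clrsb64() */
        return ~(~0ull >> rep);
    }

    int main(void)
    {
        /* -128 and 127 both fit a signed 8-bit field: 56 redundant
           sign bits, so s_mask covers bits 63..8 in either case. */
        printf("%016" PRIx64 "\n", smask_from_value((uint64_t)-128));
        printf("%016" PRIx64 "\n", smask_from_value(127));
        /* both print ffffffffffffff00 */
        return 0;
    }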
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 123 ++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 102 insertions(+), 21 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 7ac63c9231..ef202abbcb 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -43,6 +43,7 @@ typedef struct TempOptInfo {
TCGTemp *next_copy;
uint64_t val;
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
+ uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
} TempOptInfo;
typedef struct OptContext {
@@ -53,9 +54,37 @@ typedef struct OptContext {
/* In flight values from optimization. */
uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
+ uint64_t s_mask; /* mask of clrsb(value) bits */
TCGType type;
} OptContext;
+/* Calculate the smask for a specific value. */
+static uint64_t smask_from_value(uint64_t value)
+{
+ int rep = clrsb64(value);
+ return ~(~0ull >> rep);
+}
+
+/*
+ * Calculate the smask for a given set of known-zeros.
+ * If there are lots of zeros on the left, we can consider the remainder
+ * an unsigned field, and thus the corresponding signed field is one bit
+ * larger.
+ */
+static uint64_t smask_from_zmask(uint64_t zmask)
+{
+ /*
+ * Only the 0 bits are significant for zmask, thus the msb itself
+ * must be zero, else we have no sign information.
+ */
+ int rep = clz64(zmask);
+ if (rep == 0) {
+ return 0;
+ }
+ rep -= 1;
+ return ~(~0ull >> rep);
+}
+
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
return ts->state_ptr;
@@ -94,6 +123,7 @@ static void reset_ts(TCGTemp *ts)
ti->prev_copy = ts;
ti->is_const = false;
ti->z_mask = -1;
+ ti->s_mask = 0;
}
static void reset_temp(TCGArg arg)
@@ -124,9 +154,11 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
ti->is_const = true;
ti->val = ts->val;
ti->z_mask = ts->val;
+ ti->s_mask = smask_from_value(ts->val);
} else {
ti->is_const = false;
ti->z_mask = -1;
+ ti->s_mask = 0;
}
}
@@ -220,6 +252,7 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
op->args[1] = src;
di->z_mask = si->z_mask;
+ di->s_mask = si->s_mask;
if (src_ts->type == dst_ts->type) {
TempOptInfo *ni = ts_info(si->next_copy);
@@ -658,13 +691,15 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
nb_oargs = def->nb_oargs;
for (i = 0; i < nb_oargs; i++) {
- reset_temp(op->args[i]);
+ TCGTemp *ts = arg_temp(op->args[i]);
+ reset_ts(ts);
/*
- * Save the corresponding known-zero bits mask for the
+ * Save the corresponding known-zero/sign bits mask for the
* first output argument (only one supported so far).
*/
if (i == 0) {
- arg_info(op->args[i])->z_mask = ctx->z_mask;
+ ts_info(ts)->z_mask = ctx->z_mask;
+ ts_info(ts)->s_mask = ctx->s_mask;
}
}
}
@@ -714,6 +749,7 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
{
uint64_t a_mask = ctx->a_mask;
uint64_t z_mask = ctx->z_mask;
+ uint64_t s_mask = ctx->s_mask;
/*
* 32-bit ops generate 32-bit results, which for the purpose of
@@ -725,7 +761,9 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
if (ctx->type == TCG_TYPE_I32) {
a_mask = (int32_t)a_mask;
z_mask = (int32_t)z_mask;
+ s_mask |= MAKE_64BIT_MASK(32, 32);
ctx->z_mask = z_mask;
+ ctx->s_mask = s_mask;
}
if (z_mask == 0) {
@@ -1072,7 +1110,7 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
- uint64_t z_mask, sign;
+ uint64_t z_mask, s_mask, sign;
if (arg_is_const(op->args[1])) {
uint64_t t = arg_info(op->args[1])->val;
@@ -1082,6 +1120,7 @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
}
z_mask = arg_info(op->args[1])->z_mask;
+
switch (op->opc) {
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
@@ -1100,6 +1139,7 @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
default:
g_assert_not_reached();
}
+ s_mask = smask_from_zmask(z_mask);
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
case TCG_BSWAP_OZ:
@@ -1108,14 +1148,17 @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
/* If the sign bit may be 1, force all the bits above to 1. */
if (z_mask & sign) {
z_mask |= sign;
+ s_mask = sign << 1;
}
break;
default:
/* The high bits are undefined: force all bits above the sign to 1. */
z_mask |= sign << 1;
+ s_mask = 0;
break;
}
ctx->z_mask = z_mask;
+ ctx->s_mask = s_mask;
return fold_masks(ctx, op);
}
@@ -1263,21 +1306,24 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
static bool fold_extract(OptContext *ctx, TCGOp *op)
{
uint64_t z_mask_old, z_mask;
+ int pos = op->args[2];
+ int len = op->args[3];
if (arg_is_const(op->args[1])) {
uint64_t t;
t = arg_info(op->args[1])->val;
- t = extract64(t, op->args[2], op->args[3]);
+ t = extract64(t, pos, len);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
z_mask_old = arg_info(op->args[1])->z_mask;
- z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
- if (op->args[2] == 0) {
+ z_mask = extract64(z_mask_old, pos, len);
+ if (pos == 0) {
ctx->a_mask = z_mask_old ^ z_mask;
}
ctx->z_mask = z_mask;
+ ctx->s_mask = smask_from_zmask(z_mask);
return fold_masks(ctx, op);
}
@@ -1303,14 +1349,16 @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
static bool fold_exts(OptContext *ctx, TCGOp *op)
{
- uint64_t z_mask_old, z_mask, sign;
+ uint64_t s_mask_old, s_mask, z_mask, sign;
bool type_change = false;
if (fold_const1(ctx, op)) {
return true;
}
- z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+ z_mask = arg_info(op->args[1])->z_mask;
+ s_mask = arg_info(op->args[1])->s_mask;
+ s_mask_old = s_mask;
switch (op->opc) {
CASE_OP_32_64(ext8s):
@@ -1334,10 +1382,14 @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
if (z_mask & sign) {
z_mask |= sign;
- } else if (!type_change) {
- ctx->a_mask = z_mask_old ^ z_mask;
}
+ s_mask |= sign << 1;
+
ctx->z_mask = z_mask;
+ ctx->s_mask = s_mask;
+ if (!type_change) {
+ ctx->a_mask = s_mask & ~s_mask_old;
+ }
return fold_masks(ctx, op);
}
@@ -1376,6 +1428,7 @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
}
ctx->z_mask = z_mask;
+ ctx->s_mask = smask_from_zmask(z_mask);
if (!type_change) {
ctx->a_mask = z_mask_old ^ z_mask;
}
@@ -1606,8 +1659,12 @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
MemOp mop = get_memop(oi);
int width = 8 * memop_size(mop);
- if (!(mop & MO_SIGN) && width < 64) {
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
+ if (width < 64) {
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+ if (!(mop & MO_SIGN)) {
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
+ ctx->s_mask <<= 1;
+ }
}
/* Opcodes that touch guest memory stop the mb optimization. */
@@ -1726,23 +1783,31 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
- int64_t z_mask_old, z_mask;
+ uint64_t z_mask, s_mask, s_mask_old;
+ int pos = op->args[2];
+ int len = op->args[3];
if (arg_is_const(op->args[1])) {
uint64_t t;
t = arg_info(op->args[1])->val;
- t = sextract64(t, op->args[2], op->args[3]);
+ t = sextract64(t, pos, len);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
- z_mask_old = arg_info(op->args[1])->z_mask;
- z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
- if (op->args[2] == 0 && z_mask >= 0) {
- ctx->a_mask = z_mask_old ^ z_mask;
- }
+ z_mask = arg_info(op->args[1])->z_mask;
+ z_mask = sextract64(z_mask, pos, len);
ctx->z_mask = z_mask;
+ s_mask_old = arg_info(op->args[1])->s_mask;
+ s_mask = sextract64(s_mask_old, pos, len);
+ s_mask |= MAKE_64BIT_MASK(len, 64 - len);
+ ctx->s_mask = s_mask;
+
+ if (pos == 0) {
+ ctx->a_mask = s_mask & ~s_mask_old;
+ }
+
return fold_masks(ctx, op);
}
@@ -1819,14 +1884,26 @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
/* We can't do any folding with a load, but we can record bits. */
switch (op->opc) {
+ CASE_OP_32_64(ld8s):
+ ctx->s_mask = MAKE_64BIT_MASK(8, 56);
+ break;
CASE_OP_32_64(ld8u):
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
+ ctx->s_mask = MAKE_64BIT_MASK(9, 55);
+ break;
+ CASE_OP_32_64(ld16s):
+ ctx->s_mask = MAKE_64BIT_MASK(16, 48);
break;
CASE_OP_32_64(ld16u):
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
+ ctx->s_mask = MAKE_64BIT_MASK(17, 47);
+ break;
+ case INDEX_op_ld32s_i64:
+ ctx->s_mask = MAKE_64BIT_MASK(32, 32);
break;
case INDEX_op_ld32u_i64:
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
+ ctx->s_mask = MAKE_64BIT_MASK(33, 31);
break;
default:
g_assert_not_reached();
@@ -1889,9 +1966,10 @@ void tcg_optimize(TCGContext *s)
ctx.type = TCG_TYPE_I32;
}
- /* Assume all bits affected, and no bits known zero. */
+ /* Assume all bits affected, no bits known zero, no sign reps. */
ctx.a_mask = -1;
ctx.z_mask = -1;
+ ctx.s_mask = 0;
/*
* Process each opcode.
@@ -1964,8 +2042,11 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_extrh_i64_i32:
done = fold_extu(&ctx, op);
break;
+ CASE_OP_32_64(ld8s):
CASE_OP_32_64(ld8u):
+ CASE_OP_32_64(ld16s):
CASE_OP_32_64(ld16u):
+ case INDEX_op_ld32s_i64:
case INDEX_op_ld32u_i64:
done = fold_tcg_ld(&ctx, op);
break;
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 53/56] tcg/optimize: Propagate sign info for logical operations
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (51 preceding siblings ...)
2021-10-28 2:41 ` [PULL 52/56] tcg/optimize: Optimize sign extensions Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 54/56] tcg/optimize: Propagate sign info for setcond Richard Henderson
` (3 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée, Philippe Mathieu-Daudé
Sign repetitions are perforce all identical, whether they are 1 or 0.
Bitwise operations preserve the relative quantity of the repetitions.
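Intersecting the two input s_masks is therefore safe: each operand
repeats its sign bit through at least its own mask, and a bitwise op
cannot shorten the smaller of the two runs. A small standalone check
(illustrative values; smask() mirrors the series' helpers via the GCC
builtin):

    #include <assert.h>
    #include <stdint.h>

    /* Left-aligned mask of redundant sign bits, as in the series. */
    static uint64_t smask(int64_t v)
    {
        return ~(~0ull >> __builtin_clrsbll(v));
    }

    int main(void)
    {
        int64_t a = -4;   /* sign repeats down to bit 2 */
        int64_t b = 7;    /* sign repeats down to bit 3 */
        uint64_t guaranteed = smask(a) & smask(b);

        /* Each result's actual repetitions cover the guaranteed ones. */
        assert((smask(a & b) & guaranteed) == guaranteed);
        assert((smask(a | b) & guaranteed) == guaranteed);
        assert((smask(a ^ b) & guaranteed) == guaranteed);
        return 0;
    }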
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index ef202abbcb..de1abd9cc3 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -967,6 +967,13 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
z2 = arg_info(op->args[2])->z_mask;
ctx->z_mask = z1 & z2;
+ /*
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
+ * Bitwise operations preserve the relative quantity of the repetitions.
+ */
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
+
/*
* Known-zeros does not imply known-ones. Therefore unless
* arg2 is constant, we can't infer affected bits from it.
@@ -1002,6 +1009,8 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
}
ctx->z_mask = z1;
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return fold_masks(ctx, op);
}
@@ -1300,6 +1309,9 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
fold_xi_to_not(ctx, op, 0)) {
return true;
}
+
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return false;
}
@@ -1487,6 +1499,8 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
ctx->z_mask = arg_info(op->args[3])->z_mask
| arg_info(op->args[4])->z_mask;
+ ctx->s_mask = arg_info(op->args[3])->s_mask
+ & arg_info(op->args[4])->s_mask;
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
uint64_t tv = arg_info(op->args[3])->val;
@@ -1585,6 +1599,9 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
fold_xi_to_not(ctx, op, -1)) {
return true;
}
+
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return false;
}
@@ -1614,6 +1631,9 @@ static bool fold_nor(OptContext *ctx, TCGOp *op)
fold_xi_to_not(ctx, op, 0)) {
return true;
}
+
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return false;
}
@@ -1623,6 +1643,8 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
return true;
}
+ ctx->s_mask = arg_info(op->args[1])->s_mask;
+
/* Because of fold_to_not, we want to always return true, via finish. */
finish_folding(ctx, op);
return true;
@@ -1638,6 +1660,8 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
ctx->z_mask = arg_info(op->args[1])->z_mask
| arg_info(op->args[2])->z_mask;
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return fold_masks(ctx, op);
}
@@ -1649,6 +1673,9 @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
fold_ix_to_not(ctx, op, 0)) {
return true;
}
+
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return false;
}
@@ -1922,6 +1949,8 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
ctx->z_mask = arg_info(op->args[1])->z_mask
| arg_info(op->args[2])->z_mask;
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return fold_masks(ctx, op);
}
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 54/56] tcg/optimize: Propagate sign info for setcond
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (52 preceding siblings ...)
2021-10-28 2:41 ` [PULL 53/56] tcg/optimize: Propagate sign info for logical operations Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 55/56] tcg/optimize: Propagate sign info for bit counting Richard Henderson
` (2 subsequent siblings)
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
The result is either 0 or 1, which means that we have
a 2-bit signed result, and thus 62 bits of sign.
For clarity, use the smask_from_zmask function.
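Worked through smask_from_zmask from earlier in the series:
clz64(1) == 63, so rep == 62 and the resulting s_mask has its top 62
bits set, matching the two-bit signed range {0, 1}. A one-assert
standalone check (illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* smask_from_zmask(1) == ~(~0ull >> (clz64(1) - 1)) */
        uint64_t s_mask = ~(~0ull >> (__builtin_clzll(1) - 1));
        assert(s_mask == ~(uint64_t)3);   /* top 62 bits set */
        return 0;
    }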
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index de1abd9cc3..5fa4d7285d 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1730,6 +1730,7 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
}
ctx->z_mask = 1;
+ ctx->s_mask = smask_from_zmask(1);
return false;
}
@@ -1802,6 +1803,7 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
}
ctx->z_mask = 1;
+ ctx->s_mask = smask_from_zmask(1);
return false;
do_setcond_const:
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 55/56] tcg/optimize: Propagate sign info for bit counting
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (53 preceding siblings ...)
2021-10-28 2:41 ` [PULL 54/56] tcg/optimize: Propagate sign info for setcond Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 2:41 ` [PULL 56/56] tcg/optimize: Propagate sign info for shifting Richard Henderson
2021-10-28 14:51 ` [PULL 00/56] tcg patch queue Richard Henderson
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
The results are generally 6-bit unsigned values, though
the count-leading and count-trailing-zeros ops may produce
any value for a zero input.
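For a nonzero 64-bit input, count-leading/trailing-zeros yields 0..63; a
zero input instead returns the op's third ("default") operand, which is
why its z_mask is OR'ed into the result mask in the hunk below. A tiny
standalone check of the range claim (illustrative value):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0x0000000080000000ull;   /* bit 31 set */
        assert(__builtin_clzll(x) == 32);     /* 0..63 for any x != 0 */
        assert(__builtin_ctzll(x) == 31);
        return 0;
    }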
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 5fa4d7285d..c0eccc61d6 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1229,7 +1229,7 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
g_assert_not_reached();
}
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
return false;
}
@@ -1249,6 +1249,7 @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
default:
g_assert_not_reached();
}
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
return false;
}
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* [PULL 56/56] tcg/optimize: Propagate sign info for shifting
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (54 preceding siblings ...)
2021-10-28 2:41 ` [PULL 55/56] tcg/optimize: Propagate sign info for bit counting Richard Henderson
@ 2021-10-28 2:41 ` Richard Henderson
2021-10-28 14:51 ` [PULL 00/56] tcg patch queue Richard Henderson
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 2:41 UTC (permalink / raw)
To: qemu-devel; +Cc: Luis Pires, Alex Bennée
For constant shifts, we can simply shift the s_mask.
For variable shifts, we know that sar does not reduce
the s_mask, which helps for sequences like
    ext32s_i64 t, in
    sar_i64    t, t, v
    ext32s_i64 out, t
allowing the final extend to be eliminated.
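Concretely for the sequence above: the first ext32s leaves at least 32
redundant sign bits, an arithmetic right shift can only lengthen that
run, so the final ext32s is a no-op and folds to a copy. A standalone
check (illustrative value; assumes arithmetic >> for signed types, as
GCC and Clang provide):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t t = (int32_t)0x9abcdef0;   /* after ext32s_i64 */
        for (int v = 0; v < 64; v++) {
            int64_t shifted = t >> v;      /* sar_i64 by v */
            /* sar does not reduce the number of sign repetitions... */
            assert(__builtin_clrsbll(shifted) >= __builtin_clrsbll(t));
            /* ...so the trailing ext32s changes nothing. */
            assert((int64_t)(int32_t)shifted == shifted);
        }
        return 0;
    }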
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 47 insertions(+), 3 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index c0eccc61d6..dbb2d46e88 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -85,6 +85,18 @@ static uint64_t smask_from_zmask(uint64_t zmask)
return ~(~0ull >> rep);
}
+/*
+ * Recreate a properly left-aligned smask after manipulation.
+ * Some bit-shuffling, particularly shifts and rotates, may
+ * retain sign bits on the left, but may scatter disconnected
+ * sign bits on the right. Retain only what remains to the left.
+ */
+static uint64_t smask_from_smask(int64_t smask)
+{
+ /* Only the 1 bits are significant for smask */
+ return smask_from_zmask(~smask);
+}
+
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
return ts->state_ptr;
@@ -1843,18 +1855,50 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
+ uint64_t s_mask, z_mask, sign;
+
if (fold_const2(ctx, op) ||
fold_ix_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, 0)) {
return true;
}
+ s_mask = arg_info(op->args[1])->s_mask;
+ z_mask = arg_info(op->args[1])->z_mask;
+
if (arg_is_const(op->args[2])) {
- ctx->z_mask = do_constant_folding(op->opc, ctx->type,
- arg_info(op->args[1])->z_mask,
- arg_info(op->args[2])->val);
+ int sh = arg_info(op->args[2])->val;
+
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
+
+ s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
+ ctx->s_mask = smask_from_smask(s_mask);
+
return fold_masks(ctx, op);
}
+
+ switch (op->opc) {
+ CASE_OP_32_64(sar):
+ /*
+ * Arithmetic right shift will not reduce the number of
+ * input sign repetitions.
+ */
+ ctx->s_mask = s_mask;
+ break;
+ CASE_OP_32_64(shr):
+ /*
+ * If the sign bit is known zero, then logical right shift
+ * will not reduce the number of input sign repetitions.
+ */
+ sign = (s_mask & -s_mask) >> 1;
+ if (!(z_mask & sign)) {
+ ctx->s_mask = s_mask;
+ }
+ break;
+ default:
+ break;
+ }
+
return false;
}
--
2.25.1
^ permalink raw reply related [flat|nested] 58+ messages in thread
* Re: [PULL 00/56] tcg patch queue
2021-10-28 2:40 [PULL 00/56] tcg patch queue Richard Henderson
` (55 preceding siblings ...)
2021-10-28 2:41 ` [PULL 56/56] tcg/optimize: Propagate sign info for shifting Richard Henderson
@ 2021-10-28 14:51 ` Richard Henderson
56 siblings, 0 replies; 58+ messages in thread
From: Richard Henderson @ 2021-10-28 14:51 UTC (permalink / raw)
To: qemu-devel
On 10/27/21 7:40 PM, Richard Henderson wrote:
> The following changes since commit c52d69e7dbaaed0ffdef8125e79218672c30161d:
>
> Merge remote-tracking branch 'remotes/cschoenebeck/tags/pull-9p-20211027' into staging (2021-10-27 11:45:18 -0700)
>
> are available in the Git repository at:
>
> https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211027
>
> for you to fetch changes up to 820c025f0dcacf2f3c12735b1f162893fbfa7bc6:
>
> tcg/optimize: Propagate sign info for shifting (2021-10-27 17:11:23 -0700)
>
> ----------------------------------------------------------------
> Improvements to qemu/int128
> Fixes for 128/64 division.
> Cleanup tcg/optimize.c
> Optimize redundant sign extensions
>
> ----------------------------------------------------------------
> Frédéric Pétrot (1):
> qemu/int128: Add int128_{not,xor}
>
> Luis Pires (4):
> host-utils: move checks out of divu128/divs128
> host-utils: move udiv_qrnnd() to host-utils
> host-utils: add 128-bit quotient support to divu128/divs128
> host-utils: add unit tests for divu128/divs128
>
> Richard Henderson (51):
> tcg/optimize: Rename "mask" to "z_mask"
> tcg/optimize: Split out OptContext
> tcg/optimize: Remove do_default label
> tcg/optimize: Change tcg_opt_gen_{mov,movi} interface
> tcg/optimize: Move prev_mb into OptContext
> tcg/optimize: Split out init_arguments
> tcg/optimize: Split out copy_propagate
> tcg/optimize: Split out fold_call
> tcg/optimize: Drop nb_oargs, nb_iargs locals
> tcg/optimize: Change fail return for do_constant_folding_cond*
> tcg/optimize: Return true from tcg_opt_gen_{mov,movi}
> tcg/optimize: Split out finish_folding
> tcg/optimize: Use a boolean to avoid a mass of continues
> tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}
> tcg/optimize: Split out fold_const{1,2}
> tcg/optimize: Split out fold_setcond2
> tcg/optimize: Split out fold_brcond2
> tcg/optimize: Split out fold_brcond
> tcg/optimize: Split out fold_setcond
> tcg/optimize: Split out fold_mulu2_i32
> tcg/optimize: Split out fold_addsub2_i32
> tcg/optimize: Split out fold_movcond
> tcg/optimize: Split out fold_extract2
> tcg/optimize: Split out fold_extract, fold_sextract
> tcg/optimize: Split out fold_deposit
> tcg/optimize: Split out fold_count_zeros
> tcg/optimize: Split out fold_bswap
> tcg/optimize: Split out fold_dup, fold_dup2
> tcg/optimize: Split out fold_mov
> tcg/optimize: Split out fold_xx_to_i
> tcg/optimize: Split out fold_xx_to_x
> tcg/optimize: Split out fold_xi_to_i
> tcg/optimize: Add type to OptContext
> tcg/optimize: Split out fold_to_not
> tcg/optimize: Split out fold_sub_to_neg
> tcg/optimize: Split out fold_xi_to_x
> tcg/optimize: Split out fold_ix_to_i
> tcg/optimize: Split out fold_masks
> tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
> tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops
> tcg/optimize: Sink commutative operand swapping into fold functions
> tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values
> tcg/optimize: Use fold_xx_to_i for orc
> tcg/optimize: Use fold_xi_to_x for mul
> tcg/optimize: Use fold_xi_to_x for div
> tcg/optimize: Use fold_xx_to_i for rem
> tcg/optimize: Optimize sign extensions
> tcg/optimize: Propagate sign info for logical operations
> tcg/optimize: Propagate sign info for setcond
> tcg/optimize: Propagate sign info for bit counting
> tcg/optimize: Propagate sign info for shifting
>
> include/fpu/softfloat-macros.h | 82 --
> include/hw/clock.h | 5 +-
> include/qemu/host-utils.h | 121 +-
> include/qemu/int128.h | 20 +
> target/ppc/int_helper.c | 23 +-
> tcg/optimize.c | 2644 ++++++++++++++++++++++++----------------
> tests/unit/test-div128.c | 197 +++
> util/host-utils.c | 147 ++-
> tests/unit/meson.build | 1 +
> 9 files changed, 2053 insertions(+), 1187 deletions(-)
> create mode 100644 tests/unit/test-div128.c
Failed testing on s390x host.
r~
^ permalink raw reply [flat|nested] 58+ messages in thread