From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Philippe Mathieu-Daudé" <f4bug@amsat.org>
Subject: [PATCH for-6.2 35/43] target/mips: Use 8-byte memory ops for msa load/store
Date: Wed, 28 Jul 2021 14:46:39 -1000
Message-ID: <20210729004647.282017-36-richard.henderson@linaro.org>
In-Reply-To: <20210729004647.282017-1-richard.henderson@linaro.org>

Rather than using 4-16 separate operations, use 2 operations plus some
byte reordering as necessary.

Cc: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/msa_helper.c | 201 +++++++++++++----------------------
 1 file changed, 71 insertions(+), 130 deletions(-)
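
(Not part of the patch -- a standalone sketch for reviewers who want to
sanity-check the lane swaps without building QEMU.  bswap16x4/bswap32x2
copy the logic added below; my_bswap64() and my_ror64() are hand-rolled
stand-ins for QEMU's bswap64()/ror64(), so those names are mine, not the
tree's.)

#include <assert.h>
#include <stdint.h>

/* Stand-in for QEMU's bswap64(): full 8-byte reversal. */
static uint64_t my_bswap64(uint64_t x)
{
    x = ((x & 0x00ff00ff00ff00ffull) << 8)  | ((x >> 8)  & 0x00ff00ff00ff00ffull);
    x = ((x & 0x0000ffff0000ffffull) << 16) | ((x >> 16) & 0x0000ffff0000ffffull);
    return (x << 32) | (x >> 32);
}

/* Stand-in for QEMU's ror64(); valid for 0 < n < 64. */
static uint64_t my_ror64(uint64_t x, unsigned n)
{
    return (x >> n) | (x << (64 - n));
}

/* Same logic as the helpers introduced by this patch. */
static uint64_t bswap16x4(uint64_t x)
{
    uint64_t m = 0x00ff00ff00ff00ffull;
    return ((x & m) << 8) | ((x >> 8) & m);
}

static uint64_t bswap32x2(uint64_t x)
{
    return my_ror64(my_bswap64(x), 32);
}

int main(void)
{
    /* Bytes are swapped within each 16-bit lane... */
    assert(bswap16x4(0x0123456789abcdefull) == 0x23016745ab89efcdull);
    /* ...and within each 32-bit lane. */
    assert(bswap32x2(0x0123456789abcdefull) == 0x67452301efcdab89ull);
    return 0;
}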

diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
index a8880ce81c..e40c1b7057 100644
--- a/target/mips/tcg/msa_helper.c
+++ b/target/mips/tcg/msa_helper.c
@@ -8218,47 +8218,31 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
 #define MEMOP_IDX(DF)
 #endif
 
+#ifdef TARGET_WORDS_BIGENDIAN
+static inline uint64_t bswap16x4(uint64_t x)
+{
+    uint64_t m = 0x00ff00ff00ff00ffull;
+    return ((x & m) << 8) | ((x >> 8) & m);
+}
+
+static inline uint64_t bswap32x2(uint64_t x)
+{
+    return ror64(bswap64(x), 32);
+}
+#endif
+
 void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
                      target_ulong addr)
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;
 
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->b[0]  = cpu_ldub_data_ra(env, addr + (0  << DF_BYTE), ra);
-    pwd->b[1]  = cpu_ldub_data_ra(env, addr + (1  << DF_BYTE), ra);
-    pwd->b[2]  = cpu_ldub_data_ra(env, addr + (2  << DF_BYTE), ra);
-    pwd->b[3]  = cpu_ldub_data_ra(env, addr + (3  << DF_BYTE), ra);
-    pwd->b[4]  = cpu_ldub_data_ra(env, addr + (4  << DF_BYTE), ra);
-    pwd->b[5]  = cpu_ldub_data_ra(env, addr + (5  << DF_BYTE), ra);
-    pwd->b[6]  = cpu_ldub_data_ra(env, addr + (6  << DF_BYTE), ra);
-    pwd->b[7]  = cpu_ldub_data_ra(env, addr + (7  << DF_BYTE), ra);
-    pwd->b[8]  = cpu_ldub_data_ra(env, addr + (8  << DF_BYTE), ra);
-    pwd->b[9]  = cpu_ldub_data_ra(env, addr + (9  << DF_BYTE), ra);
-    pwd->b[10] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
-    pwd->b[11] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
-    pwd->b[12] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
-    pwd->b[13] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
-    pwd->b[14] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
-    pwd->b[15] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
-#else
-    pwd->b[0]  = cpu_ldub_data_ra(env, addr + (7  << DF_BYTE), ra);
-    pwd->b[1]  = cpu_ldub_data_ra(env, addr + (6  << DF_BYTE), ra);
-    pwd->b[2]  = cpu_ldub_data_ra(env, addr + (5  << DF_BYTE), ra);
-    pwd->b[3]  = cpu_ldub_data_ra(env, addr + (4  << DF_BYTE), ra);
-    pwd->b[4]  = cpu_ldub_data_ra(env, addr + (3  << DF_BYTE), ra);
-    pwd->b[5]  = cpu_ldub_data_ra(env, addr + (2  << DF_BYTE), ra);
-    pwd->b[6]  = cpu_ldub_data_ra(env, addr + (1  << DF_BYTE), ra);
-    pwd->b[7]  = cpu_ldub_data_ra(env, addr + (0  << DF_BYTE), ra);
-    pwd->b[8]  = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
-    pwd->b[9]  = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
-    pwd->b[10] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
-    pwd->b[11] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
-    pwd->b[12] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
-    pwd->b[13] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
-    pwd->b[14] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
-    pwd->b[15] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
-#endif
+    /* Load 8 bytes at a time.  Vector element ordering makes this LE.  */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
 }
 
 void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
@@ -8266,26 +8250,20 @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;
 
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->h[0] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
-    pwd->h[1] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
-    pwd->h[2] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
-    pwd->h[3] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
-    pwd->h[4] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
-    pwd->h[5] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
-    pwd->h[6] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
-    pwd->h[7] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
-#else
-    pwd->h[0] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
-    pwd->h[1] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
-    pwd->h[2] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
-    pwd->h[3] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
-    pwd->h[4] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
-    pwd->h[5] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
-    pwd->h[6] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
-    pwd->h[7] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
+    /*
+     * Load 8 bytes at a time.  Use a little-endian load; for a
+     * big-endian target, we must then swap the four halfwords.
+     */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap16x4(d0);
+    d1 = bswap16x4(d1);
 #endif
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
 }
 
 void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
@@ -8293,18 +8271,20 @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;
 
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->w[0] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
-    pwd->w[1] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
-    pwd->w[2] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
-    pwd->w[3] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
-#else
-    pwd->w[0] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
-    pwd->w[1] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
-    pwd->w[2] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
-    pwd->w[3] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
+    /*
+     * Load 8 bytes at a time.  Use a little-endian load; for a
+     * big-endian target, we must then bswap the two words.
+     */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap32x2(d0);
+    d1 = bswap32x2(d1);
 #endif
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
 }
 
 void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
@@ -8312,9 +8292,12 @@ void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;
 
-    pwd->d[0] = cpu_ldq_data_ra(env, addr + (0 << DF_DOUBLE), ra);
-    pwd->d[1] = cpu_ldq_data_ra(env, addr + (1 << DF_DOUBLE), ra);
+    d0 = cpu_ldq_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_data_ra(env, addr + 8, ra);
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
 }
 
 #define MSA_PAGESPAN(x) \
@@ -8344,41 +8327,9 @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
 
     ensure_writable_pages(env, addr, mmu_idx, ra);
 
-#if !defined(HOST_WORDS_BIGENDIAN)
-    cpu_stb_data_ra(env, addr + (0  << DF_BYTE), pwd->b[0], ra);
-    cpu_stb_data_ra(env, addr + (1  << DF_BYTE), pwd->b[1], ra);
-    cpu_stb_data_ra(env, addr + (2  << DF_BYTE), pwd->b[2], ra);
-    cpu_stb_data_ra(env, addr + (3  << DF_BYTE), pwd->b[3], ra);
-    cpu_stb_data_ra(env, addr + (4  << DF_BYTE), pwd->b[4], ra);
-    cpu_stb_data_ra(env, addr + (5  << DF_BYTE), pwd->b[5], ra);
-    cpu_stb_data_ra(env, addr + (6  << DF_BYTE), pwd->b[6], ra);
-    cpu_stb_data_ra(env, addr + (7  << DF_BYTE), pwd->b[7], ra);
-    cpu_stb_data_ra(env, addr + (8  << DF_BYTE), pwd->b[8], ra);
-    cpu_stb_data_ra(env, addr + (9  << DF_BYTE), pwd->b[9], ra);
-    cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[10], ra);
-    cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[11], ra);
-    cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[12], ra);
-    cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[13], ra);
-    cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[14], ra);
-    cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[15], ra);
-#else
-    cpu_stb_data_ra(env, addr + (7  << DF_BYTE), pwd->b[0], ra);
-    cpu_stb_data_ra(env, addr + (6  << DF_BYTE), pwd->b[1], ra);
-    cpu_stb_data_ra(env, addr + (5  << DF_BYTE), pwd->b[2], ra);
-    cpu_stb_data_ra(env, addr + (4  << DF_BYTE), pwd->b[3], ra);
-    cpu_stb_data_ra(env, addr + (3  << DF_BYTE), pwd->b[4], ra);
-    cpu_stb_data_ra(env, addr + (2  << DF_BYTE), pwd->b[5], ra);
-    cpu_stb_data_ra(env, addr + (1  << DF_BYTE), pwd->b[6], ra);
-    cpu_stb_data_ra(env, addr + (0  << DF_BYTE), pwd->b[7], ra);
-    cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[8], ra);
-    cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[9], ra);
-    cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[10], ra);
-    cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[11], ra);
-    cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[12], ra);
-    cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[13], ra);
-    cpu_stb_data_ra(env, addr + (9  << DF_BYTE), pwd->b[14], ra);
-    cpu_stb_data_ra(env, addr + (8  << DF_BYTE), pwd->b[15], ra);
-#endif
+    /* Store 8 bytes at a time.  Vector element ordering makes this LE.  */
+    cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra);
+    cpu_stq_le_data_ra(env, addr + 8, pwd->d[1], ra);
 }
 
 void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
@@ -8387,28 +8338,19 @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     int mmu_idx = cpu_mmu_index(env, false);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;
 
     ensure_writable_pages(env, addr, mmu_idx, ra);
 
-#if !defined(HOST_WORDS_BIGENDIAN)
-    cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[0], ra);
-    cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[1], ra);
-    cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[2], ra);
-    cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[3], ra);
-    cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[4], ra);
-    cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[5], ra);
-    cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[6], ra);
-    cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[7], ra);
-#else
-    cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[0], ra);
-    cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[1], ra);
-    cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[2], ra);
-    cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[3], ra);
-    cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[4], ra);
-    cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[5], ra);
-    cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[6], ra);
-    cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[7], ra);
+    /* Store 8 bytes at a time.  See helper_msa_ld_h. */
+    d0 = pwd->d[0];
+    d1 = pwd->d[1];
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap16x4(d0);
+    d1 = bswap16x4(d1);
 #endif
+    cpu_stq_le_data_ra(env, addr + 0, d0, ra);
+    cpu_stq_le_data_ra(env, addr + 8, d1, ra);
 }
 
 void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
@@ -8417,20 +8359,19 @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     int mmu_idx = cpu_mmu_index(env, false);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;
 
     ensure_writable_pages(env, addr, mmu_idx, ra);
 
-#if !defined(HOST_WORDS_BIGENDIAN)
-    cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[0], ra);
-    cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[1], ra);
-    cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[2], ra);
-    cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[3], ra);
-#else
-    cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[0], ra);
-    cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[1], ra);
-    cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[2], ra);
-    cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[3], ra);
+    /* Store 8 bytes at a time.  See helper_msa_ld_w. */
+    d0 = pwd->d[0];
+    d1 = pwd->d[1];
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap32x2(d0);
+    d1 = bswap32x2(d1);
 #endif
+    cpu_stq_le_data_ra(env, addr + 0, d0, ra);
+    cpu_stq_le_data_ra(env, addr + 8, d1, ra);
 }
 
 void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
@@ -8442,6 +8383,6 @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
 
     ensure_writable_pages(env, addr, mmu_idx, GETPC());
 
-    cpu_stq_data_ra(env, addr + (0 << DF_DOUBLE), pwd->d[0], ra);
-    cpu_stq_data_ra(env, addr + (1 << DF_DOUBLE), pwd->d[1], ra);
+    cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra);
+    cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra);
 }
-- 
2.25.1
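
(Reviewer note, also not part of the patch: the ld_h/st_h comments say a
little-endian 8-byte load followed by bswap16x4() gives the halfword
order a big-endian target expects.  The check below demonstrates the
arithmetic behind that: 16-bit lane i of bswap16x4(ldq_le(p)) equals the
big-endian halfword formed from p[2*i] and p[2*i+1].  ldq_le() here is a
plain byte-wise stand-in, not QEMU's cpu_ldq_le_data_ra() API.)

#include <assert.h>
#include <stdint.h>

/* Same logic as the patch's bswap16x4(). */
static uint64_t bswap16x4(uint64_t x)
{
    uint64_t m = 0x00ff00ff00ff00ffull;
    return ((x & m) << 8) | ((x >> 8) & m);
}

/* Arithmetic little-endian 8-byte load from a byte buffer. */
static uint64_t ldq_le(const uint8_t *p)
{
    uint64_t x = 0;
    for (int i = 0; i < 8; i++) {
        x |= (uint64_t)p[i] << (8 * i);
    }
    return x;
}

int main(void)
{
    const uint8_t mem[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
    uint64_t d0 = bswap16x4(ldq_le(mem));

    for (int i = 0; i < 4; i++) {
        uint16_t lane = d0 >> (16 * i);                    /* 16-bit lane i of d0 */
        uint16_t be = (mem[2 * i] << 8) | mem[2 * i + 1];  /* big-endian halfword */
        assert(lane == be);
    }
    return 0;
}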



Thread overview: 102+ messages
2021-07-29  0:46 [PATCH for-6.2 00/43] Unaligned accesses for user-only Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 01/43] hw/core: Make do_unaligned_access available to user-only Richard Henderson
2021-07-29  6:14   ` Philippe Mathieu-Daudé
2021-07-29  6:19   ` Philippe Mathieu-Daudé
2021-07-29 17:51     ` Richard Henderson
2021-07-29 13:05   ` Peter Maydell
2021-07-29  0:46 ` [PATCH for-6.2 02/43] target/alpha: Implement do_unaligned_access for user-only Richard Henderson
2021-07-29 13:05   ` Peter Maydell
2021-07-29  0:46 ` [PATCH for-6.2 03/43] target/arm: " Richard Henderson
2021-07-29 13:14   ` Peter Maydell
2021-07-29 18:51     ` Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 04/43] target/hppa: " Richard Henderson
2021-07-29 13:15   ` Peter Maydell
2021-07-29 17:55     ` Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 05/43] target/microblaze: " Richard Henderson
2021-07-29  8:26   ` Philippe Mathieu-Daudé
2021-07-29 13:26   ` Peter Maydell
2021-07-29 18:00     ` Richard Henderson
2021-07-29 18:44       ` Edgar E. Iglesias
2021-07-29  0:46 ` [PATCH for-6.2 06/43] target/mips: " Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 07/43] target/ppc: Set fault address in ppc_cpu_do_unaligned_access Richard Henderson
2021-07-29 13:44   ` Peter Maydell
2021-07-29 18:05     ` Richard Henderson
2021-07-30 17:13       ` Cédric Le Goater
2021-07-30 17:23         ` Cédric Le Goater
2021-07-30 16:58   ` Cédric Le Goater
2021-07-29  0:46 ` [PATCH for-6.2 08/43] target/ppc: Implement do_unaligned_access for user-only Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 09/43] target/riscv: " Richard Henderson
2021-07-30  6:13   ` Alistair Francis
2021-07-29  0:46 ` [PATCH for-6.2 10/43] target/s390x: " Richard Henderson
2021-07-29  8:03   ` David Hildenbrand
2021-07-29  0:46 ` [PATCH for-6.2 11/43] target/sh4: Set fault address in superh_cpu_do_unaligned_access Richard Henderson
2021-07-29  6:15   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 12/43] target/sh4: Implement do_unaligned_access for user-only Richard Henderson
2021-07-29 13:52   ` Peter Maydell
2021-07-30  0:01     ` Richard Henderson
2021-07-30 20:54     ` Rob Landley
2021-07-29  0:46 ` [PATCH for-6.2 13/43] target/sparc: Remove DEBUG_UNALIGNED Richard Henderson
2021-07-29  6:16   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 14/43] target/sparc: Set fault address in sparc_cpu_do_unaligned_access Richard Henderson
2021-07-29 14:51   ` Peter Maydell
2021-08-01 15:56     ` Mark Cave-Ayland
2021-08-01 15:59       ` Peter Maydell
2021-08-01 16:13         ` Mark Cave-Ayland
2021-07-29  0:46 ` [PATCH for-6.2 15/43] target/sparc: Implement do_unaligned_access for user-only Richard Henderson
2021-07-29  9:40   ` Philippe Mathieu-Daudé
2021-07-29 18:20     ` Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 16/43] target/xtensa: " Richard Henderson
2021-07-29  8:10   ` Philippe Mathieu-Daudé
2021-07-29 14:55   ` Peter Maydell
2021-07-29 18:22     ` Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 17/43] accel/tcg: Report unaligned atomics " Richard Henderson
2021-07-29 15:02   ` Peter Maydell
2021-07-29 19:55     ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 18/43] accel/tcg: Drop signness in tracing in cputlb.c Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 19/43] tcg: Expand MO_SIZE to 3 bits Richard Henderson
2021-07-29  6:23   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 20/43] tcg: Rename TCGMemOpIdx to MemOpIdx Richard Henderson
2021-07-29  6:27   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 21/43] tcg: Split out MemOpIdx to exec/memopidx.h Richard Henderson
2021-07-29  6:27   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 22/43] trace/mem: Pass MemOpIdx to trace_mem_get_info Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 23/43] accel/tcg: Remove double bswap for helper_atomic_sto_*_mmu Richard Henderson
2021-07-29  6:29   ` [PATCH for-6.1? " Philippe Mathieu-Daudé
2021-07-29 18:37     ` Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 24/43] accel/tcg: Pass MemOpIdx to atomic_trace_*_post Richard Henderson
2021-07-29  6:31   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 25/43] plugins: Reorg arguments to qemu_plugin_vcpu_mem_cb Richard Henderson
2021-08-30 21:26   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 26/43] trace: Split guest_mem_before Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 27/43] target/arm: Use MO_128 for 16 byte atomics Richard Henderson
2021-07-29  6:32   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 28/43] target/i386: " Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 29/43] target/ppc: " Richard Henderson
2021-07-29  6:34   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 30/43] target/s390x: " Richard Henderson
2021-07-29  6:33   ` Philippe Mathieu-Daudé
2021-07-29  8:04   ` David Hildenbrand
2021-07-29  0:46 ` [PATCH for-6.2 31/43] target/hexagon: Implement cpu_mmu_index Richard Henderson
2021-07-29  2:37   ` Taylor Simpson
2021-07-29  6:35   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 32/43] accel/tcg: Add cpu_{ld,st}*_mmu interfaces Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 33/43] accel/tcg: Move cpu_atomic decls to exec/cpu_ldst.h Richard Henderson
2021-07-29  7:36   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 34/43] target/mips: Use cpu_*_data_ra for msa load/store Richard Henderson
2021-07-29  7:38   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` Richard Henderson [this message]
2021-07-29  0:46 ` [PATCH for-6.2 36/43] target/s390x: Use cpu_*_mmu instead of helper_*_mmu Richard Henderson
2021-07-29  7:39   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 37/43] target/sparc: " Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 38/43] target/arm: " Richard Henderson
2021-07-29  7:41   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 39/43] tcg: Move helper_*_mmu decls to tcg/tcg-ldst.h Richard Henderson
2021-07-29  7:42   ` Philippe Mathieu-Daudé
2021-07-29  0:46 ` [PATCH for-6.2 40/43] linux-user/alpha: Remove TARGET_ALIGNED_ONLY Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 41/43] tcg: Add helper_unaligned_mmu for user-only sigbus Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 42/43] tcg/i386: Support raising sigbus for user-only Richard Henderson
2021-07-29  0:46 ` [PATCH for-6.2 43/43] tests/tcg/multiarch: Add sigbus.c Richard Henderson
2021-07-29  6:14 ` [PATCH for-6.2 00/43] Unaligned accesses for user-only Philippe Mathieu-Daudé
2021-07-29 14:01   ` Claudio Fontana
2021-08-02 13:14 ` Peter Maydell
