From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: edgar.iglesias@xilinx.com
Subject: [PATCH v2 53/76] target/microblaze: Convert dec_load and dec_store to decodetree
Date: Fri, 28 Aug 2020 07:19:06 -0700
Message-ID: <20200828141929.77854-54-richard.henderson@linaro.org>
In-Reply-To: <20200828141929.77854-1-richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/insns.decode |  32 ++
 target/microblaze/translate.c  | 723 +++++++++++++++++++--------------
 2 files changed, 456 insertions(+), 299 deletions(-)

diff --git a/target/microblaze/insns.decode b/target/microblaze/insns.decode
index 87e8f5679b..47b92b9cbc 100644
--- a/target/microblaze/insns.decode
+++ b/target/microblaze/insns.decode
@@ -100,6 +100,22 @@ idivu           010010 ..... ..... ..... 000 0000 0010  @typea
 
 imm             101100 00000 00000 imm:16
 
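+# In the load/store patterns below, "." bits are operand fields collected
+# by the @typea (rd/ra/rb) and @typeb (rd/ra/imm) formats defined earlier
+# in this file.
+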
+lbu             110000 ..... ..... ..... 0000 000 0000  @typea
+lbur            110000 ..... ..... ..... 0100 000 0000  @typea
+lbuea           110000 ..... ..... ..... 0001 000 0000  @typea
+lbui            111000 ..... ..... ................     @typeb
+
+lhu             110001 ..... ..... ..... 0000 000 0000  @typea
+lhur            110001 ..... ..... ..... 0100 000 0000  @typea
+lhuea           110001 ..... ..... ..... 0001 000 0000  @typea
+lhui            111001 ..... ..... ................     @typeb
+
+lw              110010 ..... ..... ..... 0000 000 0000  @typea
+lwr             110010 ..... ..... ..... 0100 000 0000  @typea
+lwea            110010 ..... ..... ..... 0001 000 0000  @typea
+lwx             110010 ..... ..... ..... 1000 000 0000  @typea
+lwi             111010 ..... ..... ................     @typeb
+
 mul             010000 ..... ..... ..... 000 0000 0000  @typea
 mulh            010000 ..... ..... ..... 000 0000 0001  @typea
 mulhu           010000 ..... ..... ..... 000 0000 0011  @typea
@@ -123,6 +139,22 @@ rsubic          001011 ..... ..... ................     @typeb
 rsubik          001101 ..... ..... ................     @typeb
 rsubikc         001111 ..... ..... ................     @typeb
 
+sb              110100 ..... ..... ..... 0000 000 0000  @typea
+sbr             110100 ..... ..... ..... 0100 000 0000  @typea
+sbea            110100 ..... ..... ..... 0001 000 0000  @typea
+sbi             111100 ..... ..... ................     @typeb
+
+sh              110101 ..... ..... ..... 0000 000 0000  @typea
+shr             110101 ..... ..... ..... 0100 000 0000  @typea
+shea            110101 ..... ..... ..... 0001 000 0000  @typea
+shi             111101 ..... ..... ................     @typeb
+
+sw              110110 ..... ..... ..... 0000 000 0000  @typea
+swr             110110 ..... ..... ..... 0100 000 0000  @typea
+swea            110110 ..... ..... ..... 0001 000 0000  @typea
+swx             110110 ..... ..... ..... 1000 000 0000  @typea
+swi             111110 ..... ..... ................     @typeb
+
 sext8           100100 ..... ..... 00000 000 0110 0000  @typea0
 sext16          100100 ..... ..... 00000 000 0110 0001  @typea0
 
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index a55e110171..d2baa7db0e 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -105,6 +105,17 @@ static inline void t_sync_flags(DisasContext *dc)
     }
 }
 
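+/*
+ * If we get a fault in a branch delay slot, the branch state must be
+ * in sync, so resolve a direct branch into the indirect state here.
+ */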
+static inline void sync_jmpstate(DisasContext *dc)
+{
+    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
+        if (dc->jmp == JMP_DIRECT) {
+            tcg_gen_movi_i32(cpu_btaken, 1);
+        }
+        dc->jmp = JMP_INDIRECT;
+        tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
+    }
+}
+
 static void gen_raise_exception(DisasContext *dc, uint32_t index)
 {
     TCGv_i32 tmp = tcg_const_i32(index);
@@ -668,6 +679,419 @@ static bool trans_wdic(DisasContext *dc, arg_wdic *a)
 DO_TYPEA(xor, false, tcg_gen_xor_i32)
 DO_TYPEBI(xori, false, tcg_gen_xori_i32)
 
+static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
+{
+    TCGv ret = tcg_temp_new();
+
+    /* If any of the regs is r0, set ret to the value of the other reg.  */
+    if (ra && rb) {
+        TCGv_i32 tmp = tcg_temp_new_i32();
+        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
+        tcg_gen_extu_i32_tl(ret, tmp);
+        tcg_temp_free_i32(tmp);
+    } else if (ra) {
+        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
+    } else if (rb) {
+        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+    } else {
+        tcg_gen_movi_tl(ret, 0);
+    }
+
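+    /* r1 is the stack pointer; check stack protection when configured.  */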
+    if ((ra == 1 || rb == 1) && dc->cpu->cfg.stackprot) {
+        gen_helper_stackprot(cpu_env, ret);
+    }
+    return ret;
+}
+
+static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
+{
+    TCGv ret = tcg_temp_new();
+
+    /* If ra is r0, the address is simply the immediate.  */
+    if (ra) {
+        TCGv_i32 tmp = tcg_temp_new_i32();
+        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
+        tcg_gen_extu_i32_tl(ret, tmp);
+        tcg_temp_free_i32(tmp);
+    } else {
+        tcg_gen_movi_tl(ret, (uint32_t)imm);
+    }
+
+    if (ra == 1 && dc->cpu->cfg.stackprot) {
+        gen_helper_stackprot(cpu_env, ret);
+    }
+    return ret;
+}
+
+static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
+{
+    int addr_size = dc->cpu->cfg.addr_size;
+    TCGv ret = tcg_temp_new();
+
+    if (addr_size == 32 || ra == 0) {
+        if (rb) {
+            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+        } else {
+            tcg_gen_movi_tl(ret, 0);
+        }
+    } else {
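+        /* Form a 64-bit address: ra supplies the high word, rb the low.  */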
+        if (rb) {
+            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
+        } else {
+            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
+            tcg_gen_shli_tl(ret, ret, 32);
+        }
+        if (addr_size < 64) {
+            /* Mask off out of range bits.  */
+            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
+        }
+    }
+    return ret;
+}
+
+static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
+                    int mem_index, bool rev)
+{
+    TCGv_i32 v;
+    MemOp size = mop & MO_SIZE;
+
+    /*
+     * When doing reverse accesses we need to do two things.
+     *
+     * 1. Reverse the address wrt endianness.
+     * 2. Byteswap the data lanes on the way back into the CPU core.
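+     *
+     * Illustratively: a reversed halfword access to address A is issued
+     * at A ^ 2 with its two bytes swapped by MO_BSWAP, while a byte
+     * access only flips the address (A ^ 3).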
+     */
+    if (rev) {
+        if (size > MO_8) {
+            mop ^= MO_BSWAP;
+        }
+        if (size < MO_32) {
+            tcg_gen_xori_tl(addr, addr, 3 - size);
+        }
+    }
+
+    t_sync_flags(dc);
+    sync_jmpstate(dc);
+
+    /*
+     * Microblaze gives MMU faults priority over faults due to
+     * unaligned addresses. That's why we speculatively do the load
+     * into v. If the load succeeds, we verify alignment of the
+     * address and if that succeeds we write into the destination reg.
+     */
+    v = tcg_temp_new_i32();
+    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
+
+    /* TODO: Convert to CPUClass::do_unaligned_access.  */
+    if (dc->cpu->cfg.unaligned_exceptions && size > MO_8) {
+        TCGv_i32 t0 = tcg_const_i32(0);
+        TCGv_i32 treg = tcg_const_i32(rd);
+        TCGv_i32 tsize = tcg_const_i32((1 << size) - 1);
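+        /* t0 = 0 distinguishes this load from a store (do_store passes 1). */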
+
+        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
+        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
+
+        tcg_temp_free_i32(t0);
+        tcg_temp_free_i32(treg);
+        tcg_temp_free_i32(tsize);
+    }
+
+    if (rd) {
+        tcg_gen_mov_i32(cpu_R[rd], v);
+    }
+
+    tcg_temp_free_i32(v);
+    tcg_temp_free(addr);
+    return true;
+}
+
+static bool trans_lbu(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
+}
+
+static bool trans_lbur(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
+}
+
+static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
+{
+    if (trap_userspace(dc, true)) {
+        return true;
+    }
+    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+}
+
+static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
+{
+    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
+}
+
+static bool trans_lhu(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+}
+
+static bool trans_lhur(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
+}
+
+static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
+{
+    if (trap_userspace(dc, true)) {
+        return true;
+    }
+    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
+}
+
+static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
+{
+    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+}
+
+static bool trans_lw(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+}
+
+static bool trans_lwr(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
+}
+
+static bool trans_lwea(DisasContext *dc, arg_typea *arg)
+{
+    if (trap_userspace(dc, true)) {
+        return true;
+    }
+    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
+}
+
+static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
+{
+    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+}
+
+static bool trans_lwx(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+
+    /* lwx does not throw unaligned access errors, so force alignment */
+    tcg_gen_andi_tl(addr, addr, ~3);
+
+    t_sync_flags(dc);
+    sync_jmpstate(dc);
+
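+    /* Record the reservation; swx compares both the address and value.  */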
+    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
+    tcg_gen_mov_tl(cpu_res_addr, addr);
+    tcg_temp_free(addr);
+
+    if (arg->rd) {
+        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
+    }
+
+    /* No support for AXI exclusive so always clear C */
+    tcg_gen_movi_i32(cpu_msr_c, 0);
+    return true;
+}
+
+static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
+                     int mem_index, bool rev)
+{
+    MemOp size = mop & MO_SIZE;
+
+    /*
+     * When doing reverse accesses we need to do two things.
+     *
+     * 1. Reverse the address wrt endianness.
+     * 2. Byteswap the data lanes on the way back into the CPU core.
+     */
+    if (rev) {
+        if (size > MO_8) {
+            mop ^= MO_BSWAP;
+        }
+        if (size < MO_32) {
+            tcg_gen_xori_tl(addr, addr, 3 - size);
+        }
+    }
+
+    t_sync_flags(dc);
+    sync_jmpstate(dc);
+
+    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
+
+    /* TODO: Convert to CPUClass::do_unaligned_access.  */
+    if (dc->cpu->cfg.unaligned_exceptions && size > MO_8) {
+        TCGv_i32 t1 = tcg_const_i32(1);
+        TCGv_i32 treg = tcg_const_i32(rd);
+        TCGv_i32 tsize = tcg_const_i32((1 << size) - 1);
+
+        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
+        /* FIXME: if the alignment is wrong, we should restore the value
+         *        in memory. One possible way to achieve this is to probe
+         *        the MMU prior to the memaccess, that way we could put
+         *        the alignment checks in between the probe and the mem
+         *        access.
+         */
+        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
+
+        tcg_temp_free_i32(t1);
+        tcg_temp_free_i32(treg);
+        tcg_temp_free_i32(tsize);
+    }
+
+    tcg_temp_free(addr);
+    return true;
+}
+
+static bool trans_sb(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
+}
+
+static bool trans_sbr(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
+}
+
+static bool trans_sbea(DisasContext *dc, arg_typea *arg)
+{
+    if (trap_userspace(dc, true)) {
+        return true;
+    }
+    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+}
+
+static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
+{
+    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
+}
+
+static bool trans_sh(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+}
+
+static bool trans_shr(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
+}
+
+static bool trans_shea(DisasContext *dc, arg_typea *arg)
+{
+    if (trap_userspace(dc, true)) {
+        return true;
+    }
+    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
+}
+
+static bool trans_shi(DisasContext *dc, arg_typeb *arg)
+{
+    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+}
+
+static bool trans_sw(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+}
+
+static bool trans_swr(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
+}
+
+static bool trans_swea(DisasContext *dc, arg_typea *arg)
+{
+    if (trap_userspace(dc, true)) {
+        return true;
+    }
+    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
+}
+
+static bool trans_swi(DisasContext *dc, arg_typeb *arg)
+{
+    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+}
+
+static bool trans_swx(DisasContext *dc, arg_typea *arg)
+{
+    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+    TCGLabel *swx_done = gen_new_label();
+    TCGLabel *swx_fail = gen_new_label();
+    TCGv_i32 tval;
+
+    t_sync_flags(dc);
+    sync_jmpstate(dc);
+
+    /* swx does not throw unaligned access errors, so force alignment */
+    tcg_gen_andi_tl(addr, addr, ~3);
+
+    /*
+     * Compare the address vs the one we used during lwx.
+     * On mismatch, the operation fails.  On match, addr dies at the
+     * branch, but we know we can use the equal version in the global.
+     * In either case, addr is no longer needed.
+     */
+    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
+    tcg_temp_free(addr);
+
+    /*
+     * Compare the value loaded during lwx with current contents of
+     * the reserved location.
+     */
+    tval = tcg_temp_new_i32();
+
+    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
+                               reg_for_write(dc, arg->rd),
+                               dc->mem_index, MO_TEUL);
+
+    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
+    tcg_temp_free_i32(tval);
+
+    /* Success */
+    tcg_gen_movi_i32(cpu_msr_c, 0);
+    tcg_gen_br(swx_done);
+
+    /* Failure */
+    gen_set_label(swx_fail);
+    tcg_gen_movi_i32(cpu_msr_c, 1);
+
+    gen_set_label(swx_done);
+
+    /*
+     * Prevent the saved address from working again without another ldx.
+     * Akin to the pseudocode setting reservation = 0.
+     */
+    tcg_gen_movi_tl(cpu_res_addr, -1);
+    return true;
+}
+
 static bool trans_zero(DisasContext *dc, arg_zero *arg)
 {
     /* If opcode_0_illegal, trap.  */
@@ -887,303 +1311,6 @@ static void dec_msr(DisasContext *dc)
     }
 }
 
-static inline void sync_jmpstate(DisasContext *dc)
-{
-    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
-        if (dc->jmp == JMP_DIRECT) {
-            tcg_gen_movi_i32(cpu_btaken, 1);
-        }
-        dc->jmp = JMP_INDIRECT;
-        tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
-    }
-}
-
-static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
-{
-    /* Should be set to true if r1 is used by loadstores.  */
-    bool stackprot = false;
-    TCGv_i32 t32;
-
-    /* All load/stores use ra.  */
-    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
-        stackprot = true;
-    }
-
-    /* Treat the common cases first.  */
-    if (!dc->type_b) {
-        if (ea) {
-            int addr_size = dc->cpu->cfg.addr_size;
-
-            if (addr_size == 32) {
-                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
-                return;
-            }
-
-            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
-            if (addr_size < 64) {
-                /* Mask off out of range bits.  */
-                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
-            }
-            return;
-        }
-
-        /* If any of the regs is r0, set t to the value of the other reg.  */
-        if (dc->ra == 0) {
-            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
-            return;
-        } else if (dc->rb == 0) {
-            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
-            return;
-        }
-
-        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
-            stackprot = true;
-        }
-
-        t32 = tcg_temp_new_i32();
-        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
-        tcg_gen_extu_i32_tl(t, t32);
-        tcg_temp_free_i32(t32);
-
-        if (stackprot) {
-            gen_helper_stackprot(cpu_env, t);
-        }
-        return;
-    }
-    /* Immediate.  */
-    t32 = tcg_temp_new_i32();
-    tcg_gen_addi_i32(t32, cpu_R[dc->ra], dec_alu_typeb_imm(dc));
-    tcg_gen_extu_i32_tl(t, t32);
-    tcg_temp_free_i32(t32);
-
-    if (stackprot) {
-        gen_helper_stackprot(cpu_env, t);
-    }
-    return;
-}
-
-static void dec_load(DisasContext *dc)
-{
-    TCGv_i32 v;
-    TCGv addr;
-    unsigned int size;
-    bool rev = false, ex = false, ea = false;
-    int mem_index = dc->mem_index;
-    MemOp mop;
-
-    mop = dc->opcode & 3;
-    size = 1 << mop;
-    if (!dc->type_b) {
-        ea = extract32(dc->ir, 7, 1);
-        rev = extract32(dc->ir, 9, 1);
-        ex = extract32(dc->ir, 10, 1);
-    }
-    mop |= MO_TE;
-    if (rev) {
-        mop ^= MO_BSWAP;
-    }
-
-    if (trap_illegal(dc, size > 4)) {
-        return;
-    }
-
-    if (trap_userspace(dc, ea)) {
-        return;
-    }
-
-    t_sync_flags(dc);
-    addr = tcg_temp_new();
-    compute_ldst_addr(dc, ea, addr);
-    /* Extended addressing bypasses the MMU.  */
-    mem_index = ea ? MMU_NOMMU_IDX : mem_index;
-
-    /*
-     * When doing reverse accesses we need to do two things.
-     *
-     * 1. Reverse the address wrt endianness.
-     * 2. Byteswap the data lanes on the way back into the CPU core.
-     */
-    if (rev && size != 4) {
-        /* Endian reverse the address. t is addr.  */
-        switch (size) {
-            case 1:
-            {
-                tcg_gen_xori_tl(addr, addr, 3);
-                break;
-            }
-
-            case 2:
-                /* 00 -> 10
-                   10 -> 00.  */
-                tcg_gen_xori_tl(addr, addr, 2);
-                break;
-            default:
-                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
-                break;
-        }
-    }
-
-    /* lwx does not throw unaligned access errors, so force alignment */
-    if (ex) {
-        tcg_gen_andi_tl(addr, addr, ~3);
-    }
-
-    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
-    sync_jmpstate(dc);
-
-    /* Verify alignment if needed.  */
-    /*
-     * Microblaze gives MMU faults priority over faults due to
-     * unaligned addresses. That's why we speculatively do the load
-     * into v. If the load succeeds, we verify alignment of the
-     * address and if that succeeds we write into the destination reg.
-     */
-    v = tcg_temp_new_i32();
-    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
-
-    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
-        TCGv_i32 t0 = tcg_const_i32(0);
-        TCGv_i32 treg = tcg_const_i32(dc->rd);
-        TCGv_i32 tsize = tcg_const_i32(size - 1);
-
-        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
-        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
-
-        tcg_temp_free_i32(t0);
-        tcg_temp_free_i32(treg);
-        tcg_temp_free_i32(tsize);
-    }
-
-    if (ex) {
-        tcg_gen_mov_tl(cpu_res_addr, addr);
-        tcg_gen_mov_i32(cpu_res_val, v);
-    }
-    if (dc->rd) {
-        tcg_gen_mov_i32(cpu_R[dc->rd], v);
-    }
-    tcg_temp_free_i32(v);
-
-    if (ex) { /* lwx */
-        /* no support for AXI exclusive so always clear C */
-        tcg_gen_movi_i32(cpu_msr_c, 0);
-    }
-
-    tcg_temp_free(addr);
-}
-
-static void dec_store(DisasContext *dc)
-{
-    TCGv addr;
-    TCGLabel *swx_skip = NULL;
-    unsigned int size;
-    bool rev = false, ex = false, ea = false;
-    int mem_index = dc->mem_index;
-    MemOp mop;
-
-    mop = dc->opcode & 3;
-    size = 1 << mop;
-    if (!dc->type_b) {
-        ea = extract32(dc->ir, 7, 1);
-        rev = extract32(dc->ir, 9, 1);
-        ex = extract32(dc->ir, 10, 1);
-    }
-    mop |= MO_TE;
-    if (rev) {
-        mop ^= MO_BSWAP;
-    }
-
-    if (trap_illegal(dc, size > 4)) {
-        return;
-    }
-
-    trap_userspace(dc, ea);
-
-    t_sync_flags(dc);
-    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
-    sync_jmpstate(dc);
-    /* SWX needs a temp_local.  */
-    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
-    compute_ldst_addr(dc, ea, addr);
-    /* Extended addressing bypasses the MMU.  */
-    mem_index = ea ? MMU_NOMMU_IDX : mem_index;
-
-    if (ex) { /* swx */
-        TCGv_i32 tval;
-
-        /* swx does not throw unaligned access errors, so force alignment */
-        tcg_gen_andi_tl(addr, addr, ~3);
-
-        tcg_gen_movi_i32(cpu_msr_c, 1);
-        swx_skip = gen_new_label();
-        tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_skip);
-
-        /*
-         * Compare the value loaded at lwx with current contents of
-         * the reserved location.
-         */
-        tval = tcg_temp_new_i32();
-
-        tcg_gen_atomic_cmpxchg_i32(tval, addr, cpu_res_val,
-                                   cpu_R[dc->rd], mem_index,
-                                   mop);
-
-        tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_skip);
-        tcg_gen_movi_i32(cpu_msr_c, 0);
-        tcg_temp_free_i32(tval);
-    }
-
-    if (rev && size != 4) {
-        /* Endian reverse the address. t is addr.  */
-        switch (size) {
-            case 1:
-            {
-                tcg_gen_xori_tl(addr, addr, 3);
-                break;
-            }
-
-            case 2:
-                /* 00 -> 10
-                   10 -> 00.  */
-                /* Force addr into the temp.  */
-                tcg_gen_xori_tl(addr, addr, 2);
-                break;
-            default:
-                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
-                break;
-        }
-    }
-
-    if (!ex) {
-        tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
-    }
-
-    /* Verify alignment if needed.  */
-    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
-        TCGv_i32 t1 = tcg_const_i32(1);
-        TCGv_i32 treg = tcg_const_i32(dc->rd);
-        TCGv_i32 tsize = tcg_const_i32(size - 1);
-
-        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
-        /* FIXME: if the alignment is wrong, we should restore the value
-         *        in memory. One possible way to achieve this is to probe
-         *        the MMU prior to the memaccess, thay way we could put
-         *        the alignment checks in between the probe and the mem
-         *        access.
-         */
-        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
-
-        tcg_temp_free_i32(t1);
-        tcg_temp_free_i32(treg);
-        tcg_temp_free_i32(tsize);
-    }
-
-    if (ex) {
-        gen_set_label(swx_skip);
-    }
-
-    tcg_temp_free(addr);
-}
-
 static inline void eval_cc(DisasContext *dc, unsigned int cc,
                            TCGv_i32 d, TCGv_i32 a)
 {
@@ -1491,8 +1618,6 @@ static struct decoder_info {
     };
     void (*dec)(DisasContext *dc);
 } decinfo[] = {
-    {DEC_LD, dec_load},
-    {DEC_ST, dec_store},
     {DEC_BR, dec_br},
     {DEC_BCC, dec_bcc},
     {DEC_RTS, dec_rts},
-- 
2.25.1


