All of lore.kernel.org
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong
@ 2015-05-15  4:49 Peter Crosthwaite
  2015-05-15  5:56 ` Edgar E. Iglesias
  2015-05-15 15:41 ` Richard Henderson
  0 siblings, 2 replies; 8+ messages in thread
From: Peter Crosthwaite @ 2015-05-15  4:49 UTC (permalink / raw)
  To: qemu-devel; +Cc: peter.maydell, edgari, afaerber, Peter Crosthwaite

Prepare for the conversion of the MicroBlaze TARGET_LONG type to 64 bits.
This in turn will allow a multi-arch QEMU build to include both
MicroBlaze and 64-bit CPU targets (notably AArch64).

Signed-off-by: Peter Crosthwaite <crosthwaite.peter@gmail.com>
---
 target-microblaze/helper.c    |   4 +-
 target-microblaze/translate.c | 609 +++++++++++++++++++++---------------------
 2 files changed, 311 insertions(+), 302 deletions(-)

diff --git a/target-microblaze/helper.c b/target-microblaze/helper.c
index 32896f4..839680b 100644
--- a/target-microblaze/helper.c
+++ b/target-microblaze/helper.c
@@ -66,7 +66,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
 
     /* Translate if the MMU is available and enabled.  */
     if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
-        target_ulong vaddr, paddr;
+        uint32_t vaddr, paddr;
         struct microblaze_mmu_lookup lu;
 
         hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
@@ -269,7 +269,7 @@ hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 {
     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
     CPUMBState *env = &cpu->env;
-    target_ulong vaddr, paddr = 0;
+    uint32_t vaddr, paddr = 0;
     struct microblaze_mmu_lookup lu;
     unsigned int hit;
 
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index 4068946..5918d88 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -43,23 +43,28 @@
 #define EXTRACT_FIELD(src, start, end) \
             (((src) >> start) & ((1 << (end - start + 1)) - 1))
 
-static TCGv env_debug;
+/*
+ * Microblaze does not support direct use of the target long type
+ * All TCG vars must be TCGv_i32
+ */
+
+static TCGv_i32 env_debug;
 static TCGv_ptr cpu_env;
-static TCGv cpu_R[32];
-static TCGv cpu_SR[18];
-static TCGv env_imm;
-static TCGv env_btaken;
-static TCGv env_btarget;
-static TCGv env_iflags;
-static TCGv env_res_addr;
-static TCGv env_res_val;
+static TCGv_i32 cpu_R[32];
+static TCGv_i32 cpu_SR[18];
+static TCGv_i32 env_imm;
+static TCGv_i32 env_btaken;
+static TCGv_i32 env_btarget;
+static TCGv_i32 env_iflags;
+static TCGv_i32 env_res_addr;
+static TCGv_i32 env_res_val;
 
 #include "exec/gen-icount.h"
 
 /* This is the state at translation time.  */
 typedef struct DisasContext {
     MicroBlazeCPU *cpu;
-    target_ulong pc;
+    uint32_t pc;
 
     /* Decoder.  */
     int type_b;
@@ -106,7 +111,7 @@ static inline void t_sync_flags(DisasContext *dc)
 {
     /* Synch the tb dependent flags between translator and runtime.  */
     if (dc->tb_flags != dc->synced_flags) {
-        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
+        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
         dc->synced_flags = dc->tb_flags;
     }
 }
@@ -116,53 +121,53 @@ static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
     TCGv_i32 tmp = tcg_const_i32(index);
 
     t_sync_flags(dc);
-    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+    tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
     gen_helper_raise_exception(cpu_env, tmp);
     tcg_temp_free_i32(tmp);
     dc->is_jmp = DISAS_UPDATE;
 }
 
-static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
 {
     TranslationBlock *tb;
     tb = dc->tb;
     if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
         tcg_gen_goto_tb(n);
-        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
+        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
         tcg_gen_exit_tb((uintptr_t)tb + n);
     } else {
-        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
+        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
         tcg_gen_exit_tb(0);
     }
 }
 
-static void read_carry(DisasContext *dc, TCGv d)
+static void read_carry(DisasContext *dc, TCGv_i32 d)
 {
-    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
+    tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
 }
 
 /*
  * write_carry sets the carry bits in MSR based on bit 0 of v.
  * v[31:1] are ignored.
  */
-static void write_carry(DisasContext *dc, TCGv v)
+static void write_carry(DisasContext *dc, TCGv_i32 v)
 {
-    TCGv t0 = tcg_temp_new();
-    tcg_gen_shli_tl(t0, v, 31);
-    tcg_gen_sari_tl(t0, t0, 31);
-    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
-    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
+    TCGv_i32 t0 = tcg_temp_new_i32();
+    tcg_gen_shli_i32(t0, v, 31);
+    tcg_gen_sari_i32(t0, t0, 31);
+    tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
+    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                     ~(MSR_C | MSR_CC));
-    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
-    tcg_temp_free(t0);
+    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
+    tcg_temp_free_i32(t0);
 }
 
 static void write_carryi(DisasContext *dc, bool carry)
 {
-    TCGv t0 = tcg_temp_new();
-    tcg_gen_movi_tl(t0, carry);
+    TCGv_i32 t0 = tcg_temp_new_i32();
+    tcg_gen_movi_i32(t0, carry);
     write_carry(dc, t0);
-    tcg_temp_free(t0);
+    tcg_temp_free_i32(t0);
 }
 
 /* True if ALU operand b is a small immediate that may deserve
@@ -173,13 +178,13 @@ static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
     return dc->type_b && !(dc->tb_flags & IMM_FLAG);
 }
 
-static inline TCGv *dec_alu_op_b(DisasContext *dc)
+static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
 {
     if (dc->type_b) {
         if (dc->tb_flags & IMM_FLAG)
-            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
+            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
         else
-            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
+            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
         return &env_imm;
     } else
         return &cpu_R[dc->rb];
@@ -188,7 +193,7 @@ static inline TCGv *dec_alu_op_b(DisasContext *dc)
 static void dec_add(DisasContext *dc)
 {
     unsigned int k, c;
-    TCGv cf;
+    TCGv_i32 cf;
 
     k = dc->opcode & 4;
     c = dc->opcode & 2;
@@ -202,15 +207,15 @@ static void dec_add(DisasContext *dc)
         /* k - keep carry, no need to update MSR.  */
         /* If rd == r0, it's a nop.  */
         if (dc->rd) {
-            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
 
             if (c) {
                 /* c - Add carry into the result.  */
-                cf = tcg_temp_new();
+                cf = tcg_temp_new_i32();
 
                 read_carry(dc, cf);
-                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
-                tcg_temp_free(cf);
+                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+                tcg_temp_free_i32(cf);
             }
         }
         return;
@@ -218,31 +223,31 @@ static void dec_add(DisasContext *dc)
 
     /* From now on, we can assume k is zero.  So we need to update MSR.  */
     /* Extract carry.  */
-    cf = tcg_temp_new();
+    cf = tcg_temp_new_i32();
     if (c) {
         read_carry(dc, cf);
     } else {
-        tcg_gen_movi_tl(cf, 0);
+        tcg_gen_movi_i32(cf, 0);
     }
 
     if (dc->rd) {
-        TCGv ncf = tcg_temp_new();
+        TCGv_i32 ncf = tcg_temp_new_i32();
         gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
-        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
-        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
         write_carry(dc, ncf);
-        tcg_temp_free(ncf);
+        tcg_temp_free_i32(ncf);
     } else {
         gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
         write_carry(dc, cf);
     }
-    tcg_temp_free(cf);
+    tcg_temp_free_i32(cf);
 }
 
 static void dec_sub(DisasContext *dc)
 {
     unsigned int u, cmp, k, c;
-    TCGv cf, na;
+    TCGv_i32 cf, na;
 
     u = dc->imm & 2;
     k = dc->opcode & 4;
@@ -268,15 +273,15 @@ static void dec_sub(DisasContext *dc)
         /* k - keep carry, no need to update MSR.  */
         /* If rd == r0, it's a nop.  */
         if (dc->rd) {
-            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
+            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
 
             if (c) {
                 /* c - Add carry into the result.  */
-                cf = tcg_temp_new();
+                cf = tcg_temp_new_i32();
 
                 read_carry(dc, cf);
-                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
-                tcg_temp_free(cf);
+                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+                tcg_temp_free_i32(cf);
             }
         }
         return;
@@ -284,30 +289,30 @@ static void dec_sub(DisasContext *dc)
 
     /* From now on, we can assume k is zero.  So we need to update MSR.  */
     /* Extract carry. And complement a into na.  */
-    cf = tcg_temp_new();
-    na = tcg_temp_new();
+    cf = tcg_temp_new_i32();
+    na = tcg_temp_new_i32();
     if (c) {
         read_carry(dc, cf);
     } else {
-        tcg_gen_movi_tl(cf, 1);
+        tcg_gen_movi_i32(cf, 1);
     }
 
     /* d = b + ~a + c. carry defaults to 1.  */
-    tcg_gen_not_tl(na, cpu_R[dc->ra]);
+    tcg_gen_not_i32(na, cpu_R[dc->ra]);
 
     if (dc->rd) {
-        TCGv ncf = tcg_temp_new();
+        TCGv_i32 ncf = tcg_temp_new_i32();
         gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
-        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
-        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
+        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
         write_carry(dc, ncf);
-        tcg_temp_free(ncf);
+        tcg_temp_free_i32(ncf);
     } else {
         gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
         write_carry(dc, cf);
     }
-    tcg_temp_free(cf);
-    tcg_temp_free(na);
+    tcg_temp_free_i32(cf);
+    tcg_temp_free_i32(na);
 }
 
 static void dec_pattern(DisasContext *dc)
@@ -318,7 +323,7 @@ static void dec_pattern(DisasContext *dc)
     if ((dc->tb_flags & MSR_EE_FLAG)
           && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
           && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
     }
 
@@ -333,29 +338,29 @@ static void dec_pattern(DisasContext *dc)
         case 2:
             LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
             if (dc->rd) {
-                TCGv t0 = tcg_temp_local_new();
+                TCGv_i32 t0 = tcg_temp_local_new_i32();
                 l1 = gen_new_label();
-                tcg_gen_movi_tl(t0, 1);
-                tcg_gen_brcond_tl(TCG_COND_EQ,
+                tcg_gen_movi_i32(t0, 1);
+                tcg_gen_brcond_i32(TCG_COND_EQ,
                                   cpu_R[dc->ra], cpu_R[dc->rb], l1);
-                tcg_gen_movi_tl(t0, 0);
+                tcg_gen_movi_i32(t0, 0);
                 gen_set_label(l1);
-                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
-                tcg_temp_free(t0);
+                tcg_gen_mov_i32(cpu_R[dc->rd], t0);
+                tcg_temp_free_i32(t0);
             }
             break;
         case 3:
             LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
             l1 = gen_new_label();
             if (dc->rd) {
-                TCGv t0 = tcg_temp_local_new();
-                tcg_gen_movi_tl(t0, 1);
-                tcg_gen_brcond_tl(TCG_COND_NE,
+                TCGv_i32 t0 = tcg_temp_local_new_i32();
+                tcg_gen_movi_i32(t0, 1);
+                tcg_gen_brcond_i32(TCG_COND_NE,
                                   cpu_R[dc->ra], cpu_R[dc->rb], l1);
-                tcg_gen_movi_tl(t0, 0);
+                tcg_gen_movi_i32(t0, 0);
                 gen_set_label(l1);
-                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
-                tcg_temp_free(t0);
+                tcg_gen_mov_i32(cpu_R[dc->rd], t0);
+                tcg_temp_free_i32(t0);
             }
             break;
         default:
@@ -381,9 +386,9 @@ static void dec_and(DisasContext *dc)
         return;
 
     if (not) {
-        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
     } else
-        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
 }
 
 static void dec_or(DisasContext *dc)
@@ -395,7 +400,7 @@ static void dec_or(DisasContext *dc)
 
     LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
     if (dc->rd)
-        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
 }
 
 static void dec_xor(DisasContext *dc)
@@ -407,31 +412,31 @@ static void dec_xor(DisasContext *dc)
 
     LOG_DIS("xor r%d\n", dc->rd);
     if (dc->rd)
-        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
 }
 
-static inline void msr_read(DisasContext *dc, TCGv d)
+static inline void msr_read(DisasContext *dc, TCGv_i32 d)
 {
-    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
+    tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
 }
 
-static inline void msr_write(DisasContext *dc, TCGv v)
+static inline void msr_write(DisasContext *dc, TCGv_i32 v)
 {
-    TCGv t;
+    TCGv_i32 t;
 
-    t = tcg_temp_new();
+    t = tcg_temp_new_i32();
     dc->cpustate_changed = 1;
     /* PVR bit is not writable.  */
-    tcg_gen_andi_tl(t, v, ~MSR_PVR);
-    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
-    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
-    tcg_temp_free(t);
+    tcg_gen_andi_i32(t, v, ~MSR_PVR);
+    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
+    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
+    tcg_temp_free_i32(t);
 }
 
 static void dec_msr(DisasContext *dc)
 {
     CPUState *cs = CPU(dc->cpu);
-    TCGv t0, t1;
+    TCGv_i32 t0, t1;
     unsigned int sr, to, rn;
     int mem_index = cpu_mmu_index(&dc->cpu->env);
 
@@ -455,7 +460,7 @@ static void dec_msr(DisasContext *dc)
 
         if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
-            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
             t_gen_raise_exception(dc, EXCP_HW_EXCP);
             return;
         }
@@ -463,20 +468,20 @@ static void dec_msr(DisasContext *dc)
         if (dc->rd)
             msr_read(dc, cpu_R[dc->rd]);
 
-        t0 = tcg_temp_new();
-        t1 = tcg_temp_new();
+        t0 = tcg_temp_new_i32();
+        t1 = tcg_temp_new_i32();
         msr_read(dc, t0);
-        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
+        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
 
         if (clr) {
-            tcg_gen_not_tl(t1, t1);
-            tcg_gen_and_tl(t0, t0, t1);
+            tcg_gen_not_i32(t1, t1);
+            tcg_gen_and_i32(t0, t0, t1);
         } else
-            tcg_gen_or_tl(t0, t0, t1);
+            tcg_gen_or_i32(t0, t0, t1);
         msr_write(dc, t0);
-        tcg_temp_free(t0);
-        tcg_temp_free(t1);
-	tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
+        tcg_temp_free_i32(t0);
+        tcg_temp_free_i32(t1);
+        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
         dc->is_jmp = DISAS_UPDATE;
         return;
     }
@@ -484,7 +489,7 @@ static void dec_msr(DisasContext *dc)
     if (to) {
         if ((dc->tb_flags & MSR_EE_FLAG)
              && mem_index == MMU_USER_IDX) {
-            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
             t_gen_raise_exception(dc, EXCP_HW_EXCP);
             return;
         }
@@ -496,9 +501,9 @@ static void dec_msr(DisasContext *dc)
         sr &= 7;
         LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
         if (to)
-            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
+            gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
         else
-            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
+            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
         return;
     }
 #endif
@@ -512,19 +517,21 @@ static void dec_msr(DisasContext *dc)
                 msr_write(dc, cpu_R[dc->ra]);
                 break;
             case 0x3:
-                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
+                tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                 break;
             case 0x5:
-                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
+                tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                 break;
             case 0x7:
-                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
+                tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                 break;
             case 0x800:
-                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
+                tcg_gen_st_i32(cpu_R[dc->ra], cpu_env,
+                               offsetof(CPUMBState, slr));
                 break;
             case 0x802:
-                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
+                tcg_gen_st_i32(cpu_R[dc->ra], cpu_env,
+                               offsetof(CPUMBState, shr));
                 break;
             default:
                 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
@@ -535,28 +542,30 @@ static void dec_msr(DisasContext *dc)
 
         switch (sr) {
             case 0:
-                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
+                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                 break;
             case 1:
                 msr_read(dc, cpu_R[dc->rd]);
                 break;
             case 0x3:
-                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
+                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                 break;
             case 0x5:
-                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
+                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                 break;
              case 0x7:
-                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
+                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                 break;
             case 0xb:
-                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
+                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                 break;
             case 0x800:
-                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
+                tcg_gen_ld_i32(cpu_R[dc->rd], cpu_env,
+                               offsetof(CPUMBState, slr));
                 break;
             case 0x802:
-                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
+                tcg_gen_ld_i32(cpu_R[dc->rd], cpu_env,
+                               offsetof(CPUMBState, shr));
                 break;
             case 0x2000:
             case 0x2001:
@@ -572,7 +581,7 @@ static void dec_msr(DisasContext *dc)
             case 0x200b:
             case 0x200c:
                 rn = sr & 0xf;
-                tcg_gen_ld_tl(cpu_R[dc->rd],
+                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                 break;
             default:
@@ -582,12 +591,12 @@ static void dec_msr(DisasContext *dc)
     }
 
     if (dc->rd == 0) {
-        tcg_gen_movi_tl(cpu_R[0], 0);
+        tcg_gen_movi_i32(cpu_R[0], 0);
     }
 }
 
 /* 64-bit signed mul, lower result in d and upper in d2.  */
-static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
+static void t_gen_muls(TCGv_i32 d, TCGv_i32 d2, TCGv_i32 a, TCGv_i32 b)
 {
     TCGv_i64 t0, t1;
 
@@ -607,7 +616,7 @@ static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
 }
 
 /* 64-bit unsigned muls, lower result in d and upper in d2.  */
-static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
+static void t_gen_mulu(TCGv_i32 d, TCGv_i32 d2, TCGv_i32 a, TCGv_i32 b)
 {
     TCGv_i64 t0, t1;
 
@@ -629,20 +638,20 @@ static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
 /* Multiplier unit.  */
 static void dec_mul(DisasContext *dc)
 {
-    TCGv d[2];
+    TCGv_i32 d[2];
     unsigned int subcode;
 
     if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
         return;
     }
 
     subcode = dc->imm & 3;
-    d[0] = tcg_temp_new();
-    d[1] = tcg_temp_new();
+    d[0] = tcg_temp_new_i32();
+    d[1] = tcg_temp_new_i32();
 
     if (dc->type_b) {
         LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
@@ -678,8 +687,8 @@ static void dec_mul(DisasContext *dc)
             break;
     }
 done:
-    tcg_temp_free(d[0]);
-    tcg_temp_free(d[1]);
+    tcg_temp_free_i32(d[0]);
+    tcg_temp_free_i32(d[1]);
 }
 
 /* Div unit.  */
@@ -692,7 +701,7 @@ static void dec_div(DisasContext *dc)
 
     if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
           && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
     }
 
@@ -703,18 +712,18 @@ static void dec_div(DisasContext *dc)
         gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                         cpu_R[dc->ra]);
     if (!dc->rd)
-        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
+        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
 }
 
 static void dec_barrel(DisasContext *dc)
 {
-    TCGv t0;
+    TCGv_i32 t0;
     unsigned int s, t;
 
     if ((dc->tb_flags & MSR_EE_FLAG)
           && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
           && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
         return;
     }
@@ -725,25 +734,25 @@ static void dec_barrel(DisasContext *dc)
     LOG_DIS("bs%s%s r%d r%d r%d\n",
             s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
 
-    t0 = tcg_temp_new();
+    t0 = tcg_temp_new_i32();
 
-    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
-    tcg_gen_andi_tl(t0, t0, 31);
+    tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
+    tcg_gen_andi_i32(t0, t0, 31);
 
     if (s)
-        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+        tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
     else {
         if (t)
-            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+            tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
         else
-            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+            tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
     }
 }
 
 static void dec_bit(DisasContext *dc)
 {
     CPUState *cs = CPU(dc->cpu);
-    TCGv t0;
+    TCGv_i32 t0;
     unsigned int op;
     int mem_index = cpu_mmu_index(&dc->cpu->env);
 
@@ -751,16 +760,16 @@ static void dec_bit(DisasContext *dc)
     switch (op) {
         case 0x21:
             /* src.  */
-            t0 = tcg_temp_new();
+            t0 = tcg_temp_new_i32();
 
             LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
-            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
+            tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
             write_carry(dc, cpu_R[dc->ra]);
             if (dc->rd) {
-                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
-                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
+                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
             }
-            tcg_temp_free(t0);
+            tcg_temp_free_i32(t0);
             break;
 
         case 0x1:
@@ -772,9 +781,9 @@ static void dec_bit(DisasContext *dc)
             write_carry(dc, cpu_R[dc->ra]);
             if (dc->rd) {
                 if (op == 0x41)
-                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                 else
-                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
             }
             break;
         case 0x60:
@@ -793,7 +802,7 @@ static void dec_bit(DisasContext *dc)
             LOG_DIS("wdc r%d\n", dc->ra);
             if ((dc->tb_flags & MSR_EE_FLAG)
                  && mem_index == MMU_USER_IDX) {
-                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                 t_gen_raise_exception(dc, EXCP_HW_EXCP);
                 return;
             }
@@ -803,7 +812,7 @@ static void dec_bit(DisasContext *dc)
             LOG_DIS("wic r%d\n", dc->ra);
             if ((dc->tb_flags & MSR_EE_FLAG)
                  && mem_index == MMU_USER_IDX) {
-                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                 t_gen_raise_exception(dc, EXCP_HW_EXCP);
                 return;
             }
@@ -812,7 +821,7 @@ static void dec_bit(DisasContext *dc)
             if ((dc->tb_flags & MSR_EE_FLAG)
                 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
-                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                 t_gen_raise_exception(dc, EXCP_HW_EXCP);
             }
             if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
@@ -840,22 +849,22 @@ static inline void sync_jmpstate(DisasContext *dc)
 {
     if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
         if (dc->jmp == JMP_DIRECT) {
-            tcg_gen_movi_tl(env_btaken, 1);
+            tcg_gen_movi_i32(env_btaken, 1);
         }
         dc->jmp = JMP_INDIRECT;
-        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+        tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
     }
 }
 
 static void dec_imm(DisasContext *dc)
 {
     LOG_DIS("imm %x\n", dc->imm << 16);
-    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
+    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
     dc->tb_flags |= IMM_FLAG;
     dc->clear_imm = 0;
 }
 
-static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
+static inline TCGv_i32 *compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
 {
     unsigned int extimm = dc->tb_flags & IMM_FLAG;
     /* Should be set to one if r1 is used by loadstores.  */
@@ -879,8 +888,8 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
             stackprot = 1;
         }
 
-        *t = tcg_temp_new();
-        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
+        *t = tcg_temp_new_i32();
+        tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
 
         if (stackprot) {
             gen_helper_stackprot(cpu_env, *t);
@@ -892,12 +901,12 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
         if (dc->imm == 0) {
             return &cpu_R[dc->ra];
         }
-        *t = tcg_temp_new();
-        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
-        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
+        *t = tcg_temp_new_i32();
+        tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm));
+        tcg_gen_add_i32(*t, cpu_R[dc->ra], *t);
     } else {
-        *t = tcg_temp_new();
-        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+        *t = tcg_temp_new_i32();
+        tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
     }
 
     if (stackprot) {
@@ -908,7 +917,7 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
 
 static void dec_load(DisasContext *dc)
 {
-    TCGv t, v, *addr;
+    TCGv_i32 t, v, *addr;
     unsigned int size, rev = 0, ex = 0;
     TCGMemOp mop;
 
@@ -925,7 +934,7 @@ static void dec_load(DisasContext *dc)
 
     if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
           && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
         return;
     }
@@ -951,21 +960,21 @@ static void dec_load(DisasContext *dc)
                    01 -> 10
                    10 -> 10
                    11 -> 00 */
-                TCGv low = tcg_temp_new();
+                TCGv_i32 low = tcg_temp_new_i32();
 
                 /* Force addr into the temp.  */
                 if (addr != &t) {
-                    t = tcg_temp_new();
-                    tcg_gen_mov_tl(t, *addr);
+                    t = tcg_temp_new_i32();
+                    tcg_gen_mov_i32(t, *addr);
                     addr = &t;
                 }
 
-                tcg_gen_andi_tl(low, t, 3);
-                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
-                tcg_gen_andi_tl(t, t, ~3);
-                tcg_gen_or_tl(t, t, low);
-                tcg_gen_mov_tl(env_imm, t);
-                tcg_temp_free(low);
+                tcg_gen_andi_i32(low, t, 3);
+                tcg_gen_sub_i32(low, tcg_const_i32(3), low);
+                tcg_gen_andi_i32(t, t, ~3);
+                tcg_gen_or_i32(t, t, low);
+                tcg_gen_mov_i32(env_imm, t);
+                tcg_temp_free_i32(low);
                 break;
             }
 
@@ -974,11 +983,11 @@ static void dec_load(DisasContext *dc)
                    10 -> 00.  */
                 /* Force addr into the temp.  */
                 if (addr != &t) {
-                    t = tcg_temp_new();
-                    tcg_gen_xori_tl(t, *addr, 2);
+                    t = tcg_temp_new_i32();
+                    tcg_gen_xori_i32(t, *addr, 2);
                     addr = &t;
                 } else {
-                    tcg_gen_xori_tl(t, t, 2);
+                    tcg_gen_xori_i32(t, t, 2);
                 }
                 break;
             default:
@@ -991,11 +1000,11 @@ static void dec_load(DisasContext *dc)
     if (ex) {
         /* Force addr into the temp.  */
         if (addr != &t) {
-            t = tcg_temp_new();
-            tcg_gen_mov_tl(t, *addr);
+            t = tcg_temp_new_i32();
+            tcg_gen_mov_i32(t, *addr);
             addr = &t;
         }
-        tcg_gen_andi_tl(t, t, ~3);
+        tcg_gen_andi_i32(t, t, ~3);
     }
 
     /* If we get a fault on a dslot, the jmpstate better be in sync.  */
@@ -1008,23 +1017,23 @@ static void dec_load(DisasContext *dc)
      * into v. If the load succeeds, we verify alignment of the
      * address and if that succeeds we write into the destination reg.
      */
-    v = tcg_temp_new();
-    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
+    v = tcg_temp_new_i32();
+    tcg_gen_qemu_ld_i32(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
 
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
-        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
-        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
-                            tcg_const_tl(0), tcg_const_tl(size - 1));
+        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
+        gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
+                            tcg_const_i32(0), tcg_const_i32(size - 1));
     }
 
     if (ex) {
-        tcg_gen_mov_tl(env_res_addr, *addr);
-        tcg_gen_mov_tl(env_res_val, v);
+        tcg_gen_mov_i32(env_res_addr, *addr);
+        tcg_gen_mov_i32(env_res_val, v);
     }
     if (dc->rd) {
-        tcg_gen_mov_tl(cpu_R[dc->rd], v);
+        tcg_gen_mov_i32(cpu_R[dc->rd], v);
     }
-    tcg_temp_free(v);
+    tcg_temp_free_i32(v);
 
     if (ex) { /* lwx */
         /* no support for AXI exclusive so always clear C */
@@ -1032,12 +1041,12 @@ static void dec_load(DisasContext *dc)
     }
 
     if (addr == &t)
-        tcg_temp_free(t);
+        tcg_temp_free_i32(t);
 }
 
 static void dec_store(DisasContext *dc)
 {
-    TCGv t, *addr, swx_addr;
+    TCGv_i32 t, *addr, swx_addr;
     TCGLabel *swx_skip = NULL;
     unsigned int size, rev = 0, ex = 0;
     TCGMemOp mop;
@@ -1055,7 +1064,7 @@ static void dec_store(DisasContext *dc)
 
     if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
           && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
         return;
     }
@@ -1067,31 +1076,31 @@ static void dec_store(DisasContext *dc)
     sync_jmpstate(dc);
     addr = compute_ldst_addr(dc, &t);
 
-    swx_addr = tcg_temp_local_new();
+    swx_addr = tcg_temp_local_new_i32();
     if (ex) { /* swx */
-        TCGv tval;
+        TCGv_i32 tval;
 
         /* Force addr into the swx_addr. */
-        tcg_gen_mov_tl(swx_addr, *addr);
+        tcg_gen_mov_i32(swx_addr, *addr);
         addr = &swx_addr;
         /* swx does not throw unaligned access errors, so force alignment */
-        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
+        tcg_gen_andi_i32(swx_addr, swx_addr, ~3);
 
         write_carryi(dc, 1);
         swx_skip = gen_new_label();
-        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
+        tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
 
         /* Compare the value loaded at lwx with current contents of
            the reserved location.
            FIXME: This only works for system emulation where we can expect
            this compare and the following write to be atomic. For user
            emulation we need to add atomicity between threads.  */
-        tval = tcg_temp_new();
-        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
+        tval = tcg_temp_new_i32();
+        tcg_gen_qemu_ld_i32(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
                            MO_TEUL);
-        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
+        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
         write_carryi(dc, 0);
-        tcg_temp_free(tval);
+        tcg_temp_free_i32(tval);
     }
 
     if (rev && size != 4) {
@@ -1103,21 +1112,21 @@ static void dec_store(DisasContext *dc)
                    01 -> 10
                    10 -> 10
                    11 -> 00 */
-                TCGv low = tcg_temp_new();
+                TCGv_i32 low = tcg_temp_new_i32();
 
                 /* Force addr into the temp.  */
                 if (addr != &t) {
-                    t = tcg_temp_new();
-                    tcg_gen_mov_tl(t, *addr);
+                    t = tcg_temp_new_i32();
+                    tcg_gen_mov_i32(t, *addr);
                     addr = &t;
                 }
 
-                tcg_gen_andi_tl(low, t, 3);
-                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
-                tcg_gen_andi_tl(t, t, ~3);
-                tcg_gen_or_tl(t, t, low);
-                tcg_gen_mov_tl(env_imm, t);
-                tcg_temp_free(low);
+                tcg_gen_andi_i32(low, t, 3);
+                tcg_gen_sub_i32(low, tcg_const_i32(3), low);
+                tcg_gen_andi_i32(t, t, ~3);
+                tcg_gen_or_i32(t, t, low);
+                tcg_gen_mov_i32(env_imm, t);
+                tcg_temp_free_i32(low);
                 break;
             }
 
@@ -1126,11 +1135,11 @@ static void dec_store(DisasContext *dc)
                    10 -> 00.  */
                 /* Force addr into the temp.  */
                 if (addr != &t) {
-                    t = tcg_temp_new();
-                    tcg_gen_xori_tl(t, *addr, 2);
+                    t = tcg_temp_new_i32();
+                    tcg_gen_xori_i32(t, *addr, 2);
                     addr = &t;
                 } else {
-                    tcg_gen_xori_tl(t, t, 2);
+                    tcg_gen_xori_i32(t, t, 2);
                 }
                 break;
             default:
@@ -1138,51 +1147,52 @@ static void dec_store(DisasContext *dc)
                 break;
         }
     }
-    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
+    tcg_gen_qemu_st_i32(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env),
+                        mop);
 
     /* Verify alignment if needed.  */
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
-        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
         /* FIXME: if the alignment is wrong, we should restore the value
          *        in memory. One possible way to achieve this is to probe
          *        the MMU prior to the memaccess, that way we could put
          *        the alignment checks in between the probe and the mem
          *        access.
          */
-        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
-                            tcg_const_tl(1), tcg_const_tl(size - 1));
+        gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
+                            tcg_const_i32(1), tcg_const_i32(size - 1));
     }
 
     if (ex) {
         gen_set_label(swx_skip);
     }
-    tcg_temp_free(swx_addr);
+    tcg_temp_free_i32(swx_addr);
 
     if (addr == &t)
-        tcg_temp_free(t);
+        tcg_temp_free_i32(t);
 }
 
 static inline void eval_cc(DisasContext *dc, unsigned int cc,
-                           TCGv d, TCGv a, TCGv b)
+                           TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
     switch (cc) {
         case CC_EQ:
-            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
+            tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
             break;
         case CC_NE:
-            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
+            tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
             break;
         case CC_LT:
-            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
+            tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
             break;
         case CC_LE:
-            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
+            tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
             break;
         case CC_GE:
-            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
+            tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
             break;
         case CC_GT:
-            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
+            tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
             break;
         default:
             cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
@@ -1190,13 +1200,13 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,
     }
 }
 
-static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
+static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
 {
     TCGLabel *l1 = gen_new_label();
     /* Conditional jmp.  */
-    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
-    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
-    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
+    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
+    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
     gen_set_label(l1);
 }
 
@@ -1213,22 +1223,22 @@ static void dec_bcc(DisasContext *dc)
     if (dslot) {
         dc->delayed_branch = 2;
         dc->tb_flags |= D_FLAG;
-        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                       cpu_env, offsetof(CPUMBState, bimm));
     }
 
     if (dec_alu_op_b_is_small_imm(dc)) {
         int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */
 
-        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
+        tcg_gen_movi_i32(env_btarget, dc->pc + offset);
         dc->jmp = JMP_DIRECT_CC;
         dc->jmp_pc = dc->pc + offset;
     } else {
         dc->jmp = JMP_INDIRECT;
-        tcg_gen_movi_tl(env_btarget, dc->pc);
-        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
+        tcg_gen_movi_i32(env_btarget, dc->pc);
+        tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
     }
-    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
+    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
 }
 
 static void dec_br(DisasContext *dc)
@@ -1254,7 +1264,7 @@ static void dec_br(DisasContext *dc)
             tcg_gen_st_i32(tmp_1, cpu_env,
                            -offsetof(MicroBlazeCPU, env)
                            +offsetof(CPUState, halted));
-            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
+            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
             gen_helper_raise_exception(cpu_env, tmp_hlt);
             tcg_temp_free_i32(tmp_hlt);
             tcg_temp_free_i32(tmp_1);
@@ -1275,22 +1285,22 @@ static void dec_br(DisasContext *dc)
     if (dslot) {
         dc->delayed_branch = 2;
         dc->tb_flags |= D_FLAG;
-        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                       cpu_env, offsetof(CPUMBState, bimm));
     }
     if (link && dc->rd)
-        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
+        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
 
     dc->jmp = JMP_INDIRECT;
     if (abs) {
-        tcg_gen_movi_tl(env_btaken, 1);
-        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
+        tcg_gen_movi_i32(env_btaken, 1);
+        tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
         if (link && !dslot) {
             if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                 t_gen_raise_exception(dc, EXCP_BREAK);
             if (dc->imm == 0) {
                 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
-                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+                    tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                     t_gen_raise_exception(dc, EXCP_HW_EXCP);
                     return;
                 }
@@ -1303,63 +1313,63 @@ static void dec_br(DisasContext *dc)
             dc->jmp = JMP_DIRECT;
             dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
         } else {
-            tcg_gen_movi_tl(env_btaken, 1);
-            tcg_gen_movi_tl(env_btarget, dc->pc);
-            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
+            tcg_gen_movi_i32(env_btaken, 1);
+            tcg_gen_movi_i32(env_btarget, dc->pc);
+            tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
         }
     }
 }
 
 static inline void do_rti(DisasContext *dc)
 {
-    TCGv t0, t1;
-    t0 = tcg_temp_new();
-    t1 = tcg_temp_new();
-    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
-    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
-    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
-
-    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
-    tcg_gen_or_tl(t1, t1, t0);
+    TCGv_i32 t0, t1;
+    t0 = tcg_temp_new_i32();
+    t1 = tcg_temp_new_i32();
+    tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
+    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
+    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
+
+    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
+    tcg_gen_or_i32(t1, t1, t0);
     msr_write(dc, t1);
-    tcg_temp_free(t1);
-    tcg_temp_free(t0);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t0);
     dc->tb_flags &= ~DRTI_FLAG;
 }
 
 static inline void do_rtb(DisasContext *dc)
 {
-    TCGv t0, t1;
-    t0 = tcg_temp_new();
-    t1 = tcg_temp_new();
-    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
-    tcg_gen_shri_tl(t0, t1, 1);
-    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
-
-    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
-    tcg_gen_or_tl(t1, t1, t0);
+    TCGv_i32 t0, t1;
+    t0 = tcg_temp_new_i32();
+    t1 = tcg_temp_new_i32();
+    tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
+    tcg_gen_shri_i32(t0, t1, 1);
+    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
+
+    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
+    tcg_gen_or_i32(t1, t1, t0);
     msr_write(dc, t1);
-    tcg_temp_free(t1);
-    tcg_temp_free(t0);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t0);
     dc->tb_flags &= ~DRTB_FLAG;
 }
 
 static inline void do_rte(DisasContext *dc)
 {
-    TCGv t0, t1;
-    t0 = tcg_temp_new();
-    t1 = tcg_temp_new();
+    TCGv_i32 t0, t1;
+    t0 = tcg_temp_new_i32();
+    t1 = tcg_temp_new_i32();
 
-    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
-    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
-    tcg_gen_shri_tl(t0, t1, 1);
-    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
+    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
+    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
+    tcg_gen_shri_i32(t0, t1, 1);
+    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
 
-    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
-    tcg_gen_or_tl(t1, t1, t0);
+    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
+    tcg_gen_or_i32(t1, t1, t0);
     msr_write(dc, t1);
-    tcg_temp_free(t1);
-    tcg_temp_free(t0);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t0);
     dc->tb_flags &= ~DRTE_FLAG;
 }
 
@@ -1374,14 +1384,14 @@ static void dec_rts(DisasContext *dc)
 
     dc->delayed_branch = 2;
     dc->tb_flags |= D_FLAG;
-    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                   cpu_env, offsetof(CPUMBState, bimm));
 
     if (i_bit) {
         LOG_DIS("rtid ir=%x\n", dc->ir);
         if ((dc->tb_flags & MSR_EE_FLAG)
              && mem_index == MMU_USER_IDX) {
-            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
             t_gen_raise_exception(dc, EXCP_HW_EXCP);
         }
         dc->tb_flags |= DRTI_FLAG;
@@ -1389,7 +1399,7 @@ static void dec_rts(DisasContext *dc)
         LOG_DIS("rtbd ir=%x\n", dc->ir);
         if ((dc->tb_flags & MSR_EE_FLAG)
              && mem_index == MMU_USER_IDX) {
-            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
             t_gen_raise_exception(dc, EXCP_HW_EXCP);
         }
         dc->tb_flags |= DRTB_FLAG;
@@ -1397,7 +1407,7 @@ static void dec_rts(DisasContext *dc)
         LOG_DIS("rted ir=%x\n", dc->ir);
         if ((dc->tb_flags & MSR_EE_FLAG)
              && mem_index == MMU_USER_IDX) {
-            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
             t_gen_raise_exception(dc, EXCP_HW_EXCP);
         }
         dc->tb_flags |= DRTE_FLAG;
@@ -1405,8 +1415,8 @@ static void dec_rts(DisasContext *dc)
         LOG_DIS("rts ir=%x\n", dc->ir);
 
     dc->jmp = JMP_INDIRECT;
-    tcg_gen_movi_tl(env_btaken, 1);
-    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+    tcg_gen_movi_i32(env_btaken, 1);
+    tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
 }
 
 static int dec_check_fpuv2(DisasContext *dc)
@@ -1416,7 +1426,7 @@ static int dec_check_fpuv2(DisasContext *dc)
     r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;
 
     if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
     }
     return r;
@@ -1429,7 +1439,7 @@ static void dec_fpu(DisasContext *dc)
     if ((dc->tb_flags & MSR_EE_FLAG)
           && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
           && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
         return;
     }
@@ -1531,7 +1541,7 @@ static void dec_null(DisasContext *dc)
 {
     if ((dc->tb_flags & MSR_EE_FLAG)
           && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
         return;
     }
@@ -1550,29 +1560,29 @@ static void dec_stream(DisasContext *dc)
             dc->type_b ? "" : "d", dc->imm);
 
     if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
-        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
         t_gen_raise_exception(dc, EXCP_HW_EXCP);
         return;
     }
 
-    t_id = tcg_temp_new();
+    t_id = tcg_temp_new_i32();
     if (dc->type_b) {
-        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
+        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
         ctrl = dc->imm >> 10;
     } else {
-        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
+        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
         ctrl = dc->imm >> 5;
     }
 
-    t_ctrl = tcg_const_tl(ctrl);
+    t_ctrl = tcg_const_i32(ctrl);
 
     if (dc->rd == 0) {
         gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
     } else {
         gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
     }
-    tcg_temp_free(t_id);
-    tcg_temp_free(t_ctrl);
+    tcg_temp_free_i32(t_id);
+    tcg_temp_free_i32(t_ctrl);
 }
 
 static struct decoder_info {
@@ -1620,7 +1630,7 @@ static inline void decode(DisasContext *dc, uint32_t ir)
         if ((dc->tb_flags & MSR_EE_FLAG)
               && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
               && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
-            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
             t_gen_raise_exception(dc, EXCP_HW_EXCP);
             return;
         }
@@ -1675,8 +1685,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
     int j, lj;
     struct DisasContext ctx;
     struct DisasContext *dc = &ctx;
-    uint32_t next_page_start, org_flags;
-    target_ulong npc;
+    uint32_t next_page_start, org_flags, npc;
     int num_insns;
     int max_insns;
 
@@ -1720,7 +1729,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
     {
 #if SIM_COMPAT
         if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
-            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
             gen_helper_debug();
         }
 #endif
@@ -1764,7 +1773,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                 dc->tb_flags &= ~D_FLAG;
                 /* If it is a direct jump, try direct chaining.  */
                 if (dc->jmp == JMP_INDIRECT) {
-                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
+                    eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
                     dc->is_jmp = DISAS_JUMP;
                 } else if (dc->jmp == JMP_DIRECT) {
                     t_sync_flags(dc);
@@ -1774,7 +1783,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                     TCGLabel *l1 = gen_new_label();
                     t_sync_flags(dc);
                     /* Conditional jmp.  */
-                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
+                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                     gen_goto_tb(dc, 1, dc->pc);
                     gen_set_label(l1);
                     gen_goto_tb(dc, 0, dc->jmp_pc);
@@ -1797,7 +1806,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
     if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
         if (dc->tb_flags & D_FLAG) {
             dc->is_jmp = DISAS_UPDATE;
-            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
             sync_jmpstate(dc);
         } else
             npc = dc->jmp_pc;
@@ -1809,7 +1818,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
     if (dc->is_jmp == DISAS_NEXT
         && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
         dc->is_jmp = DISAS_UPDATE;
-        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+        tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
     }
     t_sync_flags(dc);
 
@@ -1817,7 +1826,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
         TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
 
         if (dc->is_jmp != DISAS_JUMP) {
-            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
         }
         gen_helper_raise_exception(cpu_env, tmp);
         tcg_temp_free_i32(tmp);
@@ -1922,34 +1931,34 @@ void mb_tcg_init(void)
 
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
 
-    env_debug = tcg_global_mem_new(TCG_AREG0, 
+    env_debug = tcg_global_mem_new_i32(TCG_AREG0,
                     offsetof(CPUMBState, debug),
                     "debug0");
-    env_iflags = tcg_global_mem_new(TCG_AREG0, 
+    env_iflags = tcg_global_mem_new_i32(TCG_AREG0,
                     offsetof(CPUMBState, iflags),
                     "iflags");
-    env_imm = tcg_global_mem_new(TCG_AREG0, 
+    env_imm = tcg_global_mem_new_i32(TCG_AREG0,
                     offsetof(CPUMBState, imm),
                     "imm");
-    env_btarget = tcg_global_mem_new(TCG_AREG0,
+    env_btarget = tcg_global_mem_new_i32(TCG_AREG0,
                      offsetof(CPUMBState, btarget),
                      "btarget");
-    env_btaken = tcg_global_mem_new(TCG_AREG0,
+    env_btaken = tcg_global_mem_new_i32(TCG_AREG0,
                      offsetof(CPUMBState, btaken),
                      "btaken");
-    env_res_addr = tcg_global_mem_new(TCG_AREG0,
+    env_res_addr = tcg_global_mem_new_i32(TCG_AREG0,
                      offsetof(CPUMBState, res_addr),
                      "res_addr");
-    env_res_val = tcg_global_mem_new(TCG_AREG0,
+    env_res_val = tcg_global_mem_new_i32(TCG_AREG0,
                      offsetof(CPUMBState, res_val),
                      "res_val");
     for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
-        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
+        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                           offsetof(CPUMBState, regs[i]),
                           regnames[i]);
     }
     for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
-        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
+        cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
                           offsetof(CPUMBState, sregs[i]),
                           special_regnames[i]);
     }
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong
  2015-05-15  4:49 [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong Peter Crosthwaite
@ 2015-05-15  5:56 ` Edgar E. Iglesias
  2015-05-15 15:41 ` Richard Henderson
  1 sibling, 0 replies; 8+ messages in thread
From: Edgar E. Iglesias @ 2015-05-15  5:56 UTC (permalink / raw)
  To: Peter Crosthwaite
  Cc: peter.maydell, edgari, Peter Crosthwaite, qemu-devel, afaerber

On Thu, May 14, 2015 at 09:49:51PM -0700, Peter Crosthwaite wrote:
> To prepare support for conversion of Microblaze TARGET_LONG to 64 bits.
> This in turn will then allow support for multi-arch QEMU including both
> Microblaze and 64-bit CPU targets (notably AArch64).
> 
> Signed-off-by: Peter Crosthwaite <crosthwaite.peter@gmail.com>

Looks good to me

Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>


> ---
>  target-microblaze/helper.c    |   4 +-
>  target-microblaze/translate.c | 609 +++++++++++++++++++++---------------------
>  2 files changed, 311 insertions(+), 302 deletions(-)
> 
> diff --git a/target-microblaze/helper.c b/target-microblaze/helper.c
> index 32896f4..839680b 100644
> --- a/target-microblaze/helper.c
> +++ b/target-microblaze/helper.c
> @@ -66,7 +66,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
>  
>      /* Translate if the MMU is available and enabled.  */
>      if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
> -        target_ulong vaddr, paddr;
> +        uint32_t vaddr, paddr;
>          struct microblaze_mmu_lookup lu;
>  
>          hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
> @@ -269,7 +269,7 @@ hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
>  {
>      MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
>      CPUMBState *env = &cpu->env;
> -    target_ulong vaddr, paddr = 0;
> +    uint32_t vaddr, paddr = 0;
>      struct microblaze_mmu_lookup lu;
>      unsigned int hit;
>  
> diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
> index 4068946..5918d88 100644
> --- a/target-microblaze/translate.c
> +++ b/target-microblaze/translate.c
> @@ -43,23 +43,28 @@
>  #define EXTRACT_FIELD(src, start, end) \
>              (((src) >> start) & ((1 << (end - start + 1)) - 1))
>  
> -static TCGv env_debug;
> +/*
> + * Microblaze does not support direct use of the target long type
> + * All TCG vars must be TCGv_i32
> + */
> +
> +static TCGv_i32 env_debug;
>  static TCGv_ptr cpu_env;
> -static TCGv cpu_R[32];
> -static TCGv cpu_SR[18];
> -static TCGv env_imm;
> -static TCGv env_btaken;
> -static TCGv env_btarget;
> -static TCGv env_iflags;
> -static TCGv env_res_addr;
> -static TCGv env_res_val;
> +static TCGv_i32 cpu_R[32];
> +static TCGv_i32 cpu_SR[18];
> +static TCGv_i32 env_imm;
> +static TCGv_i32 env_btaken;
> +static TCGv_i32 env_btarget;
> +static TCGv_i32 env_iflags;
> +static TCGv_i32 env_res_addr;
> +static TCGv_i32 env_res_val;
>  
>  #include "exec/gen-icount.h"
>  
>  /* This is the state at translation time.  */
>  typedef struct DisasContext {
>      MicroBlazeCPU *cpu;
> -    target_ulong pc;
> +    uint32_t pc;
>  
>      /* Decoder.  */
>      int type_b;
> @@ -106,7 +111,7 @@ static inline void t_sync_flags(DisasContext *dc)
>  {
>      /* Synch the tb dependent flags between translator and runtime.  */
>      if (dc->tb_flags != dc->synced_flags) {
> -        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
> +        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
>          dc->synced_flags = dc->tb_flags;
>      }
>  }
> @@ -116,53 +121,53 @@ static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
>      TCGv_i32 tmp = tcg_const_i32(index);
>  
>      t_sync_flags(dc);
> -    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
> +    tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
>      gen_helper_raise_exception(cpu_env, tmp);
>      tcg_temp_free_i32(tmp);
>      dc->is_jmp = DISAS_UPDATE;
>  }
>  
> -static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
> +static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
>  {
>      TranslationBlock *tb;
>      tb = dc->tb;
>      if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
>          tcg_gen_goto_tb(n);
> -        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
> +        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
>          tcg_gen_exit_tb((uintptr_t)tb + n);
>      } else {
> -        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
> +        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
>          tcg_gen_exit_tb(0);
>      }
>  }
>  
> -static void read_carry(DisasContext *dc, TCGv d)
> +static void read_carry(DisasContext *dc, TCGv_i32 d)
>  {
> -    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
> +    tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
>  }
>  
>  /*
>   * write_carry sets the carry bits in MSR based on bit 0 of v.
>   * v[31:1] are ignored.
>   */
> -static void write_carry(DisasContext *dc, TCGv v)
> +static void write_carry(DisasContext *dc, TCGv_i32 v)
>  {
> -    TCGv t0 = tcg_temp_new();
> -    tcg_gen_shli_tl(t0, v, 31);
> -    tcg_gen_sari_tl(t0, t0, 31);
> -    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
> -    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
> +    TCGv_i32 t0 = tcg_temp_new_i32();
> +    tcg_gen_shli_i32(t0, v, 31);
> +    tcg_gen_sari_i32(t0, t0, 31);
> +    tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
> +    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
>                      ~(MSR_C | MSR_CC));
> -    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
> -    tcg_temp_free(t0);
> +    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
> +    tcg_temp_free_i32(t0);
>  }
>  
>  static void write_carryi(DisasContext *dc, bool carry)
>  {
> -    TCGv t0 = tcg_temp_new();
> -    tcg_gen_movi_tl(t0, carry);
> +    TCGv_i32 t0 = tcg_temp_new_i32();
> +    tcg_gen_movi_i32(t0, carry);
>      write_carry(dc, t0);
> -    tcg_temp_free(t0);
> +    tcg_temp_free_i32(t0);
>  }
>  
>  /* True if ALU operand b is a small immediate that may deserve
> @@ -173,13 +178,13 @@ static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
>      return dc->type_b && !(dc->tb_flags & IMM_FLAG);
>  }
>  
> -static inline TCGv *dec_alu_op_b(DisasContext *dc)
> +static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
>  {
>      if (dc->type_b) {
>          if (dc->tb_flags & IMM_FLAG)
> -            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
> +            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
>          else
> -            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
> +            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
>          return &env_imm;
>      } else
>          return &cpu_R[dc->rb];
> @@ -188,7 +193,7 @@ static inline TCGv *dec_alu_op_b(DisasContext *dc)
>  static void dec_add(DisasContext *dc)
>  {
>      unsigned int k, c;
> -    TCGv cf;
> +    TCGv_i32 cf;
>  
>      k = dc->opcode & 4;
>      c = dc->opcode & 2;
> @@ -202,15 +207,15 @@ static void dec_add(DisasContext *dc)
>          /* k - keep carry, no need to update MSR.  */
>          /* If rd == r0, it's a nop.  */
>          if (dc->rd) {
> -            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> +            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
>  
>              if (c) {
>                  /* c - Add carry into the result.  */
> -                cf = tcg_temp_new();
> +                cf = tcg_temp_new_i32();
>  
>                  read_carry(dc, cf);
> -                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
> -                tcg_temp_free(cf);
> +                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
> +                tcg_temp_free_i32(cf);
>              }
>          }
>          return;
> @@ -218,31 +223,31 @@ static void dec_add(DisasContext *dc)
>  
>      /* From now on, we can assume k is zero.  So we need to update MSR.  */
>      /* Extract carry.  */
> -    cf = tcg_temp_new();
> +    cf = tcg_temp_new_i32();
>      if (c) {
>          read_carry(dc, cf);
>      } else {
> -        tcg_gen_movi_tl(cf, 0);
> +        tcg_gen_movi_i32(cf, 0);
>      }
>  
>      if (dc->rd) {
> -        TCGv ncf = tcg_temp_new();
> +        TCGv_i32 ncf = tcg_temp_new_i32();
>          gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
> -        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> -        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
> +        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> +        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
>          write_carry(dc, ncf);
> -        tcg_temp_free(ncf);
> +        tcg_temp_free_i32(ncf);
>      } else {
>          gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
>          write_carry(dc, cf);
>      }
> -    tcg_temp_free(cf);
> +    tcg_temp_free_i32(cf);
>  }
>  
>  static void dec_sub(DisasContext *dc)
>  {
>      unsigned int u, cmp, k, c;
> -    TCGv cf, na;
> +    TCGv_i32 cf, na;
>  
>      u = dc->imm & 2;
>      k = dc->opcode & 4;
> @@ -268,15 +273,15 @@ static void dec_sub(DisasContext *dc)
>          /* k - keep carry, no need to update MSR.  */
>          /* If rd == r0, it's a nop.  */
>          if (dc->rd) {
> -            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
> +            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
>  
>              if (c) {
>                  /* c - Add carry into the result.  */
> -                cf = tcg_temp_new();
> +                cf = tcg_temp_new_i32();
>  
>                  read_carry(dc, cf);
> -                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
> -                tcg_temp_free(cf);
> +                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
> +                tcg_temp_free_i32(cf);
>              }
>          }
>          return;
> @@ -284,30 +289,30 @@ static void dec_sub(DisasContext *dc)
>  
>      /* From now on, we can assume k is zero.  So we need to update MSR.  */
>      /* Extract carry. And complement a into na.  */
> -    cf = tcg_temp_new();
> -    na = tcg_temp_new();
> +    cf = tcg_temp_new_i32();
> +    na = tcg_temp_new_i32();
>      if (c) {
>          read_carry(dc, cf);
>      } else {
> -        tcg_gen_movi_tl(cf, 1);
> +        tcg_gen_movi_i32(cf, 1);
>      }
>  
>      /* d = b + ~a + c. carry defaults to 1.  */
> -    tcg_gen_not_tl(na, cpu_R[dc->ra]);
> +    tcg_gen_not_i32(na, cpu_R[dc->ra]);
>  
>      if (dc->rd) {
> -        TCGv ncf = tcg_temp_new();
> +        TCGv_i32 ncf = tcg_temp_new_i32();
>          gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
> -        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
> -        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
> +        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
> +        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
>          write_carry(dc, ncf);
> -        tcg_temp_free(ncf);
> +        tcg_temp_free_i32(ncf);
>      } else {
>          gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
>          write_carry(dc, cf);
>      }
> -    tcg_temp_free(cf);
> -    tcg_temp_free(na);
> +    tcg_temp_free_i32(cf);
> +    tcg_temp_free_i32(na);
>  }
>  
>  static void dec_pattern(DisasContext *dc)
> @@ -318,7 +323,7 @@ static void dec_pattern(DisasContext *dc)
>      if ((dc->tb_flags & MSR_EE_FLAG)
>            && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
>            && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>      }
>  
> @@ -333,29 +338,29 @@ static void dec_pattern(DisasContext *dc)
>          case 2:
>              LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
>              if (dc->rd) {
> -                TCGv t0 = tcg_temp_local_new();
> +                TCGv_i32 t0 = tcg_temp_local_new_i32();
>                  l1 = gen_new_label();
> -                tcg_gen_movi_tl(t0, 1);
> -                tcg_gen_brcond_tl(TCG_COND_EQ,
> +                tcg_gen_movi_i32(t0, 1);
> +                tcg_gen_brcond_i32(TCG_COND_EQ,
>                                    cpu_R[dc->ra], cpu_R[dc->rb], l1);
> -                tcg_gen_movi_tl(t0, 0);
> +                tcg_gen_movi_i32(t0, 0);
>                  gen_set_label(l1);
> -                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
> -                tcg_temp_free(t0);
> +                tcg_gen_mov_i32(cpu_R[dc->rd], t0);
> +                tcg_temp_free_i32(t0);
>              }
>              break;
>          case 3:
>              LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
>              l1 = gen_new_label();
>              if (dc->rd) {
> -                TCGv t0 = tcg_temp_local_new();
> -                tcg_gen_movi_tl(t0, 1);
> -                tcg_gen_brcond_tl(TCG_COND_NE,
> +                TCGv_i32 t0 = tcg_temp_local_new_i32();
> +                tcg_gen_movi_i32(t0, 1);
> +                tcg_gen_brcond_i32(TCG_COND_NE,
>                                    cpu_R[dc->ra], cpu_R[dc->rb], l1);
> -                tcg_gen_movi_tl(t0, 0);
> +                tcg_gen_movi_i32(t0, 0);
>                  gen_set_label(l1);
> -                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
> -                tcg_temp_free(t0);
> +                tcg_gen_mov_i32(cpu_R[dc->rd], t0);
> +                tcg_temp_free_i32(t0);
>              }
>              break;
>          default:
> @@ -381,9 +386,9 @@ static void dec_and(DisasContext *dc)
>          return;
>  
>      if (not) {
> -        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> +        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
>      } else
> -        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> +        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
>  }
>  
>  static void dec_or(DisasContext *dc)
> @@ -395,7 +400,7 @@ static void dec_or(DisasContext *dc)
>  
>      LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
>      if (dc->rd)
> -        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> +        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
>  }
>  
>  static void dec_xor(DisasContext *dc)
> @@ -407,31 +412,31 @@ static void dec_xor(DisasContext *dc)
>  
>      LOG_DIS("xor r%d\n", dc->rd);
>      if (dc->rd)
> -        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> +        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
>  }
>  
> -static inline void msr_read(DisasContext *dc, TCGv d)
> +static inline void msr_read(DisasContext *dc, TCGv_i32 d)
>  {
> -    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
> +    tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
>  }
>  
> -static inline void msr_write(DisasContext *dc, TCGv v)
> +static inline void msr_write(DisasContext *dc, TCGv_i32 v)
>  {
> -    TCGv t;
> +    TCGv_i32 t;
>  
> -    t = tcg_temp_new();
> +    t = tcg_temp_new_i32();
>      dc->cpustate_changed = 1;
>      /* PVR bit is not writable.  */
> -    tcg_gen_andi_tl(t, v, ~MSR_PVR);
> -    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
> -    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
> -    tcg_temp_free(t);
> +    tcg_gen_andi_i32(t, v, ~MSR_PVR);
> +    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
> +    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
> +    tcg_temp_free_i32(t);
>  }
>  
>  static void dec_msr(DisasContext *dc)
>  {
>      CPUState *cs = CPU(dc->cpu);
> -    TCGv t0, t1;
> +    TCGv_i32 t0, t1;
>      unsigned int sr, to, rn;
>      int mem_index = cpu_mmu_index(&dc->cpu->env);
>  
> @@ -455,7 +460,7 @@ static void dec_msr(DisasContext *dc)
>  
>          if ((dc->tb_flags & MSR_EE_FLAG)
>              && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
> -            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>              t_gen_raise_exception(dc, EXCP_HW_EXCP);
>              return;
>          }
> @@ -463,20 +468,20 @@ static void dec_msr(DisasContext *dc)
>          if (dc->rd)
>              msr_read(dc, cpu_R[dc->rd]);
>  
> -        t0 = tcg_temp_new();
> -        t1 = tcg_temp_new();
> +        t0 = tcg_temp_new_i32();
> +        t1 = tcg_temp_new_i32();
>          msr_read(dc, t0);
> -        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
> +        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
>  
>          if (clr) {
> -            tcg_gen_not_tl(t1, t1);
> -            tcg_gen_and_tl(t0, t0, t1);
> +            tcg_gen_not_i32(t1, t1);
> +            tcg_gen_and_i32(t0, t0, t1);
>          } else
> -            tcg_gen_or_tl(t0, t0, t1);
> +            tcg_gen_or_i32(t0, t0, t1);
>          msr_write(dc, t0);
> -        tcg_temp_free(t0);
> -        tcg_temp_free(t1);
> -	tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
> +        tcg_temp_free_i32(t0);
> +        tcg_temp_free_i32(t1);
> +        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
>          dc->is_jmp = DISAS_UPDATE;
>          return;
>      }
> @@ -484,7 +489,7 @@ static void dec_msr(DisasContext *dc)
>      if (to) {
>          if ((dc->tb_flags & MSR_EE_FLAG)
>               && mem_index == MMU_USER_IDX) {
> -            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>              t_gen_raise_exception(dc, EXCP_HW_EXCP);
>              return;
>          }
> @@ -496,9 +501,9 @@ static void dec_msr(DisasContext *dc)
>          sr &= 7;
>          LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
>          if (to)
> -            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
> +            gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
>          else
> -            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
> +            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
>          return;
>      }
>  #endif
> @@ -512,19 +517,21 @@ static void dec_msr(DisasContext *dc)
>                  msr_write(dc, cpu_R[dc->ra]);
>                  break;
>              case 0x3:
> -                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
> +                tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
>                  break;
>              case 0x5:
> -                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
> +                tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
>                  break;
>              case 0x7:
> -                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
> +                tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
>                  break;
>              case 0x800:
> -                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
> +                tcg_gen_st_i32(cpu_R[dc->ra], cpu_env,
> +                               offsetof(CPUMBState, slr));
>                  break;
>              case 0x802:
> -                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
> +                tcg_gen_st_i32(cpu_R[dc->ra], cpu_env,
> +                               offsetof(CPUMBState, shr));
>                  break;
>              default:
>                  cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
> @@ -535,28 +542,30 @@ static void dec_msr(DisasContext *dc)
>  
>          switch (sr) {
>              case 0:
> -                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
> +                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
>                  break;
>              case 1:
>                  msr_read(dc, cpu_R[dc->rd]);
>                  break;
>              case 0x3:
> -                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
> +                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
>                  break;
>              case 0x5:
> -                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
> +                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
>                  break;
>               case 0x7:
> -                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
> +                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
>                  break;
>              case 0xb:
> -                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
> +                tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
>                  break;
>              case 0x800:
> -                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
> +                tcg_gen_ld_i32(cpu_R[dc->rd], cpu_env,
> +                               offsetof(CPUMBState, slr));
>                  break;
>              case 0x802:
> -                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
> +                tcg_gen_ld_i32(cpu_R[dc->rd], cpu_env,
> +                               offsetof(CPUMBState, shr));
>                  break;
>              case 0x2000:
>              case 0x2001:
> @@ -572,7 +581,7 @@ static void dec_msr(DisasContext *dc)
>              case 0x200b:
>              case 0x200c:
>                  rn = sr & 0xf;
> -                tcg_gen_ld_tl(cpu_R[dc->rd],
> +                tcg_gen_ld_i32(cpu_R[dc->rd],
>                                cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
>                  break;
>              default:
> @@ -582,12 +591,12 @@ static void dec_msr(DisasContext *dc)
>      }
>  
>      if (dc->rd == 0) {
> -        tcg_gen_movi_tl(cpu_R[0], 0);
> +        tcg_gen_movi_i32(cpu_R[0], 0);
>      }
>  }
>  
>  /* 64-bit signed mul, lower result in d and upper in d2.  */
> -static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
> +static void t_gen_muls(TCGv_i32 d, TCGv_i32 d2, TCGv_i32 a, TCGv_i32 b)
>  {
>      TCGv_i64 t0, t1;
>  
> @@ -607,7 +616,7 @@ static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
>  }
>  
>  /* 64-bit unsigned muls, lower result in d and upper in d2.  */
> -static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
> +static void t_gen_mulu(TCGv_i32 d, TCGv_i32 d2, TCGv_i32 a, TCGv_i32 b)
>  {
>      TCGv_i64 t0, t1;
>  
> @@ -629,20 +638,20 @@ static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
>  /* Multiplier unit.  */
>  static void dec_mul(DisasContext *dc)
>  {
> -    TCGv d[2];
> +    TCGv_i32 d[2];
>      unsigned int subcode;
>  
>      if ((dc->tb_flags & MSR_EE_FLAG)
>           && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
>           && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          return;
>      }
>  
>      subcode = dc->imm & 3;
> -    d[0] = tcg_temp_new();
> -    d[1] = tcg_temp_new();
> +    d[0] = tcg_temp_new_i32();
> +    d[1] = tcg_temp_new_i32();
>  
>      if (dc->type_b) {
>          LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
> @@ -678,8 +687,8 @@ static void dec_mul(DisasContext *dc)
>              break;
>      }
>  done:
> -    tcg_temp_free(d[0]);
> -    tcg_temp_free(d[1]);
> +    tcg_temp_free_i32(d[0]);
> +    tcg_temp_free_i32(d[1]);
>  }
>  
>  /* Div unit.  */
> @@ -692,7 +701,7 @@ static void dec_div(DisasContext *dc)
>  
>      if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
>            && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>      }
>  
> @@ -703,18 +712,18 @@ static void dec_div(DisasContext *dc)
>          gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
>                          cpu_R[dc->ra]);
>      if (!dc->rd)
> -        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
> +        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
>  }
>  
>  static void dec_barrel(DisasContext *dc)
>  {
> -    TCGv t0;
> +    TCGv_i32 t0;
>      unsigned int s, t;
>  
>      if ((dc->tb_flags & MSR_EE_FLAG)
>            && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
>            && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          return;
>      }
> @@ -725,25 +734,25 @@ static void dec_barrel(DisasContext *dc)
>      LOG_DIS("bs%s%s r%d r%d r%d\n",
>              s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
>  
> -    t0 = tcg_temp_new();
> +    t0 = tcg_temp_new_i32();
>  
> -    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
> -    tcg_gen_andi_tl(t0, t0, 31);
> +    tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
> +    tcg_gen_andi_i32(t0, t0, 31);
>  
>      if (s)
> -        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
> +        tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
>      else {
>          if (t)
> -            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
> +            tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
>          else
> -            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
> +            tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
>      }
>  }
>  
>  static void dec_bit(DisasContext *dc)
>  {
>      CPUState *cs = CPU(dc->cpu);
> -    TCGv t0;
> +    TCGv_i32 t0;
>      unsigned int op;
>      int mem_index = cpu_mmu_index(&dc->cpu->env);
>  
> @@ -751,16 +760,16 @@ static void dec_bit(DisasContext *dc)
>      switch (op) {
>          case 0x21:
>              /* src.  */
> -            t0 = tcg_temp_new();
> +            t0 = tcg_temp_new_i32();
>  
>              LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
> -            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
> +            tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
>              write_carry(dc, cpu_R[dc->ra]);
>              if (dc->rd) {
> -                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
> -                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
> +                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
> +                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
>              }
> -            tcg_temp_free(t0);
> +            tcg_temp_free_i32(t0);
>              break;
>  
>          case 0x1:
> @@ -772,9 +781,9 @@ static void dec_bit(DisasContext *dc)
>              write_carry(dc, cpu_R[dc->ra]);
>              if (dc->rd) {
>                  if (op == 0x41)
> -                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
> +                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
>                  else
> -                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
> +                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
>              }
>              break;
>          case 0x60:
> @@ -793,7 +802,7 @@ static void dec_bit(DisasContext *dc)
>              LOG_DIS("wdc r%d\n", dc->ra);
>              if ((dc->tb_flags & MSR_EE_FLAG)
>                   && mem_index == MMU_USER_IDX) {
> -                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>                  t_gen_raise_exception(dc, EXCP_HW_EXCP);
>                  return;
>              }
> @@ -803,7 +812,7 @@ static void dec_bit(DisasContext *dc)
>              LOG_DIS("wic r%d\n", dc->ra);
>              if ((dc->tb_flags & MSR_EE_FLAG)
>                   && mem_index == MMU_USER_IDX) {
> -                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>                  t_gen_raise_exception(dc, EXCP_HW_EXCP);
>                  return;
>              }
> @@ -812,7 +821,7 @@ static void dec_bit(DisasContext *dc)
>              if ((dc->tb_flags & MSR_EE_FLAG)
>                  && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
>                  && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
> -                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +                tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>                  t_gen_raise_exception(dc, EXCP_HW_EXCP);
>              }
>              if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
> @@ -840,22 +849,22 @@ static inline void sync_jmpstate(DisasContext *dc)
>  {
>      if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
>          if (dc->jmp == JMP_DIRECT) {
> -            tcg_gen_movi_tl(env_btaken, 1);
> +            tcg_gen_movi_i32(env_btaken, 1);
>          }
>          dc->jmp = JMP_INDIRECT;
> -        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
> +        tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
>      }
>  }
>  
>  static void dec_imm(DisasContext *dc)
>  {
>      LOG_DIS("imm %x\n", dc->imm << 16);
> -    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
> +    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
>      dc->tb_flags |= IMM_FLAG;
>      dc->clear_imm = 0;
>  }
>  
> -static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
> +static inline TCGv_i32 *compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
>  {
>      unsigned int extimm = dc->tb_flags & IMM_FLAG;
>      /* Should be set to one if r1 is used by loadstores.  */
> @@ -879,8 +888,8 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
>              stackprot = 1;
>          }
>  
> -        *t = tcg_temp_new();
> -        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
> +        *t = tcg_temp_new_i32();
> +        tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
>  
>          if (stackprot) {
>              gen_helper_stackprot(cpu_env, *t);
> @@ -892,12 +901,12 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
>          if (dc->imm == 0) {
>              return &cpu_R[dc->ra];
>          }
> -        *t = tcg_temp_new();
> -        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
> -        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
> +        *t = tcg_temp_new_i32();
> +        tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm));
> +        tcg_gen_add_i32(*t, cpu_R[dc->ra], *t);
>      } else {
> -        *t = tcg_temp_new();
> -        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> +        *t = tcg_temp_new_i32();
> +        tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
>      }
>  
>      if (stackprot) {
> @@ -908,7 +917,7 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
>  
>  static void dec_load(DisasContext *dc)
>  {
> -    TCGv t, v, *addr;
> +    TCGv_i32 t, v, *addr;
>      unsigned int size, rev = 0, ex = 0;
>      TCGMemOp mop;
>  
> @@ -925,7 +934,7 @@ static void dec_load(DisasContext *dc)
>  
>      if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
>            && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          return;
>      }
> @@ -951,21 +960,21 @@ static void dec_load(DisasContext *dc)
>                     01 -> 10
>                     10 -> 10
>                     11 -> 00 */
> -                TCGv low = tcg_temp_new();
> +                TCGv_i32 low = tcg_temp_new_i32();
>  
>                  /* Force addr into the temp.  */
>                  if (addr != &t) {
> -                    t = tcg_temp_new();
> -                    tcg_gen_mov_tl(t, *addr);
> +                    t = tcg_temp_new_i32();
> +                    tcg_gen_mov_i32(t, *addr);
>                      addr = &t;
>                  }
>  
> -                tcg_gen_andi_tl(low, t, 3);
> -                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
> -                tcg_gen_andi_tl(t, t, ~3);
> -                tcg_gen_or_tl(t, t, low);
> -                tcg_gen_mov_tl(env_imm, t);
> -                tcg_temp_free(low);
> +                tcg_gen_andi_i32(low, t, 3);
> +                tcg_gen_sub_i32(low, tcg_const_i32(3), low);
> +                tcg_gen_andi_i32(t, t, ~3);
> +                tcg_gen_or_i32(t, t, low);
> +                tcg_gen_mov_i32(env_imm, t);
> +                tcg_temp_free_i32(low);
>                  break;
>              }
>  
> @@ -974,11 +983,11 @@ static void dec_load(DisasContext *dc)
>                     10 -> 00.  */
>                  /* Force addr into the temp.  */
>                  if (addr != &t) {
> -                    t = tcg_temp_new();
> -                    tcg_gen_xori_tl(t, *addr, 2);
> +                    t = tcg_temp_new_i32();
> +                    tcg_gen_xori_i32(t, *addr, 2);
>                      addr = &t;
>                  } else {
> -                    tcg_gen_xori_tl(t, t, 2);
> +                    tcg_gen_xori_i32(t, t, 2);
>                  }
>                  break;
>              default:
> @@ -991,11 +1000,11 @@ static void dec_load(DisasContext *dc)
>      if (ex) {
>          /* Force addr into the temp.  */
>          if (addr != &t) {
> -            t = tcg_temp_new();
> -            tcg_gen_mov_tl(t, *addr);
> +            t = tcg_temp_new_i32();
> +            tcg_gen_mov_i32(t, *addr);
>              addr = &t;
>          }
> -        tcg_gen_andi_tl(t, t, ~3);
> +        tcg_gen_andi_i32(t, t, ~3);
>      }
>  
>      /* If we get a fault on a dslot, the jmpstate better be in sync.  */
> @@ -1008,23 +1017,23 @@ static void dec_load(DisasContext *dc)
>       * into v. If the load succeeds, we verify alignment of the
>       * address and if that succeeds we write into the destination reg.
>       */
> -    v = tcg_temp_new();
> -    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
> +    v = tcg_temp_new_i32();
> +    tcg_gen_qemu_ld_i32(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
>  
>      if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
> -        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
> -        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
> -                            tcg_const_tl(0), tcg_const_tl(size - 1));
> +        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
> +        gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
> +                            tcg_const_i32(0), tcg_const_i32(size - 1));
>      }
>  
>      if (ex) {
> -        tcg_gen_mov_tl(env_res_addr, *addr);
> -        tcg_gen_mov_tl(env_res_val, v);
> +        tcg_gen_mov_i32(env_res_addr, *addr);
> +        tcg_gen_mov_i32(env_res_val, v);
>      }
>      if (dc->rd) {
> -        tcg_gen_mov_tl(cpu_R[dc->rd], v);
> +        tcg_gen_mov_i32(cpu_R[dc->rd], v);
>      }
> -    tcg_temp_free(v);
> +    tcg_temp_free_i32(v);
>  
>      if (ex) { /* lwx */
>          /* no support for for AXI exclusive so always clear C */
> @@ -1032,12 +1041,12 @@ static void dec_load(DisasContext *dc)
>      }
>  
>      if (addr == &t)
> -        tcg_temp_free(t);
> +        tcg_temp_free_i32(t);
>  }
>  
>  static void dec_store(DisasContext *dc)
>  {
> -    TCGv t, *addr, swx_addr;
> +    TCGv_i32 t, *addr, swx_addr;
>      TCGLabel *swx_skip = NULL;
>      unsigned int size, rev = 0, ex = 0;
>      TCGMemOp mop;
> @@ -1055,7 +1064,7 @@ static void dec_store(DisasContext *dc)
>  
>      if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
>            && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          return;
>      }
> @@ -1067,31 +1076,31 @@ static void dec_store(DisasContext *dc)
>      sync_jmpstate(dc);
>      addr = compute_ldst_addr(dc, &t);
>  
> -    swx_addr = tcg_temp_local_new();
> +    swx_addr = tcg_temp_local_new_i32();
>      if (ex) { /* swx */
> -        TCGv tval;
> +        TCGv_i32 tval;
>  
>          /* Force addr into the swx_addr. */
> -        tcg_gen_mov_tl(swx_addr, *addr);
> +        tcg_gen_mov_i32(swx_addr, *addr);
>          addr = &swx_addr;
>          /* swx does not throw unaligned access errors, so force alignment */
> -        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
> +        tcg_gen_andi_i32(swx_addr, swx_addr, ~3);
>  
>          write_carryi(dc, 1);
>          swx_skip = gen_new_label();
> -        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
> +        tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
>  
>          /* Compare the value loaded at lwx with current contents of
>             the reserved location.
>             FIXME: This only works for system emulation where we can expect
>             this compare and the following write to be atomic. For user
>             emulation we need to add atomicity between threads.  */
> -        tval = tcg_temp_new();
> -        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
> +        tval = tcg_temp_new_i32();
> +        tcg_gen_qemu_ld_i32(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
>                             MO_TEUL);
> -        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
> +        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
>          write_carryi(dc, 0);
> -        tcg_temp_free(tval);
> +        tcg_temp_free_i32(tval);
>      }
>  
>      if (rev && size != 4) {
> @@ -1103,21 +1112,21 @@ static void dec_store(DisasContext *dc)
>                     01 -> 10
>                     10 -> 10
>                     11 -> 00 */
> -                TCGv low = tcg_temp_new();
> +                TCGv_i32 low = tcg_temp_new_i32();
>  
>                  /* Force addr into the temp.  */
>                  if (addr != &t) {
> -                    t = tcg_temp_new();
> -                    tcg_gen_mov_tl(t, *addr);
> +                    t = tcg_temp_new_i32();
> +                    tcg_gen_mov_i32(t, *addr);
>                      addr = &t;
>                  }
>  
> -                tcg_gen_andi_tl(low, t, 3);
> -                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
> -                tcg_gen_andi_tl(t, t, ~3);
> -                tcg_gen_or_tl(t, t, low);
> -                tcg_gen_mov_tl(env_imm, t);
> -                tcg_temp_free(low);
> +                tcg_gen_andi_i32(low, t, 3);
> +                tcg_gen_sub_i32(low, tcg_const_i32(3), low);
> +                tcg_gen_andi_i32(t, t, ~3);
> +                tcg_gen_or_i32(t, t, low);
> +                tcg_gen_mov_i32(env_imm, t);
> +                tcg_temp_free_i32(low);
>                  break;
>              }
>  
> @@ -1126,11 +1135,11 @@ static void dec_store(DisasContext *dc)
>                     10 -> 00.  */
>                  /* Force addr into the temp.  */
>                  if (addr != &t) {
> -                    t = tcg_temp_new();
> -                    tcg_gen_xori_tl(t, *addr, 2);
> +                    t = tcg_temp_new_i32();
> +                    tcg_gen_xori_i32(t, *addr, 2);
>                      addr = &t;
>                  } else {
> -                    tcg_gen_xori_tl(t, t, 2);
> +                    tcg_gen_xori_i32(t, t, 2);
>                  }
>                  break;
>              default:
> @@ -1138,51 +1147,52 @@ static void dec_store(DisasContext *dc)
>                  break;
>          }
>      }
> -    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
> +    tcg_gen_qemu_st_i32(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env),
> +                        mop);
>  
>      /* Verify alignment if needed.  */
>      if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
> -        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
> +        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
>          /* FIXME: if the alignment is wrong, we should restore the value
>           *        in memory. One possible way to achieve this is to probe
>           *        the MMU prior to the memaccess, thay way we could put
>           *        the alignment checks in between the probe and the mem
>           *        access.
>           */
> -        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
> -                            tcg_const_tl(1), tcg_const_tl(size - 1));
> +        gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
> +                            tcg_const_i32(1), tcg_const_i32(size - 1));
>      }
>  
>      if (ex) {
>          gen_set_label(swx_skip);
>      }
> -    tcg_temp_free(swx_addr);
> +    tcg_temp_free_i32(swx_addr);
>  
>      if (addr == &t)
> -        tcg_temp_free(t);
> +        tcg_temp_free_i32(t);
>  }
>  
>  static inline void eval_cc(DisasContext *dc, unsigned int cc,
> -                           TCGv d, TCGv a, TCGv b)
> +                           TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
>  {
>      switch (cc) {
>          case CC_EQ:
> -            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
> +            tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
>              break;
>          case CC_NE:
> -            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
> +            tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
>              break;
>          case CC_LT:
> -            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
> +            tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
>              break;
>          case CC_LE:
> -            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
> +            tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
>              break;
>          case CC_GE:
> -            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
> +            tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
>              break;
>          case CC_GT:
> -            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
> +            tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
>              break;
>          default:
>              cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
> @@ -1190,13 +1200,13 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,
>      }
>  }
>  
> -static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
> +static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
>  {
>      TCGLabel *l1 = gen_new_label();
>      /* Conditional jmp.  */
> -    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
> -    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
> -    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
> +    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
> +    tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
> +    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
>      gen_set_label(l1);
>  }
>  
> @@ -1213,22 +1223,22 @@ static void dec_bcc(DisasContext *dc)
>      if (dslot) {
>          dc->delayed_branch = 2;
>          dc->tb_flags |= D_FLAG;
> -        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
> +        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
>                        cpu_env, offsetof(CPUMBState, bimm));
>      }
>  
>      if (dec_alu_op_b_is_small_imm(dc)) {
>          int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */
>  
> -        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
> +        tcg_gen_movi_i32(env_btarget, dc->pc + offset);
>          dc->jmp = JMP_DIRECT_CC;
>          dc->jmp_pc = dc->pc + offset;
>      } else {
>          dc->jmp = JMP_INDIRECT;
> -        tcg_gen_movi_tl(env_btarget, dc->pc);
> -        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
> +        tcg_gen_movi_i32(env_btarget, dc->pc);
> +        tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
>      }
> -    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
> +    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
>  }
>  
>  static void dec_br(DisasContext *dc)
> @@ -1254,7 +1264,7 @@ static void dec_br(DisasContext *dc)
>              tcg_gen_st_i32(tmp_1, cpu_env,
>                             -offsetof(MicroBlazeCPU, env)
>                             +offsetof(CPUState, halted));
> -            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
> +            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
>              gen_helper_raise_exception(cpu_env, tmp_hlt);
>              tcg_temp_free_i32(tmp_hlt);
>              tcg_temp_free_i32(tmp_1);
> @@ -1275,22 +1285,22 @@ static void dec_br(DisasContext *dc)
>      if (dslot) {
>          dc->delayed_branch = 2;
>          dc->tb_flags |= D_FLAG;
> -        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
> +        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
>                        cpu_env, offsetof(CPUMBState, bimm));
>      }
>      if (link && dc->rd)
> -        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
> +        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
>  
>      dc->jmp = JMP_INDIRECT;
>      if (abs) {
> -        tcg_gen_movi_tl(env_btaken, 1);
> -        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
> +        tcg_gen_movi_i32(env_btaken, 1);
> +        tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
>          if (link && !dslot) {
>              if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
>                  t_gen_raise_exception(dc, EXCP_BREAK);
>              if (dc->imm == 0) {
>                  if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
> -                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +                    tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>                      t_gen_raise_exception(dc, EXCP_HW_EXCP);
>                      return;
>                  }
> @@ -1303,63 +1313,63 @@ static void dec_br(DisasContext *dc)
>              dc->jmp = JMP_DIRECT;
>              dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
>          } else {
> -            tcg_gen_movi_tl(env_btaken, 1);
> -            tcg_gen_movi_tl(env_btarget, dc->pc);
> -            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
> +            tcg_gen_movi_i32(env_btaken, 1);
> +            tcg_gen_movi_i32(env_btarget, dc->pc);
> +            tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
>          }
>      }
>  }
>  
>  static inline void do_rti(DisasContext *dc)
>  {
> -    TCGv t0, t1;
> -    t0 = tcg_temp_new();
> -    t1 = tcg_temp_new();
> -    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
> -    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
> -    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
> -
> -    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
> -    tcg_gen_or_tl(t1, t1, t0);
> +    TCGv_i32 t0, t1;
> +    t0 = tcg_temp_new_i32();
> +    t1 = tcg_temp_new_i32();
> +    tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
> +    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
> +    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
> +
> +    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
> +    tcg_gen_or_i32(t1, t1, t0);
>      msr_write(dc, t1);
> -    tcg_temp_free(t1);
> -    tcg_temp_free(t0);
> +    tcg_temp_free_i32(t1);
> +    tcg_temp_free_i32(t0);
>      dc->tb_flags &= ~DRTI_FLAG;
>  }
>  
>  static inline void do_rtb(DisasContext *dc)
>  {
> -    TCGv t0, t1;
> -    t0 = tcg_temp_new();
> -    t1 = tcg_temp_new();
> -    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
> -    tcg_gen_shri_tl(t0, t1, 1);
> -    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
> -
> -    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
> -    tcg_gen_or_tl(t1, t1, t0);
> +    TCGv_i32 t0, t1;
> +    t0 = tcg_temp_new_i32();
> +    t1 = tcg_temp_new_i32();
> +    tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
> +    tcg_gen_shri_i32(t0, t1, 1);
> +    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
> +
> +    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
> +    tcg_gen_or_i32(t1, t1, t0);
>      msr_write(dc, t1);
> -    tcg_temp_free(t1);
> -    tcg_temp_free(t0);
> +    tcg_temp_free_i32(t1);
> +    tcg_temp_free_i32(t0);
>      dc->tb_flags &= ~DRTB_FLAG;
>  }
>  
>  static inline void do_rte(DisasContext *dc)
>  {
> -    TCGv t0, t1;
> -    t0 = tcg_temp_new();
> -    t1 = tcg_temp_new();
> +    TCGv_i32 t0, t1;
> +    t0 = tcg_temp_new_i32();
> +    t1 = tcg_temp_new_i32();
>  
> -    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
> -    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
> -    tcg_gen_shri_tl(t0, t1, 1);
> -    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
> +    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
> +    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
> +    tcg_gen_shri_i32(t0, t1, 1);
> +    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
>  
> -    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
> -    tcg_gen_or_tl(t1, t1, t0);
> +    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
> +    tcg_gen_or_i32(t1, t1, t0);
>      msr_write(dc, t1);
> -    tcg_temp_free(t1);
> -    tcg_temp_free(t0);
> +    tcg_temp_free_i32(t1);
> +    tcg_temp_free_i32(t0);
>      dc->tb_flags &= ~DRTE_FLAG;
>  }
>  
> @@ -1374,14 +1384,14 @@ static void dec_rts(DisasContext *dc)
>  
>      dc->delayed_branch = 2;
>      dc->tb_flags |= D_FLAG;
> -    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
> +    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
>                    cpu_env, offsetof(CPUMBState, bimm));
>  
>      if (i_bit) {
>          LOG_DIS("rtid ir=%x\n", dc->ir);
>          if ((dc->tb_flags & MSR_EE_FLAG)
>               && mem_index == MMU_USER_IDX) {
> -            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>              t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          }
>          dc->tb_flags |= DRTI_FLAG;
> @@ -1389,7 +1399,7 @@ static void dec_rts(DisasContext *dc)
>          LOG_DIS("rtbd ir=%x\n", dc->ir);
>          if ((dc->tb_flags & MSR_EE_FLAG)
>               && mem_index == MMU_USER_IDX) {
> -            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>              t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          }
>          dc->tb_flags |= DRTB_FLAG;
> @@ -1397,7 +1407,7 @@ static void dec_rts(DisasContext *dc)
>          LOG_DIS("rted ir=%x\n", dc->ir);
>          if ((dc->tb_flags & MSR_EE_FLAG)
>               && mem_index == MMU_USER_IDX) {
> -            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>              t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          }
>          dc->tb_flags |= DRTE_FLAG;
> @@ -1405,8 +1415,8 @@ static void dec_rts(DisasContext *dc)
>          LOG_DIS("rts ir=%x\n", dc->ir);
>  
>      dc->jmp = JMP_INDIRECT;
> -    tcg_gen_movi_tl(env_btaken, 1);
> -    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
> +    tcg_gen_movi_i32(env_btaken, 1);
> +    tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
>  }
>  
>  static int dec_check_fpuv2(DisasContext *dc)
> @@ -1416,7 +1426,7 @@ static int dec_check_fpuv2(DisasContext *dc)
>      r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;
>  
>      if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>      }
>      return r;
> @@ -1429,7 +1439,7 @@ static void dec_fpu(DisasContext *dc)
>      if ((dc->tb_flags & MSR_EE_FLAG)
>            && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
>            && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          return;
>      }
> @@ -1531,7 +1541,7 @@ static void dec_null(DisasContext *dc)
>  {
>      if ((dc->tb_flags & MSR_EE_FLAG)
>            && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          return;
>      }
> @@ -1550,29 +1560,29 @@ static void dec_stream(DisasContext *dc)
>              dc->type_b ? "" : "d", dc->imm);
>  
>      if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
> -        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
> +        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
>          t_gen_raise_exception(dc, EXCP_HW_EXCP);
>          return;
>      }
>  
> -    t_id = tcg_temp_new();
> +    t_id = tcg_temp_new_i32();
>      if (dc->type_b) {
> -        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
> +        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
>          ctrl = dc->imm >> 10;
>      } else {
> -        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
> +        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
>          ctrl = dc->imm >> 5;
>      }
>  
> -    t_ctrl = tcg_const_tl(ctrl);
> +    t_ctrl = tcg_const_i32(ctrl);
>  
>      if (dc->rd == 0) {
>          gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
>      } else {
>          gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
>      }
> -    tcg_temp_free(t_id);
> -    tcg_temp_free(t_ctrl);
> +    tcg_temp_free_i32(t_id);
> +    tcg_temp_free_i32(t_ctrl);
>  }
>  
>  static struct decoder_info {
> @@ -1620,7 +1630,7 @@ static inline void decode(DisasContext *dc, uint32_t ir)
>          if ((dc->tb_flags & MSR_EE_FLAG)
>                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
>                && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
> -            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
> +            tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
>              t_gen_raise_exception(dc, EXCP_HW_EXCP);
>              return;
>          }
> @@ -1675,8 +1685,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
>      int j, lj;
>      struct DisasContext ctx;
>      struct DisasContext *dc = &ctx;
> -    uint32_t next_page_start, org_flags;
> -    target_ulong npc;
> +    uint32_t next_page_start, org_flags, npc;
>      int num_insns;
>      int max_insns;
>  
> @@ -1720,7 +1729,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
>      {
>  #if SIM_COMPAT
>          if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
> -            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
> +            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
>              gen_helper_debug();
>          }
>  #endif
> @@ -1764,7 +1773,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
>                  dc->tb_flags &= ~D_FLAG;
>                  /* If it is a direct jump, try direct chaining.  */
>                  if (dc->jmp == JMP_INDIRECT) {
> -                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
> +                    eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
>                      dc->is_jmp = DISAS_JUMP;
>                  } else if (dc->jmp == JMP_DIRECT) {
>                      t_sync_flags(dc);
> @@ -1774,7 +1783,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
>                      TCGLabel *l1 = gen_new_label();
>                      t_sync_flags(dc);
>                      /* Conditional jmp.  */
> -                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
> +                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
>                      gen_goto_tb(dc, 1, dc->pc);
>                      gen_set_label(l1);
>                      gen_goto_tb(dc, 0, dc->jmp_pc);
> @@ -1797,7 +1806,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
>      if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
>          if (dc->tb_flags & D_FLAG) {
>              dc->is_jmp = DISAS_UPDATE;
> -            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
> +            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
>              sync_jmpstate(dc);
>          } else
>              npc = dc->jmp_pc;
> @@ -1809,7 +1818,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
>      if (dc->is_jmp == DISAS_NEXT
>          && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
>          dc->is_jmp = DISAS_UPDATE;
> -        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
> +        tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
>      }
>      t_sync_flags(dc);
>  
> @@ -1817,7 +1826,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
>          TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
>  
>          if (dc->is_jmp != DISAS_JUMP) {
> -            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
> +            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
>          }
>          gen_helper_raise_exception(cpu_env, tmp);
>          tcg_temp_free_i32(tmp);
> @@ -1922,34 +1931,34 @@ void mb_tcg_init(void)
>  
>      cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
>  
> -    env_debug = tcg_global_mem_new(TCG_AREG0, 
> +    env_debug = tcg_global_mem_new_i32(TCG_AREG0,
>                      offsetof(CPUMBState, debug),
>                      "debug0");
> -    env_iflags = tcg_global_mem_new(TCG_AREG0, 
> +    env_iflags = tcg_global_mem_new_i32(TCG_AREG0,
>                      offsetof(CPUMBState, iflags),
>                      "iflags");
> -    env_imm = tcg_global_mem_new(TCG_AREG0, 
> +    env_imm = tcg_global_mem_new_i32(TCG_AREG0,
>                      offsetof(CPUMBState, imm),
>                      "imm");
> -    env_btarget = tcg_global_mem_new(TCG_AREG0,
> +    env_btarget = tcg_global_mem_new_i32(TCG_AREG0,
>                       offsetof(CPUMBState, btarget),
>                       "btarget");
> -    env_btaken = tcg_global_mem_new(TCG_AREG0,
> +    env_btaken = tcg_global_mem_new_i32(TCG_AREG0,
>                       offsetof(CPUMBState, btaken),
>                       "btaken");
> -    env_res_addr = tcg_global_mem_new(TCG_AREG0,
> +    env_res_addr = tcg_global_mem_new_i32(TCG_AREG0,
>                       offsetof(CPUMBState, res_addr),
>                       "res_addr");
> -    env_res_val = tcg_global_mem_new(TCG_AREG0,
> +    env_res_val = tcg_global_mem_new_i32(TCG_AREG0,
>                       offsetof(CPUMBState, res_val),
>                       "res_val");
>      for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
> -        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
> +        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
>                            offsetof(CPUMBState, regs[i]),
>                            regnames[i]);
>      }
>      for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
> -        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
> +        cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
>                            offsetof(CPUMBState, sregs[i]),
>                            special_regnames[i]);
>      }
> -- 
> 1.9.1
> 

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong
  2015-05-15  4:49 [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong Peter Crosthwaite
  2015-05-15  5:56 ` Edgar E. Iglesias
@ 2015-05-15 15:41 ` Richard Henderson
  2015-05-15 16:48   ` Peter Crosthwaite
  1 sibling, 1 reply; 8+ messages in thread
From: Richard Henderson @ 2015-05-15 15:41 UTC (permalink / raw)
  To: Peter Crosthwaite, qemu-devel
  Cc: peter.maydell, edgari, afaerber, Peter Crosthwaite

On 05/14/2015 09:49 PM, Peter Crosthwaite wrote:
> To prepare support for conversion of Microblaze TARGET_LONG to 64 bits.
> This in turn will then allow support for multi-arch QEMU including both
> Microblaze and 64-bit CPU targets (notably AArch64).

I don't understand why multi-arch requires all of the arches
to have the same width.  This seems like a major failure to me.

I'm not particularly keen on this at all.


r~

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong
  2015-05-15 15:41 ` Richard Henderson
@ 2015-05-15 16:48   ` Peter Crosthwaite
  2015-05-15 17:17     ` Richard Henderson
  0 siblings, 1 reply; 8+ messages in thread
From: Peter Crosthwaite @ 2015-05-15 16:48 UTC (permalink / raw)
  To: Richard Henderson
  Cc: Peter Maydell, Peter Crosthwaite, Peter Crosthwaite,
	qemu-devel@nongnu.org Developers, Edgar Iglesias,
	Andreas Färber

On Fri, May 15, 2015 at 8:41 AM, Richard Henderson <rth@twiddle.net> wrote:
> On 05/14/2015 09:49 PM, Peter Crosthwaite wrote:
>> To prepare support for conversion of Microblaze TARGET_LONG to 64 bits.
>> This in turn will then allow support for multi-arch QEMU including both
>> Microblaze and 64-bit CPU targets (notably AArch64).
>
> I don't understand why multi-arch requires all of the arches
> to have the same width.  This seems like a major failure to me.
>
> I'm not particularly keen on this at all.
>

What is the alternative? What is the def of the global symbols TCGv
and TARGET_LONG in the multi-arch cases?

Regards,
Peter

>
> r~
>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong
  2015-05-15 16:48   ` Peter Crosthwaite
@ 2015-05-15 17:17     ` Richard Henderson
  2015-05-15 20:52       ` Peter Crosthwaite
  0 siblings, 1 reply; 8+ messages in thread
From: Richard Henderson @ 2015-05-15 17:17 UTC (permalink / raw)
  To: Peter Crosthwaite
  Cc: Peter Maydell, Peter Crosthwaite, Peter Crosthwaite,
	qemu-devel@nongnu.org Developers, Edgar Iglesias,
	Andreas Färber

On 05/15/2015 09:48 AM, Peter Crosthwaite wrote:
> On Fri, May 15, 2015 at 8:41 AM, Richard Henderson <rth@twiddle.net> wrote:
>> On 05/14/2015 09:49 PM, Peter Crosthwaite wrote:
>>> To prepare support for conversion of Microblaze TARGET_LONG to 64 bits.
>>> This in turn will then allow support for multi-arch QEMU including both
>>> Microblaze and 64-bit CPU targets (notably AArch64).
>>
>> I don't understand why multi-arch requires all of the arches
>> to have the same width.  This seems like a major failure to me.
>>
>> I'm not particularly keen on this at all.
>>
> 
> What is the alternative? What is the def of the global symbols TCGv
> and TARGET_LONG in the multi-arch cases?

Different for every file?  Not relevant for "multi-arch" itself?
I dunno.  Where does stuff break down first?

I would expect 80% of it to be private to target-foo, and /mostly/ encapsulated
in the tcg ops that are produced.

I realize there's a problem of how addresses are treated inside the tcg
backend, but that should be surmountable.  Perhaps all we need are 4 new
opcodes so that both 32-bit and 64-bit addresses can be represented within the
opcode stream simultaneously.

I assume we're not still talking about multi-arch linux-user, but are really
only talking about softmmu here...


r~

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong
  2015-05-15 17:17     ` Richard Henderson
@ 2015-05-15 20:52       ` Peter Crosthwaite
  2015-05-18 18:37         ` Richard Henderson
  0 siblings, 1 reply; 8+ messages in thread
From: Peter Crosthwaite @ 2015-05-15 20:52 UTC (permalink / raw)
  To: Richard Henderson
  Cc: Peter Maydell, Peter Crosthwaite, Peter Crosthwaite,
	qemu-devel@nongnu.org Developers, Edgar Iglesias,
	Andreas Färber

On Fri, May 15, 2015 at 10:17 AM, Richard Henderson <rth@twiddle.net> wrote:
> On 05/15/2015 09:48 AM, Peter Crosthwaite wrote:
>> On Fri, May 15, 2015 at 8:41 AM, Richard Henderson <rth@twiddle.net> wrote:
>>> On 05/14/2015 09:49 PM, Peter Crosthwaite wrote:
>>>> To prepare support for conversion of Microblaze TARGET_LONG to 64 bits.
>>>> This in turn will then allow support for multi-arch QEMU including both
>>>> Microblaze and 64-bit CPU targets (notably AArch64).
>>>
>>> I don't understand why multi-arch requires all of the arches
>>> to have the same width.  This seems like a major failure to me.
>>>
>>> I'm not particularly keen on this at all.
>>>
>>
>> What is the alternative? What is the def of the global symbols TCGv
>> and TARGET_LONG in the multi-arch cases?
>
> Different for every file?  Not relevant for "multi-arch" itself?

So TCGv is generally localized to target-foo so it makes sense to
per-target'ify that one. Core code (or worse, device land code), does
make liberal use of target_ulong though so that is relevant to
multi-arch. Multi-arch needs to make the decision on what that length
is and currently that is 64.

Multiple choice quiz time again :) We have three choices that I can see:

1: We can undef-redefine the (global) target_ulong type set by
multi-arch for target-foo code. target_ulong then is different for
every file. Some explicit casts may be needed around core-API
interfaces, not sure yet.
2: Core code is converted to not use "target_ulong" (tcg_core_ulong?)
and target_ulong becomes a target-specific code concept.
3: Remove target_ulong usage from target-foo code for multi-arch
capable platforms. The TCGv remains unconverted however, greatly
minimising this diff.

> I dunno.  Where does stuff break down first?
>
> I would expect 80% of it to be private to target-foo, and /mostly/ encapsulated
> in the tcg ops that are produced.
>
> I realize there's a problem of how addresses are treated inside the tcg
> backend, but that should be surmountable.  Perhaps all we need are 4 new
> opcodes so that both 32-bit and 64-bit addresses can be represented within the
> opcode stream simultaneously.
>
> I assume we're not still talking about multi-arch linux-user, but are really
> only talking about softmmu here...
>

Yes. Linux-user needs major refactorings (getting rid of the #ifdef
TARGET_FOO for one) before we can think about that.

Regards,
Peter

>
> r~
>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong
  2015-05-15 20:52       ` Peter Crosthwaite
@ 2015-05-18 18:37         ` Richard Henderson
  2015-05-18 18:43           ` Peter Maydell
  0 siblings, 1 reply; 8+ messages in thread
From: Richard Henderson @ 2015-05-18 18:37 UTC (permalink / raw)
  To: Peter Crosthwaite
  Cc: Peter Maydell, Peter Crosthwaite, Peter Crosthwaite,
	qemu-devel@nongnu.org Developers, Edgar Iglesias,
	Andreas Färber

On 05/15/2015 01:52 PM, Peter Crosthwaite wrote:
> 2: Core code is converted to not use "target_ulong" (tcg_core_ulong?)
> and target_ulong becomes a target-specific code concept.

This is my favorite.

I did poke around with new tcg opcodes to break some of the TARGET_LONG_BITS
usage in the tcg backend.  It wasn't terribly difficult until I reached the
memory helpers.

Aside from target_ulong addr, there's also TARGET_PAGE_MASK, TARGET_PAGE_SIZE,
TARGET_PAGE_BITS, CPU_TLB_SIZE.  What's our position on the compatibility of
the mmu between the multi-arch platforms?  I assume at the moment they must be
all the same?

Of course, I've mentioned before that ARM might well benefit from having these
values be variable and not constant at all.  Primarily because of the armv5 1K
pages affecting aarch64.



r~

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong
  2015-05-18 18:37         ` Richard Henderson
@ 2015-05-18 18:43           ` Peter Maydell
  0 siblings, 0 replies; 8+ messages in thread
From: Peter Maydell @ 2015-05-18 18:43 UTC (permalink / raw)
  To: Richard Henderson
  Cc: Peter Crosthwaite, Peter Crosthwaite, Peter Crosthwaite,
	qemu-devel@nongnu.org Developers, Edgar Iglesias,
	Andreas Färber

On 18 May 2015 at 19:37, Richard Henderson <rth@twiddle.net> wrote:
> Aside from target_ulong addr, there's also TARGET_PAGE_MASK, TARGET_PAGE_SIZE,
> TARGET_PAGE_BITS, CPU_TLB_SIZE.  What's our position on the compatibility of
> the mmu between the multi-arch platforms?  I assume at the moment they must be
> all the same?

In the general case we have to support these being different between
different CPUs...

> Of course, I've mentioned before that ARM might well benefit from having these
> values be variable and not constant at all.  Primarily because of the armv5 1K
> pages affecting aarch64.

Yes, it would be nice if we got a variable TARGET_PAGE_SIZE. (Also true
for 32-bit ARMv7, for that matter.)

-- PMM

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2015-05-18 18:43 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-05-15  4:49 [Qemu-devel] [PATCH] microblaze: Remove uses of TCGv and target_ulong Peter Crosthwaite
2015-05-15  5:56 ` Edgar E. Iglesias
2015-05-15 15:41 ` Richard Henderson
2015-05-15 16:48   ` Peter Crosthwaite
2015-05-15 17:17     ` Richard Henderson
2015-05-15 20:52       ` Peter Crosthwaite
2015-05-18 18:37         ` Richard Henderson
2015-05-18 18:43           ` Peter Maydell

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.