From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PULL 10/45] target/arm: Convert v8.2-fp16 from feature bit to aa64pfr0 test
Date: Fri, 19 Oct 2018 17:57:00 +0100
Message-ID: <20181019165735.22511-11-peter.maydell@linaro.org>
In-Reply-To: <20181019165735.22511-1-peter.maydell@linaro.org>

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-9-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h           | 17 +++++++++++++++-
 linux-user/elfload.c       |  6 +-----
 target/arm/cpu64.c         | 16 ++++++++-------
 target/arm/helper.c        |  2 +-
 target/arm/translate-a64.c | 40 +++++++++++++++++++-------------------
 target/arm/translate.c     |  6 +++---
 6 files changed, 50 insertions(+), 37 deletions(-)
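
For reference, the pattern this patch adopts throughout the translator: rather
than testing a QEMU-internal ARM_FEATURE_* bit, the code now reads the FP field
of the ID_AA64PFR0 register, where a value of 1 advertises half-precision
support. Below is a minimal standalone sketch of that kind of ID-register field
test; the helper names (field_ex64, aa64_fp16_supported) are illustrative
stand-ins rather than QEMU's FIELD_EX64/cpu_isar_feature machinery, and the FP
field position (bits [19:16]) comes from the Arm ARM, not from this patch.

/*
 * Sketch only: models the "ID register field == 1" test used in this patch.
 * The field layout assumed here (ID_AA64PFR0_EL1.FP at bits [19:16], value 1
 * meaning FP implemented including half-precision) is per the Arm ARM; the
 * helper names are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Extract the bit field [pos, pos+len) from a 64-bit ID register value. */
static uint64_t field_ex64(uint64_t reg, unsigned pos, unsigned len)
{
    return (reg >> pos) & ((1ULL << len) - 1);
}

/* ID_AA64PFR0_EL1.FP == 1: FP implemented, including half-precision. */
static bool aa64_fp16_supported(uint64_t id_aa64pfr0)
{
    return field_ex64(id_aa64pfr0, 16, 4) == 1;
}

int main(void)
{
    uint64_t pfr0_base = 0;                  /* FP == 0: FP present, no FP16 */
    uint64_t pfr0_fp16 = (uint64_t)1 << 16;  /* FP == 1: FP16 advertised */

    printf("base cpu: fp16 %s\n", aa64_fp16_supported(pfr0_base) ? "yes" : "no");
    printf("max cpu:  fp16 %s\n", aa64_fp16_supported(pfr0_fp16) ? "yes" : "no");
    return 0;
}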

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 9750199ba27..895f9909d80 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1602,7 +1602,6 @@ enum arm_features {
     ARM_FEATURE_PMU, /* has PMU support */
     ARM_FEATURE_VBAR, /* has cp15 VBAR */
     ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
-    ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
     ARM_FEATURE_M_MAIN, /* M profile Main Extension */
 };
 
@@ -3217,6 +3216,16 @@ static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
     return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
 }
 
+static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
+{
+    /*
+     * This is a placeholder for use by VCMA until the rest of
+     * the ARMv8.2-FP16 extension is implemented for aa32 mode.
+     * At which point we can properly set and check MVFR1.FPHP.
+     */
+    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+}
+
 /*
  * 64-bit feature tests via id registers.
  */
@@ -3285,6 +3294,12 @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
 }
 
+static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
+{
+    /* We always set the AdvSIMD and FP fields identically wrt FP16.  */
+    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+}
+
 static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 64e41fb235c..45d6836bb96 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -573,8 +573,6 @@ static uint32_t get_elf_hwcap(void)
     hwcaps |= ARM_HWCAP_A64_ASIMD;
 
     /* probe for the extra features */
-#define GET_FEATURE(feat, hwcap) \
-    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
 #define GET_FEATURE_ID(feat, hwcap) \
     do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
 
@@ -587,15 +585,13 @@ static uint32_t get_elf_hwcap(void)
     GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
     GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
     GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
-    GET_FEATURE(ARM_FEATURE_V8_FP16,
-                ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
     GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
     GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
     GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
     GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
     GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
 
-#undef GET_FEATURE
 #undef GET_FEATURE_ID
 
     return hwcaps;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 0520a421964..873f059bf22 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -320,6 +320,8 @@ static void aarch64_max_initfn(Object *obj)
 
         t = cpu->isar.id_aa64pfr0;
         t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
+        t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
+        t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
         cpu->isar.id_aa64pfr0 = t;
 
         /* Replicate the same data to the 32-bit id registers.  */
@@ -336,14 +338,14 @@ static void aarch64_max_initfn(Object *obj)
         u = FIELD_DP32(u, ID_ISAR6, DP, 1);
         cpu->isar.id_isar6 = u;
 
-#ifdef CONFIG_USER_ONLY
-        /* We don't set these in system emulation mode for the moment,
-         * since we don't correctly set the ID registers to advertise them,
-         * and in some cases they're only available in AArch64 and not AArch32,
-         * whereas the architecture requires them to be present in both if
-         * present in either.
+        /*
+         * FIXME: We do not yet support ARMv8.2-fp16 for AArch32,
+         * so do not set MVFR1.FPHP.  Strictly speaking this is not legal,
+         * but it is also not legal to enable SVE without support for FP16,
+         * and enabling SVE in system mode is more useful in the short term.
          */
-        set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
+
+#ifdef CONFIG_USER_ONLY
         /* For usermode -cpu max we can use a larger and more efficient DCZ
          * blocksize since we don't have to follow what the hardware does.
          */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 0685c9bc934..9a0e92f286c 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -11612,7 +11612,7 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
     uint32_t changed;
 
     /* When ARMv8.2-FP16 is not supported, FZ16 is RES0.  */
-    if (!arm_feature(env, ARM_FEATURE_V8_FP16)) {
+    if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
         val &= ~FPCR_FZ16;
     }
 
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 3feb786007b..09c7c4af047 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -4805,7 +4805,7 @@ static void disas_fp_compare(DisasContext *s, uint32_t insn)
         break;
     case 3:
         size = MO_16;
-        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (dc_isar_feature(aa64_fp16, s)) {
             break;
         }
         /* fallthru */
@@ -4856,7 +4856,7 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
         break;
     case 3:
         size = MO_16;
-        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (dc_isar_feature(aa64_fp16, s)) {
             break;
         }
         /* fallthru */
@@ -4922,7 +4922,7 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
         break;
     case 3:
         sz = MO_16;
-        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (dc_isar_feature(aa64_fp16, s)) {
             break;
         }
         /* fallthru */
@@ -5255,7 +5255,7 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
             handle_fp_1src_double(s, opcode, rd, rn);
             break;
         case 3:
-            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+            if (!dc_isar_feature(aa64_fp16, s)) {
                 unallocated_encoding(s);
                 return;
             }
@@ -5470,7 +5470,7 @@ static void disas_fp_2src(DisasContext *s, uint32_t insn)
         handle_fp_2src_double(s, opcode, rd, rn, rm);
         break;
     case 3:
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (!dc_isar_feature(aa64_fp16, s)) {
             unallocated_encoding(s);
             return;
         }
@@ -5628,7 +5628,7 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
         handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
         break;
     case 3:
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (!dc_isar_feature(aa64_fp16, s)) {
             unallocated_encoding(s);
             return;
         }
@@ -5698,7 +5698,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn)
         break;
     case 3:
         sz = MO_16;
-        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (dc_isar_feature(aa64_fp16, s)) {
             break;
         }
         /* fallthru */
@@ -5923,7 +5923,7 @@ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
     case 1: /* float64 */
         break;
     case 3: /* float16 */
-        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (dc_isar_feature(aa64_fp16, s)) {
             break;
         }
         /* fallthru */
@@ -6053,7 +6053,7 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
             break;
         case 0x6: /* 16-bit float, 32-bit int */
         case 0xe: /* 16-bit float, 64-bit int */
-            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+            if (dc_isar_feature(aa64_fp16, s)) {
                 break;
             }
             /* fallthru */
@@ -6080,7 +6080,7 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
         case 1: /* float64 */
             break;
         case 3: /* float16 */
-            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+            if (dc_isar_feature(aa64_fp16, s)) {
                 break;
             }
             /* fallthru */
@@ -6517,7 +6517,7 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
          */
         is_min = extract32(size, 1, 1);
         is_fp = true;
-        if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
             size = 1;
         } else if (!is_u || !is_q || extract32(size, 0, 1)) {
             unallocated_encoding(s);
@@ -6913,7 +6913,7 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
 
     if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
         /* Check for FMOV (vector, immediate) - half-precision */
-        if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
+        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
             unallocated_encoding(s);
             return;
         }
@@ -7080,7 +7080,7 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
     case 0x2f: /* FMINP */
         /* FP op, size[0] is 32 or 64 bit*/
         if (!u) {
-            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+            if (!dc_isar_feature(aa64_fp16, s)) {
                 unallocated_encoding(s);
                 return;
             } else {
@@ -7725,7 +7725,7 @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
         size = MO_32;
     } else if (immh & 2) {
         size = MO_16;
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (!dc_isar_feature(aa64_fp16, s)) {
             unallocated_encoding(s);
             return;
         }
@@ -7770,7 +7770,7 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
         size = MO_32;
     } else if (immh & 0x2) {
         size = MO_16;
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+        if (!dc_isar_feature(aa64_fp16, s)) {
             unallocated_encoding(s);
             return;
         }
@@ -8534,7 +8534,7 @@ static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
         return;
     }
 
-    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+    if (!dc_isar_feature(aa64_fp16, s)) {
         unallocated_encoding(s);
     }
 
@@ -11215,7 +11215,7 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
     TCGv_ptr fpst;
     bool pairwise = false;
 
-    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+    if (!dc_isar_feature(aa64_fp16, s)) {
         unallocated_encoding(s);
         return;
     }
@@ -11430,7 +11430,7 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
     case 0x1c: /* FCADD, #90 */
     case 0x1e: /* FCADD, #270 */
         if (size == 0
-            || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
+            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
             || (size == 3 && !is_q)) {
             unallocated_encoding(s);
             return;
@@ -12310,7 +12310,7 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
     bool need_fpst = true;
     int rmode;
 
-    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+    if (!dc_isar_feature(aa64_fp16, s)) {
         unallocated_encoding(s);
         return;
     }
@@ -12727,7 +12727,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
         }
         break;
     }
-    if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+    if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
         unallocated_encoding(s);
         return;
     }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index e56b5cdff77..42dec1f7358 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -7812,7 +7812,7 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
         int size = extract32(insn, 20, 1);
         data = extract32(insn, 23, 2); /* rot */
         if (!dc_isar_feature(aa32_vcma, s)
-            || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
+            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
             return 1;
         }
         fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
@@ -7821,7 +7821,7 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
         int size = extract32(insn, 20, 1);
         data = extract32(insn, 24, 1); /* rot */
         if (!dc_isar_feature(aa32_vcma, s)
-            || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
+            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
             return 1;
         }
         fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
@@ -7894,7 +7894,7 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
             return 1;
         }
         if (size == 0) {
-            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+            if (!dc_isar_feature(aa32_fp16_arith, s)) {
                 return 1;
             }
             /* For fp16, rm is just Vm, and index is M.  */
-- 
2.19.1
