From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, qemu-arm@nongnu.org, steplong@quicinc.com
Subject: [PATCH v7 15/42] target/arm: Implement LDG, STG, ST2G instructions
Date: Tue,  2 Jun 2020 18:12:50 -0700
Message-ID: <20200603011317.473934-16-richard.henderson@linaro.org>
In-Reply-To: <20200603011317.473934-1-richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Split out allocation_tag_mem.  Handle atomicity of stores.
v3: Add X[t] input to these insns; require pre-cleaned addresses.
v5: Fix non-32-byte-aligned operation of st2g.
v6: Fix op2 extract, stg pre/post-index, stores vs sp, commentary;
    use pre-computed ata.
v7: Fix STZG iteration (Stephen Long)
---
 target/arm/helper-a64.h    |   7 ++
 target/arm/mte_helper.c    | 194 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-a64.c | 158 +++++++++++++++++++++++++++++-
 3 files changed, 354 insertions(+), 5 deletions(-)

diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index 6c116481e8..2fa61b86fa 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -106,3 +106,10 @@ DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
 
 DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
+DEF_HELPER_FLAGS_3(ldg, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(stg, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_3(stg_parallel, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(stg_stub, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_3(st2g, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_3(st2g_parallel, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(st2g_stub, TCG_CALL_NO_WG, void, env, i64)
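
As a reading aid: each DEF_HELPER_FLAGS_N line both declares the helper and
registers its calling convention with TCG. A rough sketch of what the ldg
declaration above corresponds to (parameter names are mine, and the macro
expansion is paraphrased, not literal):

    /* i64 result, env plus two i64 arguments.  TCG_CALL_NO_WG promises
     * that the helper does not write TCG globals, though it may still
     * access guest memory and raise exceptions. */
    uint64_t helper_ldg(CPUARMState *env, uint64_t ptr, uint64_t xt);

The translator calls it through the generated gen_helper_ldg() wrapper, as
seen in the translate-a64.c hunk below.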
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index 9ab9ed749d..7ec7930dfc 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -44,6 +44,40 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
     return tag;
 }
 
+/**
+ * allocation_tag_mem:
+ * @env: the cpu environment
+ * @ptr_mmu_idx: the addressing regime to use for the virtual address
+ * @ptr: the virtual address for which to look up tag memory
+ * @ptr_access: the access to use for the virtual address
+ * @ptr_size: the number of bytes in the normal memory access
+ * @tag_access: the access to use for the tag memory
+ * @tag_size: the number of bytes in the tag memory access
+ * @ra: the return address for exception handling
+ *
+ * Our tag memory is formatted as a sequence of little-endian nibbles.
+ * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
+ * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
+ * for the higher addr.
+ *
+ * Here, resolve the physical address from the virtual address, and return
+ * a pointer to the corresponding tag byte.  Exit with exception if the
+ * virtual address is not accessible for @ptr_access.
+ *
+ * The @ptr_size and @tag_size values may not have an obvious relation
+ * due to the alignment of @ptr and the number of tag checks required.
+ *
+ * If there is no tag storage corresponding to @ptr, return NULL.
+ */
+static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
+                                   uint64_t ptr, MMUAccessType ptr_access,
+                                   int ptr_size, MMUAccessType tag_access,
+                                   int tag_size, uintptr_t ra)
+{
+    /* Tag storage not implemented.  */
+    return NULL;
+}
+
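    /*
     * Illustrative sketch (not part of the patch) of the nibble addressing
     * described in the comment above, assuming LOG2_TAG_GRANULE == 4
     * (TAG_GRANULE == 16) as defined earlier in this series; tag_base is a
     * hypothetical pointer to the start of tag storage:
     *
     *     uint8_t *tag_byte = tag_base + (ptr >> 5);  // LOG2_TAG_GRANULE + 1
     *     int ofs = extract32(ptr, 4, 1) * 4;         // bit 4 picks the nibble
     *     int tag = extract32(*tag_byte, ofs, 4);     // [3:0] low, [7:4] high
     */
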
 uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
 {
     int rtag;
@@ -80,3 +114,163 @@ uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
 
     return address_with_allocation_tag(ptr + offset, rtag);
 }
+
+static int load_tag1(uint64_t ptr, uint8_t *mem)
+{
+    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
+    return extract32(*mem, ofs, 4);
+}
+
+uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+    int mmu_idx = cpu_mmu_index(env, false);
+    uint8_t *mem;
+    int rtag = 0;
+
+    /* Trap if accessing an invalid page.  */
+    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
+                             MMU_DATA_LOAD, 1, GETPC());
+
+    /* Load if page supports tags. */
+    if (mem) {
+        rtag = load_tag1(ptr, mem);
+    }
+
+    return address_with_allocation_tag(xt, rtag);
+}
+
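    /*
     * Illustrative note: address_with_allocation_tag(), introduced earlier
     * in this series, deposits the 4-bit tag into bits [59:56] of the
     * value, so e.g. xt == 0x0000000040001000 with rtag == 0xa returns
     * 0x0a00000040001000.
     */
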
+static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
+{
+    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
+        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
+                                    cpu_mmu_index(env, false), ra);
+        g_assert_not_reached();
+    }
+}
+
+/* For use in a non-parallel context, store to the given nibble.  */
+static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
+{
+    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
+    *mem = deposit32(*mem, ofs, 4, tag);
+}
+
+/* For use in a parallel context, atomically store to the given nibble.  */
+static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
+{
+    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
+    uint8_t old = atomic_read(mem);
+
+    while (1) {
+        uint8_t new = deposit32(old, ofs, 4, tag);
+        uint8_t cmp = atomic_cmpxchg(mem, old, new);
+        if (likely(cmp == old)) {
+            return;
+        }
+        old = cmp;
+    }
+}
+
+typedef void stg_store1(uint64_t, uint8_t *, int);
+
+static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
+                          uintptr_t ra, stg_store1 store1)
+{
+    int mmu_idx = cpu_mmu_index(env, false);
+    uint8_t *mem;
+
+    check_tag_aligned(env, ptr, ra);
+
+    /* Trap if accessing an invalid page.  */
+    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
+                             MMU_DATA_STORE, 1, ra);
+
+    /* Store if page supports tags. */
+    if (mem) {
+        store1(ptr, mem, allocation_tag_from_addr(xt));
+    }
+}
+
+void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+    do_stg(env, ptr, xt, GETPC(), store_tag1);
+}
+
+void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
+}
+
+void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
+{
+    int mmu_idx = cpu_mmu_index(env, false);
+    uintptr_t ra = GETPC();
+
+    check_tag_aligned(env, ptr, ra);
+    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
+}
+
+static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
+                           uintptr_t ra, stg_store1 store1)
+{
+    int mmu_idx = cpu_mmu_index(env, false);
+    int tag = allocation_tag_from_addr(xt);
+    uint8_t *mem1, *mem2;
+
+    check_tag_aligned(env, ptr, ra);
+
+    /*
+     * Trap if accessing invalid page(s).
+     * This takes priority over !allocation_tag_access_enabled.
+     */
+    if (ptr & TAG_GRANULE) {
+        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
+        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
+                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
+        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
+                                  MMU_DATA_STORE, TAG_GRANULE,
+                                  MMU_DATA_STORE, 1, ra);
+
+        /* Store if page(s) support tags. */
+        if (mem1) {
+            store1(TAG_GRANULE, mem1, tag);
+        }
+        if (mem2) {
+            store1(0, mem2, tag);
+        }
+    } else {
+        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
+        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
+                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
+        if (mem1) {
+            tag |= tag << 4;
+            atomic_set(mem1, tag);
+        }
+    }
+}
+
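    /*
     * Concrete illustration of the two cases above, with TAG_GRANULE == 16:
     *
     *   ptr % 32 == 16: the two granules straddle a tag-byte boundary, so
     *   the code writes the high nibble of one byte and the low nibble of
     *   the next.  The first argument of store1() only selects the nibble,
     *   hence store1(TAG_GRANULE, mem1, tag) and store1(0, mem2, tag).
     *
     *   ptr % 32 == 0: both granules share a single tag byte, so one byte
     *   store of (tag | tag << 4) covers the pair, and a plain atomic_set()
     *   suffices even in a parallel context.
     */
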
+void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+    do_st2g(env, ptr, xt, GETPC(), store_tag1);
+}
+
+void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
+}
+
+void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
+{
+    int mmu_idx = cpu_mmu_index(env, false);
+    uintptr_t ra = GETPC();
+    int in_page = -(ptr | TARGET_PAGE_MASK);
+
+    check_tag_aligned(env, ptr, ra);
+
+    if (likely(in_page >= 2 * TAG_GRANULE)) {
+        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
+    } else {
+        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
+        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
+    }
+}
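
The in_page computation in st2g_stub above is the usual bytes-remaining-in-page
idiom: TARGET_PAGE_MASK is a negative constant, so or-ing it into ptr sets all
the high bits and negating yields the distance to the next page boundary. A
small standalone sketch, assuming a 4KiB target page:

    uint64_t ptr = 0x100000ff0;
    int64_t page_mask = -4096;            /* stand-in for TARGET_PAGE_MASK */
    int in_page = -(ptr | page_mask);     /* -(0x...fff0), i.e. 16 */
    /* 16 < 2 * TAG_GRANULE == 32, so the two granules may cross a page
     * boundary and must be probed as two separate TAG_GRANULE writes. */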
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index b032829194..0725c2ca07 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -3689,6 +3689,153 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     }
 }
 
+/*
+ * Load/Store memory tags
+ *
+ *  31 30 29         24     22  21     12    10      5      0
+ * +-----+-------------+-----+---+------+-----+------+------+
+ * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 |  Rn  |  Rt  |
+ * +-----+-------------+-----+---+------+-----+------+------+
+ */
+static void disas_ldst_tag(DisasContext *s, uint32_t insn)
+{
+    int rt = extract32(insn, 0, 5);
+    int rn = extract32(insn, 5, 5);
+    uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
+    int op2 = extract32(insn, 10, 2);
+    int op1 = extract32(insn, 22, 2);
+    bool is_load = false, is_pair = false, is_zero = false;
+    int index = 0;
+    TCGv_i64 addr, clean_addr, tcg_rt;
+
+    /* We checked insn bits [29:24,21] in the caller.  */
+    if (extract32(insn, 30, 2) != 3) {
+        goto do_unallocated;
+    }
+
+    /*
+     * @index is a tri-state variable:
+     * < 0 : post-index, writeback
+     * = 0 : signed offset
+     * > 0 : pre-index, writeback
+     */
+    switch (op1) {
+    case 0:
+        if (op2 != 0) {
+            /* STG */
+            index = op2 - 2;
+            break;
+        }
+        goto do_unallocated;
+    case 1:
+        if (op2 != 0) {
+            /* STZG */
+            is_zero = true;
+            index = op2 - 2;
+        } else {
+            /* LDG */
+            is_load = true;
+        }
+        break;
+    case 2:
+        if (op2 != 0) {
+            /* ST2G */
+            is_pair = true;
+            index = op2 - 2;
+            break;
+        }
+        goto do_unallocated;
+    case 3:
+        if (op2 != 0) {
+            /* STZ2G */
+            is_pair = is_zero = true;
+            index = op2 - 2;
+            break;
+        }
+        goto do_unallocated;
+
+    default:
+    do_unallocated:
+        unallocated_encoding(s);
+        return;
+    }
+
+    if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
+        goto do_unallocated;
+    }
+
+    if (rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+
+    addr = read_cpu_reg_sp(s, rn, true);
+    if (index >= 0) {
+        /* pre-index or signed offset */
+        tcg_gen_addi_i64(addr, addr, offset);
+    }
+
+    if (is_load) {
+        tcg_rt = cpu_reg(s, rt);
+        if (s->ata) {
+            gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
+        } else {
+            clean_addr = clean_data_tbi(s, addr);
+            gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
+            gen_address_with_allocation_tag0(tcg_rt, addr);
+        }
+    } else {
+        tcg_rt = cpu_reg_sp(s, rt);
+        if (!s->ata) {
+            /*
+             * For STG and ST2G, we need to check alignment and probe memory.
+             * TODO: For STZG and STZ2G, we could rely on the stores below,
+             * at least for system mode; user-only won't enforce alignment.
+             */
+            if (is_pair) {
+                gen_helper_st2g_stub(cpu_env, addr);
+            } else {
+                gen_helper_stg_stub(cpu_env, addr);
+            }
+        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+            if (is_pair) {
+                gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
+            } else {
+                gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
+            }
+        } else {
+            if (is_pair) {
+                gen_helper_st2g(cpu_env, addr, tcg_rt);
+            } else {
+                gen_helper_stg(cpu_env, addr, tcg_rt);
+            }
+        }
+    }
+
+    if (is_zero) {
+        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
+        TCGv_i64 tcg_zero = tcg_const_i64(0);
+        int mem_index = get_mem_index(s);
+        int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;
+
+        tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
+                            MO_Q | MO_ALIGN_16);
+        for (i = 8; i < n; i += 8) {
+            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
+            tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_Q);
+        }
+        tcg_temp_free_i64(tcg_zero);
+    }
+
+    if (index != 0) {
+        /* pre-index or post-index */
+        if (index < 0) {
+            /* post-index */
+            tcg_gen_addi_i64(addr, addr, offset);
+        }
+        tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
+    }
+}
+
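    /*
     * Decode summary for the function above (index = op2 - 2 for the
     * store forms):
     *
     *   op2 == 1  ->  index = -1 : post-index,    e.g. STG Xt, [Xn], #imm
     *   op2 == 2  ->  index =  0 : signed offset, e.g. STG Xt, [Xn, #imm]
     *   op2 == 3  ->  index = +1 : pre-index,     e.g. STG Xt, [Xn, #imm]!
     *
     * op2 == 0 selects LDG when op1 == 1 and is unallocated otherwise.
     * For the zeroing forms, n = (1 + is_pair) << LOG2_TAG_GRANULE is 16
     * or 32 bytes, emitted as 8-byte zero stores at offsets 0/8 (plus
     * 16/24 for STZ2G).
     */
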
 /* Loads and stores */
 static void disas_ldst(DisasContext *s, uint32_t insn)
 {
@@ -3713,13 +3860,14 @@ static void disas_ldst(DisasContext *s, uint32_t insn)
     case 0x0d: /* AdvSIMD load/store single structure */
         disas_ldst_single_struct(s, insn);
         break;
-    case 0x19: /* LDAPR/STLR (unscaled immediate) */
-        if (extract32(insn, 10, 2) != 0 ||
-            extract32(insn, 21, 1) != 0) {
+    case 0x19:
+        if (extract32(insn, 21, 1) != 0) {
+            disas_ldst_tag(s, insn);
+        } else if (extract32(insn, 10, 2) == 0) {
+            disas_ldst_ldapr_stlr(s, insn);
+        } else {
             unallocated_encoding(s);
-            break;
         }
-        disas_ldst_ldapr_stlr(s, insn);
         break;
     default:
         unallocated_encoding(s);
-- 
2.25.1


