From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: peter.maydell@linaro.org, qemu-arm@nongnu.org, steplong@quicinc.com
Subject: [PATCH v7 23/42] target/arm: Add gen_mte_check1
Date: Tue,  2 Jun 2020 18:12:58 -0700
Message-ID: <20200603011317.473934-24-richard.henderson@linaro.org>
In-Reply-To: <20200603011317.473934-1-richard.henderson@linaro.org>

Replace existing uses of check_data_tbi in translate-a64.c that
perform a single logical memory access.  Leave the helper blank
for now to reduce the patch size.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
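
Note for reviewers (illustration only, not part of the patch): the
descriptor built by gen_mte_check1_mmuidx() below packs its arguments
into a single 32-bit value using the MTEDESC fields this patch adds to
internals.h.  FIELD_DP32/FIELD_EX32 are the usual hw/registerfields.h
accessors; the bare variable names stand in for the values the function
receives:

    uint32_t desc = 0;
    desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);        /* bits [3:0] */
    desc = FIELD_DP32(desc, MTEDESC, TBI, tbi);              /* bits [5:4] */
    desc = FIELD_DP32(desc, MTEDESC, TCMA, tcma);            /* bits [7:6] */
    desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);       /* bit 8      */
    desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size); /* bits [13:9] */

    /* Once helper_mte_check1 is filled in, it can recover each field: */
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);

TSIZE (bits [23:14]) is reserved for the multi-access mte_checkN in the
next patch.
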
 target/arm/helper-a64.h    |  1 +
 target/arm/internals.h     |  8 ++++
 target/arm/translate-a64.h |  2 +
 target/arm/mte_helper.c    |  8 ++++
 target/arm/translate-a64.c | 95 +++++++++++++++++++++++++++----------
 5 files changed, 90 insertions(+), 24 deletions(-)
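
Also note the tag_checked arguments at the call sites below: as I read
the architecture, an access whose base register is SP, with an immediate
offset and no writeback, is never Tag Checked; that is what the
"rn != 31" and "writeback || rn != 31" expressions encode, e.g.:

    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);

Register-offset forms pass true unconditionally.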

diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index 7b628d100e..2faa49d0a3 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -104,6 +104,7 @@ DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
 DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
 
+DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
 DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
 DEF_HELPER_FLAGS_3(ldg, TCG_CALL_NO_WG, i64, env, i64, i64)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 7c9abbabc9..fb92ef6b84 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1310,6 +1310,14 @@ void arm_log_exception(int idx);
 #define LOG2_TAG_GRANULE 4
 #define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
 
+/* Bits within a descriptor passed to the helper_mte_check* functions. */
+FIELD(MTEDESC, MIDX,  0, 4)
+FIELD(MTEDESC, TBI,   4, 2)
+FIELD(MTEDESC, TCMA,  6, 2)
+FIELD(MTEDESC, WRITE, 8, 1)
+FIELD(MTEDESC, ESIZE, 9, 5)
+FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */
+
 static inline int allocation_tag_from_addr(uint64_t ptr)
 {
     return extract64(ptr, 56, 4);
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index f02fbb63a4..1ab033e4c1 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -40,6 +40,8 @@ TCGv_ptr get_fpstatus_ptr(bool);
 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                             unsigned int imms, unsigned int immr);
 bool sve_access_check(DisasContext *s);
+TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
+                        bool tag_checked, int log2_size);
 
 /* We should have at some point before trying to access an FP register
  * done the necessary access check, so assert that
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index 27d4b4536c..ec12768dfc 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -358,3 +358,11 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
         memset(mem, tag_pair, tag_bytes);
     }
 }
+
+/*
+ * Perform an MTE checked access for a single logical or atomic access.
+ */
+uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    return ptr;
+}
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index c4f86c69ba..9f8ae99cc0 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -204,20 +204,19 @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
 }
 
 /*
- * Return a "clean" address for ADDR according to TBID.
- * This is always a fresh temporary, as we need to be able to
- * increment this independently of a dirty write-back address.
+ * Handle MTE and/or TBI.
+ *
+ * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
+ * for the tag to be present in the FAR_ELx register.  But for user-only
+ * mode, we do not have a TLB with which to implement this, so we must
+ * remove the top byte now.
+ *
+ * Always return a fresh temporary that we can increment independently
+ * of the write-back address.
  */
 static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
 {
     TCGv_i64 clean = new_tmp_a64(s);
-    /*
-     * In order to get the correct value in the FAR_ELx register,
-     * we must present the memory subsystem with the "dirty" address
-     * including the TBI.  In system mode we can make this work via
-     * the TLB, dropping the TBI during translation.  But for user-only
-     * mode we don't have that option, and must remove the top byte now.
-     */
 #ifdef CONFIG_USER_ONLY
     gen_top_byte_ignore(s, clean, addr, s->tbid);
 #else
@@ -245,6 +245,45 @@ static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
     tcg_temp_free_i32(t_size);
 }
 
+/*
+ * For MTE, check a single logical or atomic access.  This probes a single
+ * address, the exact one specified.  The size and alignment of the access
+ * is not relevant to MTE, per se, but watchpoints do require the size,
+ * and we want to recognize those before making any other changes to state.
+ */
+static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
+                                      bool is_write, bool tag_checked,
+                                      int log2_size, bool is_unpriv,
+                                      int core_idx)
+{
+    if (tag_checked && s->mte_active[is_unpriv]) {
+        TCGv_i32 tcg_desc;
+        TCGv_i64 ret;
+        int desc = 0;
+
+        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
+        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size);
+        tcg_desc = tcg_const_i32(desc);
+
+        ret = new_tmp_a64(s);
+        gen_helper_mte_check1(ret, cpu_env, tcg_desc, addr);
+        tcg_temp_free_i32(tcg_desc);
+
+        return ret;
+    }
+    return clean_data_tbi(s, addr);
+}
+
+TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
+                        bool tag_checked, int log2_size)
+{
+    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
+                                 false, get_mem_index(s));
+}
+
 typedef struct DisasCompare64 {
     TCGCond cond;
     TCGv_i64 value;
@@ -2358,7 +2397,7 @@ static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
-    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
     tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
                                size | MO_ALIGN | s->be_data);
 }
@@ -2376,7 +2415,9 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
-    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+
+    /* This is a single atomic access, despite the "pair". */
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);
 
     if (size == 2) {
         TCGv_i64 cmp = tcg_temp_new_i64();
@@ -2501,7 +2542,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (is_lasr) {
             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
         }
-        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
         gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
         return;
 
@@ -2510,7 +2551,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (rn == 31) {
             gen_check_sp_alignment(s);
         }
-        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
         s->is_ldex = true;
         gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
         if (is_lasr) {
@@ -2530,7 +2571,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             gen_check_sp_alignment(s);
         }
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
         do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         return;
@@ -2546,7 +2587,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         if (rn == 31) {
             gen_check_sp_alignment(s);
         }
-        clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
         do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
@@ -2560,7 +2601,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             if (is_lasr) {
                 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
             }
-            clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+                                        true, rn != 31, size);
             gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
             return;
         }
@@ -2578,7 +2620,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             if (rn == 31) {
                 gen_check_sp_alignment(s);
             }
-            clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+                                        false, rn != 31, size);
             s->is_ldex = true;
             gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
             if (is_lasr) {
@@ -2872,6 +2915,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     bool iss_valid = !is_vector;
     bool post_index;
     bool writeback;
+    int memidx;
 
     TCGv_i64 clean_addr, dirty_addr;
 
@@ -2929,7 +2973,11 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     if (!post_index) {
         tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
     }
-    clean_addr = clean_data_tbi(s, dirty_addr);
+
+    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
+                                       writeback || rn != 31,
+                                       size, is_unpriv, memidx);
 
     if (is_vector) {
         if (is_store) {
@@ -2939,7 +2987,6 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
-        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
         bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
 
         if (is_store) {
@@ -3036,7 +3083,7 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
     ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
 
     tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);
 
     if (is_vector) {
         if (is_store) {
@@ -3121,7 +3168,7 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
     dirty_addr = read_cpu_reg_sp(s, rn, 1);
     offset = imm12 << size;
     tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);
 
     if (is_vector) {
         if (is_store) {
@@ -3214,7 +3261,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
-    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
 
     if (o3_opc == 014) {
         /*
@@ -3291,7 +3338,8 @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
     tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
 
     /* Note that "clean" and "dirty" here refer to TBI not PAC.  */
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_check1(s, dirty_addr, false,
+                                is_wback || rn != 31, size);
 
     tcg_rt = cpu_reg(s, rt);
     do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false,
-- 
2.25.1


