From: Peter Maydell <peter.maydell@linaro.org>
To: qemu-devel@nongnu.org
Subject: [PULL 11/33] target/arm: Convert load/store exclusive and ordered to decodetree
Date: Mon, 19 Jun 2023 15:28:52 +0100
Message-ID: <20230619142914.963184-12-peter.maydell@linaro.org>
In-Reply-To: <20230619142914.963184-1-peter.maydell@linaro.org>

Convert the instructions in the load/store exclusive group (STXR,
STLXR, LDXR, LDAXR) and the load/store ordered group (STLR, STLLR,
LDAR, LDLAR) to decodetree.
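
As a reminder of how this works: for each &arg-set the decodetree
script generates a plain struct and a field-extraction step, and then
calls the corresponding trans_* function. Below is a minimal
standalone sketch of the field extraction implied by the @stxr format
line; the helper names x() and decode_stxr_fields() and the worked
example encoding are illustrative, not the generated code.

  /* Sketch: how "@stxr sz:2 ...... ... rs:5 lasr:1 rt2:5 rn:5 rt:5"
   * maps encoding bits onto the argument-set fields. */
  #include <stdint.h>
  #include <stdio.h>

  typedef struct {
      int rn, rt, rt2, rs, sz, lasr;
  } arg_stxr;

  static int x(uint32_t insn, int pos, int len)
  {
      return (insn >> pos) & ((1u << len) - 1);  /* like extract32() */
  }

  static void decode_stxr_fields(uint32_t insn, arg_stxr *a)
  {
      a->sz   = x(insn, 30, 2);   /* sz:2   at [31:30] */
      a->rs   = x(insn, 16, 5);   /* rs:5   at [20:16] */
      a->lasr = x(insn, 15, 1);   /* lasr:1 at [15]    */
      a->rt2  = x(insn, 10, 5);   /* rt2:5  at [14:10] */
      a->rn   = x(insn,  5, 5);   /* rn:5   at [9:5]   */
      a->rt   = x(insn,  0, 5);   /* rt:5   at [4:0]   */
  }

  int main(void)
  {
      arg_stxr a;
      /* 0x88007c41 matches the STXR pattern (STXR W0, W1, [X2]) */
      decode_stxr_fields(0x88007c41, &a);
      printf("sz=%d rs=%d lasr=%d rt2=%d rn=%d rt=%d\n",
             a.sz, a.rs, a.lasr, a.rt2, a.rn, a.rt);
      return 0;
  }

The decoder built from the pattern lines then hands such a struct to
trans_STXR(), trans_LDXR(), etc. below.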

Note that for STLR, STLLR, LDAR and LDLAR this fixes an under-decoding
in the legacy decoder, where we were not checking that the RES1 bits
in the Rs and Rt2 fields were set.
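
To make the behavioural change concrete, here is a minimal standalone
sketch (the variable names and the choice of example encoding are
illustrative): two words that differ only in the RES1 Rt2 field.

  /* Sketch: the legacy decoder accepted both of these as STLR, since
   * it never looked at Rs [20:16] or Rt2 [14:10] for the ordered
   * forms.  The decodetree pattern
   *   STLR .. 001000 100 11111 . 11111 ..... .....
   * only matches the first; the second now goes to
   * unallocated_encoding(). */
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      uint32_t stlr_ok  = 0x889ffc41;  /* STLR W1, [X2]; Rs=Rt2=0b11111 */
      uint32_t stlr_bad = stlr_ok & ~(0x1fu << 10); /* but Rt2=0b00000 */

      printf("matches STLR pattern: 0x%08x\n", (unsigned)stlr_ok);
      printf("now UNDEFs:           0x%08x\n", (unsigned)stlr_bad);
      return 0;
  }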

The new function ldst_iss_sf() is equivalent to the existing
disas_ldst_compute_iss_sf(), but it takes the pre-decoded 'ext' field
rather than taking an undecoded two-bit opc field and extracting
'ext' from it. Once all the loads and stores have been converted to
decodetree, disas_ldst_compute_iss_sf() will be unused and can be
deleted.
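
For reference, a sketch of how the legacy helper's opc argument
relates to the new parameters (sketch_iss_sf() is a hypothetical
stand-in, not the in-tree code; for the load forms, opc bit 0 carries
what the new function receives as 'ext'):

  #include <stdbool.h>

  /* Not the in-tree code: same result as ldst_iss_sf(), but taking
   * the undecoded opc field the way disas_ldst_compute_iss_sf()
   * does. */
  static bool sketch_iss_sf(int size, bool sign, int opc)
  {
      bool ext = opc & 1;  /* the bit ldst_iss_sf() takes pre-decoded */

      if (sign) {
          return !ext;     /* signed load: 64-bit result unless we
                            * zero-extend from 32 to 64 afterwards */
      }
      return size == 3;    /* unsigned: sf only for 64-bit (MO_64) */
  }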

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-9-peter.maydell@linaro.org
---
 target/arm/tcg/a64.decode      |  11 +++
 target/arm/tcg/translate-a64.c | 154 ++++++++++++++++++++-------------
 2 files changed, 103 insertions(+), 62 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index eeaca08ae83..c5894fc06d2 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -230,3 +230,14 @@ HLT             1101 0100 010 ................ 000 00 @i16
 # DCPS1         1101 0100 101 ................ 000 01 @i16
 # DCPS2         1101 0100 101 ................ 000 10 @i16
 # DCPS3         1101 0100 101 ................ 000 11 @i16
+
+# Loads and stores
+
+&stxr           rn rt rt2 rs sz lasr
+&stlr           rn rt sz lasr
+@stxr           sz:2 ...... ... rs:5 lasr:1 rt2:5 rn:5 rt:5 &stxr
+@stlr           sz:2 ...... ... ..... lasr:1 ..... rn:5 rt:5 &stlr
+STXR            .. 001000 000 ..... . ..... ..... ..... @stxr  # inc STLXR
+LDXR            .. 001000 010 ..... . ..... ..... ..... @stxr  # inc LDAXR
+STLR            .. 001000 100 11111 . 11111 ..... ..... @stlr  # inc STLLR
+LDAR            .. 001000 110 11111 . 11111 ..... ..... @stlr  # inc LDLAR
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index a2a71b4062f..1ba2d6a75e4 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -2652,6 +2652,95 @@ static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
     return regsize == 64;
 }
 
+static bool ldst_iss_sf(int size, bool sign, bool ext)
+{
+
+    if (sign) {
+        /*
+         * Signed loads are 64 bit results if we are not going to
+         * do a zero-extend from 32 to 64 after the load.
+         * (For a store, sign and ext are always false.)
+         */
+        return !ext;
+    } else {
+        /* Unsigned loads/stores work at the specified size */
+        return size == MO_64;
+    }
+}
+
+static bool trans_STXR(DisasContext *s, arg_stxr *a)
+{
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    if (a->lasr) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    }
+    gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, false);
+    return true;
+}
+
+static bool trans_LDXR(DisasContext *s, arg_stxr *a)
+{
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, false);
+    if (a->lasr) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+    }
+    return true;
+}
+
+static bool trans_STLR(DisasContext *s, arg_stlr *a)
+{
+    TCGv_i64 clean_addr;
+    MemOp memop;
+    bool iss_sf = ldst_iss_sf(a->sz, false, false);
+
+    /*
+     * StoreLORelease is the same as Store-Release for QEMU, but
+     * needs the feature-test.
+     */
+    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
+        return false;
+    }
+    /* Generate ISS for non-exclusive accesses including LASR.  */
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    memop = check_ordered_align(s, a->rn, 0, true, a->sz);
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
+                                true, a->rn != 31, memop);
+    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, memop, true, a->rt,
+              iss_sf, a->lasr);
+    return true;
+}
+
+static bool trans_LDAR(DisasContext *s, arg_stlr *a)
+{
+    TCGv_i64 clean_addr;
+    MemOp memop;
+    bool iss_sf = ldst_iss_sf(a->sz, false, false);
+
+    /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
+    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
+        return false;
+    }
+    /* Generate ISS for non-exclusive accesses including LASR.  */
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    memop = check_ordered_align(s, a->rn, 0, false, a->sz);
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
+                                false, a->rn != 31, memop);
+    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, memop, false, true,
+              a->rt, iss_sf, a->lasr);
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+    return true;
+}
+
 /* Load/store exclusive
  *
  *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
@@ -2674,70 +2763,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
     int is_lasr = extract32(insn, 15, 1);
     int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
     int size = extract32(insn, 30, 2);
-    TCGv_i64 clean_addr;
-    MemOp memop;
 
     switch (o2_L_o1_o0) {
-    case 0x0: /* STXR */
-    case 0x1: /* STLXR */
-        if (rn == 31) {
-            gen_check_sp_alignment(s);
-        }
-        if (is_lasr) {
-            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        }
-        gen_store_exclusive(s, rs, rt, rt2, rn, size, false);
-        return;
-
-    case 0x4: /* LDXR */
-    case 0x5: /* LDAXR */
-        if (rn == 31) {
-            gen_check_sp_alignment(s);
-        }
-        gen_load_exclusive(s, rt, rt2, rn, size, false);
-        if (is_lasr) {
-            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
-        }
-        return;
-
-    case 0x8: /* STLLR */
-        if (!dc_isar_feature(aa64_lor, s)) {
-            break;
-        }
-        /* StoreLORelease is the same as Store-Release for QEMU.  */
-        /* fall through */
-    case 0x9: /* STLR */
-        /* Generate ISS for non-exclusive accesses including LASR.  */
-        if (rn == 31) {
-            gen_check_sp_alignment(s);
-        }
-        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        memop = check_ordered_align(s, rn, 0, true, size);
-        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                    true, rn != 31, memop);
-        do_gpr_st(s, cpu_reg(s, rt), clean_addr, memop, true, rt,
-                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
-        return;
-
-    case 0xc: /* LDLAR */
-        if (!dc_isar_feature(aa64_lor, s)) {
-            break;
-        }
-        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
-        /* fall through */
-    case 0xd: /* LDAR */
-        /* Generate ISS for non-exclusive accesses including LASR.  */
-        if (rn == 31) {
-            gen_check_sp_alignment(s);
-        }
-        memop = check_ordered_align(s, rn, 0, false, size);
-        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                    false, rn != 31, memop);
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, memop, false, true,
-                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
-        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
-        return;
-
     case 0x2: case 0x3: /* CASP / STXP */
         if (size & 2) { /* STXP / STLXP */
             if (rn == 31) {
@@ -2787,6 +2814,9 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             return;
         }
         break;
+    default:
+        /* Handled in decodetree */
+        break;
     }
     unallocated_encoding(s);
 }
-- 
2.34.1


