qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2 0/1] accel/tcg: Fix #390 and other atomicity musings
@ 2021-06-17  1:12 Richard Henderson
  2021-06-17  1:12 ` [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops Richard Henderson
  0 siblings, 1 reply; 6+ messages in thread
From: Richard Henderson @ 2021-06-17  1:12 UTC (permalink / raw)
  To: qemu-devel; +Cc: alex.bennee, matheus.ferst, david

This fixes some bugs reported against 128-bit atomic operations.

Just a note that the ppc insns that use this, LQ and STQ, do not require
atomic operations if the address is unaligned, or if the address does not
resolve to ram.  So for some things we are working harder than required.

I've also had a good read of Power's atomicity requirements, for all
instructions.  It requires that the lsb of the address control the
minimum atomicity.  E.g. for (addr % size) == 2, each 2-byte component
must be atomic.

Which is certainly not what we're doing at the bottom of our memory
model at present.

I've also been reading up on Arm's FEAT_LSE2, which is mandatory for v8.4.
This vastly strengthens the single-copy atomicity requirements for the
whole system.  Strikingly, any access that does not cross a 16-byte
boundary -- aligned or unaligned -- is now single-copy atomic.

In both cases, I would imagine that we should only allow the softmmu
fast path for aligned accesses.  That's single-copy atomic on all hosts.
But then we need different handling for each platform at the bottom
of cputlb...

Suggestions on ways to approach this that aren't overwhelmingly ugly?


r~


Richard Henderson (1):
  accel/tcg: Probe the proper permissions for atomic ops

 accel/tcg/atomic_template.h | 24 +++++-----
 accel/tcg/cputlb.c          | 95 ++++++++++++++++++++++++++-----------
 accel/tcg/user-exec.c       |  8 ++--
 3 files changed, 83 insertions(+), 44 deletions(-)

-- 
2.25.1



^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops
  2021-06-17  1:12 [PATCH v2 0/1] accel/tcg: Fix #390 and other atomicity musings Richard Henderson
@ 2021-06-17  1:12 ` Richard Henderson
  2021-06-18 18:57   ` Matheus K. Ferst
  2021-06-21 13:21   ` Matheus K. Ferst
  0 siblings, 2 replies; 6+ messages in thread
From: Richard Henderson @ 2021-06-17  1:12 UTC (permalink / raw)
  To: qemu-devel; +Cc: alex.bennee, matheus.ferst, david

We had a single ATOMIC_MMU_LOOKUP macro that probed for
read+write on all atomic ops.  This is incorrect for
plain atomic load and atomic store.

For user-only, we rely on the host page permissions.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/390
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/atomic_template.h | 24 +++++-----
 accel/tcg/cputlb.c          | 95 ++++++++++++++++++++++++++-----------
 accel/tcg/user-exec.c       |  8 ++--
 3 files changed, 83 insertions(+), 44 deletions(-)

diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 0ff7f913e1..afa8a9daf3 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -74,7 +74,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                               ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
     DATA_TYPE ret;
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                          ATOMIC_MMU_IDX);
@@ -95,7 +95,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                          ATOMIC_MMU_IDX);
 
@@ -110,7 +110,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                      ABI_TYPE val EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
                                          ATOMIC_MMU_IDX);
 
@@ -125,7 +125,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                            ABI_TYPE val EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
     DATA_TYPE ret;
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
                                          ATOMIC_MMU_IDX);
@@ -142,7 +142,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                         ABI_TYPE val EXTRA_ARGS)                    \
 {                                                                   \
     ATOMIC_MMU_DECLS;                                               \
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                        \
     DATA_TYPE ret;                                                  \
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
                                          ATOMIC_MMU_IDX);           \
@@ -176,7 +176,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                         ABI_TYPE xval EXTRA_ARGS)                   \
 {                                                                   \
     ATOMIC_MMU_DECLS;                                               \
-    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
+    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
     XDATA_TYPE cmp, old, new, val = xval;                           \
     uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
                                          ATOMIC_MMU_IDX);           \
@@ -221,7 +221,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                               ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
     DATA_TYPE ret;
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                          ATOMIC_MMU_IDX);
@@ -242,7 +242,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                          ATOMIC_MMU_IDX);
 
@@ -257,7 +257,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                      ABI_TYPE val EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
                                          ATOMIC_MMU_IDX);
 
@@ -274,7 +274,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                            ABI_TYPE val EXTRA_ARGS)
 {
     ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
     ABI_TYPE ret;
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
                                          ATOMIC_MMU_IDX);
@@ -291,7 +291,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                         ABI_TYPE val EXTRA_ARGS)                    \
 {                                                                   \
     ATOMIC_MMU_DECLS;                                               \
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                        \
     DATA_TYPE ret;                                                  \
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
                                          false, ATOMIC_MMU_IDX);    \
@@ -323,7 +323,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                         ABI_TYPE xval EXTRA_ARGS)                   \
 {                                                                   \
     ATOMIC_MMU_DECLS;                                               \
-    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
+    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
     XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
     uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
                                          false, ATOMIC_MMU_IDX);    \
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f24348e979..b6d5fc6326 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1742,18 +1742,22 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
 
 #endif
 
-/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
- * operations, or io operations to proceed.  Return the host address.  */
+/*
+ * Probe for an atomic operation.  Do not allow unaligned operations,
+ * or io operations to proceed.  Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, uintptr_t retaddr)
+                               TCGMemOpIdx oi, int size, int prot,
+                               uintptr_t retaddr)
 {
     size_t mmu_idx = get_mmuidx(oi);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr = tlb_addr_write(tlbe);
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
-    int s_bits = mop & MO_SIZE;
+    uintptr_t index;
+    CPUTLBEntry *tlbe;
+    target_ulong tlb_addr;
     void *hostaddr;
 
     /* Adjust the given return address.  */
@@ -1767,7 +1771,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Enforce qemu required alignment.  */
-    if (unlikely(addr & ((1 << s_bits) - 1))) {
+    if (unlikely(addr & (size - 1))) {
         /* We get here if guest alignment was not requested,
            or was not enforced by cpu_unaligned_access above.
            We might widen the access and emulate, but for now
@@ -1775,15 +1779,45 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         goto stop_the_world;
     }
 
+    index = tlb_index(env, mmu_idx, addr);
+    tlbe = tlb_entry(env, mmu_idx, addr);
+
     /* Check TLB entry and enforce page permissions.  */
-    if (!tlb_hit(tlb_addr, addr)) {
-        if (!VICTIM_TLB_HIT(addr_write, addr)) {
-            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
-                     mmu_idx, retaddr);
-            index = tlb_index(env, mmu_idx, addr);
-            tlbe = tlb_entry(env, mmu_idx, addr);
+    if (prot & PAGE_WRITE) {
+        tlb_addr = tlb_addr_write(tlbe);
+        if (!tlb_hit(tlb_addr, addr)) {
+            if (!VICTIM_TLB_HIT(addr_write, addr)) {
+                tlb_fill(env_cpu(env), addr, size,
+                         MMU_DATA_STORE, mmu_idx, retaddr);
+                index = tlb_index(env, mmu_idx, addr);
+                tlbe = tlb_entry(env, mmu_idx, addr);
+            }
+            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+        }
+
+        /* Let the guest notice RMW on a write-only page.  */
+        if ((prot & PAGE_READ) &&
+            unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
+            tlb_fill(env_cpu(env), addr, size,
+                     MMU_DATA_LOAD, mmu_idx, retaddr);
+            /*
+             * Since we don't support reads and writes to different addresses,
+             * and we do have the proper page loaded for write, this shouldn't
+             * ever return.  But just in case, handle via stop-the-world.
+             */
+            goto stop_the_world;
+        }
+    } else /* if (prot & PAGE_READ) */ {
+        tlb_addr = tlbe->addr_read;
+        if (!tlb_hit(tlb_addr, addr)) {
+            if (!VICTIM_TLB_HIT(addr_write, addr)) {
+                tlb_fill(env_cpu(env), addr, size,
+                         MMU_DATA_LOAD, mmu_idx, retaddr);
+                index = tlb_index(env, mmu_idx, addr);
+                tlbe = tlb_entry(env, mmu_idx, addr);
+            }
+            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
         }
-        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
     }
 
     /* Notice an IO access or a needs-MMU-lookup access */
@@ -1793,20 +1827,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         goto stop_the_world;
     }
 
-    /* Let the guest notice RMW on a write-only page.  */
-    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
-        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
-                 mmu_idx, retaddr);
-        /* Since we don't support reads and writes to different addresses,
-           and we do have the proper page loaded for write, this shouldn't
-           ever return.  But just in case, handle via stop-the-world.  */
-        goto stop_the_world;
-    }
-
     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
 
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
-        notdirty_write(env_cpu(env), addr, 1 << s_bits,
+        notdirty_write(env_cpu(env), addr, size,
                        &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
     }
 
@@ -2669,7 +2693,12 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
 #define ATOMIC_NAME(X) \
     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
 #define ATOMIC_MMU_DECLS
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+#define ATOMIC_MMU_LOOKUP_RW \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
+#define ATOMIC_MMU_LOOKUP_R \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
+#define ATOMIC_MMU_LOOKUP_W \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
 #define ATOMIC_MMU_CLEANUP
 #define ATOMIC_MMU_IDX   get_mmuidx(oi)
 
@@ -2698,10 +2727,18 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
 
 #undef EXTRA_ARGS
 #undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP
+#undef ATOMIC_MMU_LOOKUP_RW
+#undef ATOMIC_MMU_LOOKUP_R
+#undef ATOMIC_MMU_LOOKUP_W
+
 #define EXTRA_ARGS         , TCGMemOpIdx oi
 #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
+#define ATOMIC_MMU_LOOKUP_RW \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC())
+#define ATOMIC_MMU_LOOKUP_R \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC())
+#define ATOMIC_MMU_LOOKUP_W \
+    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC())
 
 #define DATA_SIZE 1
 #include "atomic_template.h"
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index fb2d43e6a9..e67b1617b5 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1220,7 +1220,9 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
 /* Macro to call the above, with local variables from the use context.  */
 #define ATOMIC_MMU_DECLS do {} while (0)
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
+#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
+#define ATOMIC_MMU_LOOKUP_R   ATOMIC_MMU_LOOKUP_RW
+#define ATOMIC_MMU_LOOKUP_W   ATOMIC_MMU_LOOKUP_RW
 #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
 #define ATOMIC_MMU_IDX MMU_USER_IDX
 
@@ -1250,12 +1252,12 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
 #undef EXTRA_ARGS
 #undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP
+#undef ATOMIC_MMU_LOOKUP_RW
 
 #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
 #define ATOMIC_NAME(X) \
     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
+#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
 
 #define DATA_SIZE 16
 #include "atomic_template.h"
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops
  2021-06-17  1:12 ` [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops Richard Henderson
@ 2021-06-18 18:57   ` Matheus K. Ferst
  2021-06-18 19:32     ` Richard Henderson
  2021-06-21 13:21   ` Matheus K. Ferst
  1 sibling, 1 reply; 6+ messages in thread
From: Matheus K. Ferst @ 2021-06-18 18:57 UTC (permalink / raw)
  To: Richard Henderson, qemu-devel; +Cc: alex.bennee, david

On 16/06/2021 22:12, Richard Henderson wrote:
> We had a single ATOMIC_MMU_LOOKUP macro that probed for
> read+write on all atomic ops.  This is incorrect for
> plain atomic load and atomic store.
> 
> For user-only, we rely on the host page permissions.
> 
> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/390
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   accel/tcg/atomic_template.h | 24 +++++-----
>   accel/tcg/cputlb.c          | 95 ++++++++++++++++++++++++++-----------
>   accel/tcg/user-exec.c       |  8 ++--
>   3 files changed, 83 insertions(+), 44 deletions(-)
> 
> diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
> index 0ff7f913e1..afa8a9daf3 100644
> --- a/accel/tcg/atomic_template.h
> +++ b/accel/tcg/atomic_template.h
> @@ -74,7 +74,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
>                                 ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
>       DATA_TYPE ret;
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
>                                            ATOMIC_MMU_IDX);
> @@ -95,7 +95,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
>   ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
>                                            ATOMIC_MMU_IDX);
>   
> @@ -110,7 +110,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
>                        ABI_TYPE val EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
>                                            ATOMIC_MMU_IDX);
>   
> @@ -125,7 +125,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
>                              ABI_TYPE val EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
>       DATA_TYPE ret;
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
>                                            ATOMIC_MMU_IDX);
> @@ -142,7 +142,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
>                           ABI_TYPE val EXTRA_ARGS)                    \
>   {                                                                   \
>       ATOMIC_MMU_DECLS;                                               \
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                        \
>       DATA_TYPE ret;                                                  \
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
>                                            ATOMIC_MMU_IDX);           \
> @@ -176,7 +176,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
>                           ABI_TYPE xval EXTRA_ARGS)                   \
>   {                                                                   \
>       ATOMIC_MMU_DECLS;                                               \
> -    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
> +    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
>       XDATA_TYPE cmp, old, new, val = xval;                           \
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
>                                            ATOMIC_MMU_IDX);           \
> @@ -221,7 +221,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
>                                 ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
>       DATA_TYPE ret;
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
>                                            ATOMIC_MMU_IDX);
> @@ -242,7 +242,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
>   ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
>                                            ATOMIC_MMU_IDX);
>   
> @@ -257,7 +257,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
>                        ABI_TYPE val EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
>                                            ATOMIC_MMU_IDX);
>   
> @@ -274,7 +274,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
>                              ABI_TYPE val EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
>       ABI_TYPE ret;
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
>                                            ATOMIC_MMU_IDX);
> @@ -291,7 +291,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
>                           ABI_TYPE val EXTRA_ARGS)                    \
>   {                                                                   \
>       ATOMIC_MMU_DECLS;                                               \
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                        \
>       DATA_TYPE ret;                                                  \
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
>                                            false, ATOMIC_MMU_IDX);    \
> @@ -323,7 +323,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
>                           ABI_TYPE xval EXTRA_ARGS)                   \
>   {                                                                   \
>       ATOMIC_MMU_DECLS;                                               \
> -    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
> +    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
>       XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
>                                            false, ATOMIC_MMU_IDX);    \
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index f24348e979..b6d5fc6326 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1742,18 +1742,22 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
>   
>   #endif
>   
> -/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
> - * operations, or io operations to proceed.  Return the host address.  */
> +/*
> + * Probe for an atomic operation.  Do not allow unaligned operations,
> + * or io operations to proceed.  Return the host address.
> + *
> + * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
> + */
>   static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
> -                               TCGMemOpIdx oi, uintptr_t retaddr)
> +                               TCGMemOpIdx oi, int size, int prot,
> +                               uintptr_t retaddr)
>   {
>       size_t mmu_idx = get_mmuidx(oi);
> -    uintptr_t index = tlb_index(env, mmu_idx, addr);
> -    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
> -    target_ulong tlb_addr = tlb_addr_write(tlbe);
>       MemOp mop = get_memop(oi);
>       int a_bits = get_alignment_bits(mop);
> -    int s_bits = mop & MO_SIZE;
> +    uintptr_t index;
> +    CPUTLBEntry *tlbe;
> +    target_ulong tlb_addr;
>       void *hostaddr;
>   
>       /* Adjust the given return address.  */
> @@ -1767,7 +1771,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>       }
>   
>       /* Enforce qemu required alignment.  */
> -    if (unlikely(addr & ((1 << s_bits) - 1))) {
> +    if (unlikely(addr & (size - 1))) {
>           /* We get here if guest alignment was not requested,
>              or was not enforced by cpu_unaligned_access above.
>              We might widen the access and emulate, but for now
> @@ -1775,15 +1779,45 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>           goto stop_the_world;
>       }
>   
> +    index = tlb_index(env, mmu_idx, addr);
> +    tlbe = tlb_entry(env, mmu_idx, addr);
> +
>       /* Check TLB entry and enforce page permissions.  */
> -    if (!tlb_hit(tlb_addr, addr)) {
> -        if (!VICTIM_TLB_HIT(addr_write, addr)) {
> -            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
> -                     mmu_idx, retaddr);
> -            index = tlb_index(env, mmu_idx, addr);
> -            tlbe = tlb_entry(env, mmu_idx, addr);
> +    if (prot & PAGE_WRITE) {
> +        tlb_addr = tlb_addr_write(tlbe);
> +        if (!tlb_hit(tlb_addr, addr)) {
> +            if (!VICTIM_TLB_HIT(addr_write, addr)) {
> +                tlb_fill(env_cpu(env), addr, size,
> +                         MMU_DATA_STORE, mmu_idx, retaddr);
> +                index = tlb_index(env, mmu_idx, addr);
> +                tlbe = tlb_entry(env, mmu_idx, addr);
> +            }
> +            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
> +        }
> +
> +        /* Let the guest notice RMW on a write-only page.  */
> +        if ((prot & PAGE_READ) &&
> +            unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
> +            tlb_fill(env_cpu(env), addr, size,
> +                     MMU_DATA_LOAD, mmu_idx, retaddr);
> +            /*
> +             * Since we don't support reads and writes to different addresses,
> +             * and we do have the proper page loaded for write, this shouldn't
> +             * ever return.  But just in case, handle via stop-the-world.
> +             */
> +            goto stop_the_world;
> +        }
> +    } else /* if (prot & PAGE_READ) */ {
> +        tlb_addr = tlbe->addr_read;
> +        if (!tlb_hit(tlb_addr, addr)) {
> +            if (!VICTIM_TLB_HIT(addr_write, addr)) {
> +                tlb_fill(env_cpu(env), addr, size,
> +                         MMU_DATA_LOAD, mmu_idx, retaddr);
> +                index = tlb_index(env, mmu_idx, addr);
> +                tlbe = tlb_entry(env, mmu_idx, addr);
> +            }
> +            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
>           }
> -        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
>       }
>   
>       /* Notice an IO access or a needs-MMU-lookup access */
> @@ -1793,20 +1827,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>           goto stop_the_world;
>       }
>   
> -    /* Let the guest notice RMW on a write-only page.  */
> -    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
> -        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
> -                 mmu_idx, retaddr);
> -        /* Since we don't support reads and writes to different addresses,
> -           and we do have the proper page loaded for write, this shouldn't
> -           ever return.  But just in case, handle via stop-the-world.  */
> -        goto stop_the_world;
> -    }
> -
>       hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
>   
>       if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
> -        notdirty_write(env_cpu(env), addr, 1 << s_bits,
> +        notdirty_write(env_cpu(env), addr, size,
>                          &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
>       }
>   
> @@ -2669,7 +2693,12 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
>   #define ATOMIC_NAME(X) \
>       HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
>   #define ATOMIC_MMU_DECLS
> -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
> +#define ATOMIC_MMU_LOOKUP_RW \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
> +#define ATOMIC_MMU_LOOKUP_R \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
> +#define ATOMIC_MMU_LOOKUP_W \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
>   #define ATOMIC_MMU_CLEANUP
>   #define ATOMIC_MMU_IDX   get_mmuidx(oi)
>   
> @@ -2698,10 +2727,18 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
>   
>   #undef EXTRA_ARGS
>   #undef ATOMIC_NAME
> -#undef ATOMIC_MMU_LOOKUP
> +#undef ATOMIC_MMU_LOOKUP_RW
> +#undef ATOMIC_MMU_LOOKUP_R
> +#undef ATOMIC_MMU_LOOKUP_W
> +
>   #define EXTRA_ARGS         , TCGMemOpIdx oi
>   #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
> -#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
> +#define ATOMIC_MMU_LOOKUP_RW \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC())
> +#define ATOMIC_MMU_LOOKUP_R \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC())
> +#define ATOMIC_MMU_LOOKUP_W \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC())
>   
>   #define DATA_SIZE 1
>   #include "atomic_template.h"
> diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
> index fb2d43e6a9..e67b1617b5 100644
> --- a/accel/tcg/user-exec.c
> +++ b/accel/tcg/user-exec.c
> @@ -1220,7 +1220,9 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>   
>   /* Macro to call the above, with local variables from the use context.  */
>   #define ATOMIC_MMU_DECLS do {} while (0)
> -#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
> +#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
> +#define ATOMIC_MMU_LOOKUP_R   ATOMIC_MMU_LOOKUP_RW
> +#define ATOMIC_MMU_LOOKUP_W   ATOMIC_MMU_LOOKUP_RW
>   #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
>   #define ATOMIC_MMU_IDX MMU_USER_IDX
>   
> @@ -1250,12 +1252,12 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>   
>   #undef EXTRA_ARGS
>   #undef ATOMIC_NAME
> -#undef ATOMIC_MMU_LOOKUP
> +#undef ATOMIC_MMU_LOOKUP_RW
>   
>   #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
>   #define ATOMIC_NAME(X) \
>       HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
> -#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
> +#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
>   
>   #define DATA_SIZE 16
>   #include "atomic_template.h"
> 

I can confirm that this fixes #390, and all the other test cases that I 
have for lq. Does "Resolves:" automatically close the issue on GitLab 
or do I need to close it manually?

-- 
Matheus K. Ferst
Instituto de Pesquisas ELDORADO <http://www.eldorado.org.br/>
Analista de Software Júnior
Aviso Legal - Disclaimer <https://www.eldorado.org.br/disclaimer.html>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops
  2021-06-18 18:57   ` Matheus K. Ferst
@ 2021-06-18 19:32     ` Richard Henderson
  2021-06-21 13:20       ` Matheus K. Ferst
  0 siblings, 1 reply; 6+ messages in thread
From: Richard Henderson @ 2021-06-18 19:32 UTC (permalink / raw)
  To: Matheus K. Ferst, qemu-devel; +Cc: alex.bennee, david

On 6/18/21 11:57 AM, Matheus K. Ferst wrote:
> I can confirm that this fixes #390, and all the other test cases that I have for lq.

Yay!  Can I get a Tested-by then?

> Does "Resolves:" automatically close the issue on GitLab...

Yes.


r~


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops
  2021-06-18 19:32     ` Richard Henderson
@ 2021-06-21 13:20       ` Matheus K. Ferst
  0 siblings, 0 replies; 6+ messages in thread
From: Matheus K. Ferst @ 2021-06-21 13:20 UTC (permalink / raw)
  To: Richard Henderson, qemu-devel; +Cc: alex.bennee, david

On 18/06/2021 16:32, Richard Henderson wrote:
> On 6/18/21 11:57 AM, Matheus K. Ferst wrote:
>> I can confirm that this fixes #390, and all the other test cases that 
>> I have for lq.
> 
> Yay!  Can I get a Tested-by then?
> 
Ah, I forgot the tag...
Tested-by: Matheus K. Ferst <matheus.ferst@eldorado.org.br>

>> Does "Resolves:" automatically close the issue on GitLab...
> 
> Yes.
> 
> 
> r~


-- 
Matheus K. Ferst
Instituto de Pesquisas ELDORADO <http://www.eldorado.org.br/>
Analista de Software Júnior
Aviso Legal - Disclaimer <https://www.eldorado.org.br/disclaimer.html>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops
  2021-06-17  1:12 ` [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops Richard Henderson
  2021-06-18 18:57   ` Matheus K. Ferst
@ 2021-06-21 13:21   ` Matheus K. Ferst
  1 sibling, 0 replies; 6+ messages in thread
From: Matheus K. Ferst @ 2021-06-21 13:21 UTC (permalink / raw)
  To: Richard Henderson, qemu-devel; +Cc: alex.bennee, david

On 16/06/2021 22:12, Richard Henderson wrote:
> We had a single ATOMIC_MMU_LOOKUP macro that probed for
> read+write on all atomic ops.  This is incorrect for
> plain atomic load and atomic store.
> 
> For user-only, we rely on the host page permissions.
> 
> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/390
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   accel/tcg/atomic_template.h | 24 +++++-----
>   accel/tcg/cputlb.c          | 95 ++++++++++++++++++++++++++-----------
>   accel/tcg/user-exec.c       |  8 ++--
>   3 files changed, 83 insertions(+), 44 deletions(-)
> 
> diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
> index 0ff7f913e1..afa8a9daf3 100644
> --- a/accel/tcg/atomic_template.h
> +++ b/accel/tcg/atomic_template.h
> @@ -74,7 +74,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
>                                 ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
>       DATA_TYPE ret;
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
>                                            ATOMIC_MMU_IDX);
> @@ -95,7 +95,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
>   ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
>                                            ATOMIC_MMU_IDX);
>   
> @@ -110,7 +110,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
>                        ABI_TYPE val EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
>                                            ATOMIC_MMU_IDX);
>   
> @@ -125,7 +125,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
>                              ABI_TYPE val EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
>       DATA_TYPE ret;
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
>                                            ATOMIC_MMU_IDX);
> @@ -142,7 +142,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
>                           ABI_TYPE val EXTRA_ARGS)                    \
>   {                                                                   \
>       ATOMIC_MMU_DECLS;                                               \
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                        \
>       DATA_TYPE ret;                                                  \
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
>                                            ATOMIC_MMU_IDX);           \
> @@ -176,7 +176,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
>                           ABI_TYPE xval EXTRA_ARGS)                   \
>   {                                                                   \
>       ATOMIC_MMU_DECLS;                                               \
> -    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
> +    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
>       XDATA_TYPE cmp, old, new, val = xval;                           \
>       uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,    \
>                                            ATOMIC_MMU_IDX);           \
> @@ -221,7 +221,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
>                                 ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
>       DATA_TYPE ret;
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
>                                            ATOMIC_MMU_IDX);
> @@ -242,7 +242,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
>   ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
>                                            ATOMIC_MMU_IDX);
>   
> @@ -257,7 +257,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
>                        ABI_TYPE val EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
>                                            ATOMIC_MMU_IDX);
>   
> @@ -274,7 +274,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
>                              ABI_TYPE val EXTRA_ARGS)
>   {
>       ATOMIC_MMU_DECLS;
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
>       ABI_TYPE ret;
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
>                                            ATOMIC_MMU_IDX);
> @@ -291,7 +291,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
>                           ABI_TYPE val EXTRA_ARGS)                    \
>   {                                                                   \
>       ATOMIC_MMU_DECLS;                                               \
> -    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
> +    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                        \
>       DATA_TYPE ret;                                                  \
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
>                                            false, ATOMIC_MMU_IDX);    \
> @@ -323,7 +323,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
>                           ABI_TYPE xval EXTRA_ARGS)                   \
>   {                                                                   \
>       ATOMIC_MMU_DECLS;                                               \
> -    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
> +    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;                       \
>       XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
>       uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP,    \
>                                            false, ATOMIC_MMU_IDX);    \
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index f24348e979..b6d5fc6326 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1742,18 +1742,22 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
>   
>   #endif
>   
> -/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
> - * operations, or io operations to proceed.  Return the host address.  */
> +/*
> + * Probe for an atomic operation.  Do not allow unaligned operations,
> + * or io operations to proceed.  Return the host address.
> + *
> + * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
> + */
>   static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
> -                               TCGMemOpIdx oi, uintptr_t retaddr)
> +                               TCGMemOpIdx oi, int size, int prot,
> +                               uintptr_t retaddr)
>   {
>       size_t mmu_idx = get_mmuidx(oi);
> -    uintptr_t index = tlb_index(env, mmu_idx, addr);
> -    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
> -    target_ulong tlb_addr = tlb_addr_write(tlbe);
>       MemOp mop = get_memop(oi);
>       int a_bits = get_alignment_bits(mop);
> -    int s_bits = mop & MO_SIZE;
> +    uintptr_t index;
> +    CPUTLBEntry *tlbe;
> +    target_ulong tlb_addr;
>       void *hostaddr;
>   
>       /* Adjust the given return address.  */
> @@ -1767,7 +1771,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>       }
>   
>       /* Enforce qemu required alignment.  */
> -    if (unlikely(addr & ((1 << s_bits) - 1))) {
> +    if (unlikely(addr & (size - 1))) {
>           /* We get here if guest alignment was not requested,
>              or was not enforced by cpu_unaligned_access above.
>              We might widen the access and emulate, but for now
> @@ -1775,15 +1779,45 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>           goto stop_the_world;
>       }
>   
> +    index = tlb_index(env, mmu_idx, addr);
> +    tlbe = tlb_entry(env, mmu_idx, addr);
> +
>       /* Check TLB entry and enforce page permissions.  */
> -    if (!tlb_hit(tlb_addr, addr)) {
> -        if (!VICTIM_TLB_HIT(addr_write, addr)) {
> -            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
> -                     mmu_idx, retaddr);
> -            index = tlb_index(env, mmu_idx, addr);
> -            tlbe = tlb_entry(env, mmu_idx, addr);
> +    if (prot & PAGE_WRITE) {
> +        tlb_addr = tlb_addr_write(tlbe);
> +        if (!tlb_hit(tlb_addr, addr)) {
> +            if (!VICTIM_TLB_HIT(addr_write, addr)) {
> +                tlb_fill(env_cpu(env), addr, size,
> +                         MMU_DATA_STORE, mmu_idx, retaddr);
> +                index = tlb_index(env, mmu_idx, addr);
> +                tlbe = tlb_entry(env, mmu_idx, addr);
> +            }
> +            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
> +        }
> +
> +        /* Let the guest notice RMW on a write-only page.  */
> +        if ((prot & PAGE_READ) &&
> +            unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
> +            tlb_fill(env_cpu(env), addr, size,
> +                     MMU_DATA_LOAD, mmu_idx, retaddr);
> +            /*
> +             * Since we don't support reads and writes to different addresses,
> +             * and we do have the proper page loaded for write, this shouldn't
> +             * ever return.  But just in case, handle via stop-the-world.
> +             */
> +            goto stop_the_world;
> +        }
> +    } else /* if (prot & PAGE_READ) */ {
> +        tlb_addr = tlbe->addr_read;
> +        if (!tlb_hit(tlb_addr, addr)) {
> +            if (!VICTIM_TLB_HIT(addr_write, addr)) {
> +                tlb_fill(env_cpu(env), addr, size,
> +                         MMU_DATA_LOAD, mmu_idx, retaddr);
> +                index = tlb_index(env, mmu_idx, addr);
> +                tlbe = tlb_entry(env, mmu_idx, addr);
> +            }
> +            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
>           }
> -        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
>       }
>   
>       /* Notice an IO access or a needs-MMU-lookup access */
> @@ -1793,20 +1827,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>           goto stop_the_world;
>       }
>   
> -    /* Let the guest notice RMW on a write-only page.  */
> -    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
> -        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
> -                 mmu_idx, retaddr);
> -        /* Since we don't support reads and writes to different addresses,
> -           and we do have the proper page loaded for write, this shouldn't
> -           ever return.  But just in case, handle via stop-the-world.  */
> -        goto stop_the_world;
> -    }
> -
>       hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
>   
>       if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
> -        notdirty_write(env_cpu(env), addr, 1 << s_bits,
> +        notdirty_write(env_cpu(env), addr, size,
>                          &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
>       }
>   
> @@ -2669,7 +2693,12 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
>   #define ATOMIC_NAME(X) \
>       HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
>   #define ATOMIC_MMU_DECLS
> -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
> +#define ATOMIC_MMU_LOOKUP_RW \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
> +#define ATOMIC_MMU_LOOKUP_R \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
> +#define ATOMIC_MMU_LOOKUP_W \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
>   #define ATOMIC_MMU_CLEANUP
>   #define ATOMIC_MMU_IDX   get_mmuidx(oi)
>   
> @@ -2698,10 +2727,18 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
>   
>   #undef EXTRA_ARGS
>   #undef ATOMIC_NAME
> -#undef ATOMIC_MMU_LOOKUP
> +#undef ATOMIC_MMU_LOOKUP_RW
> +#undef ATOMIC_MMU_LOOKUP_R
> +#undef ATOMIC_MMU_LOOKUP_W
> +
>   #define EXTRA_ARGS         , TCGMemOpIdx oi
>   #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
> -#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
> +#define ATOMIC_MMU_LOOKUP_RW \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC())
> +#define ATOMIC_MMU_LOOKUP_R \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC())
> +#define ATOMIC_MMU_LOOKUP_W \
> +    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC())
>   
>   #define DATA_SIZE 1
>   #include "atomic_template.h"
> diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
> index fb2d43e6a9..e67b1617b5 100644
> --- a/accel/tcg/user-exec.c
> +++ b/accel/tcg/user-exec.c
> @@ -1220,7 +1220,9 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>   
>   /* Macro to call the above, with local variables from the use context.  */
>   #define ATOMIC_MMU_DECLS do {} while (0)
> -#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
> +#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
> +#define ATOMIC_MMU_LOOKUP_R   ATOMIC_MMU_LOOKUP_RW
> +#define ATOMIC_MMU_LOOKUP_W   ATOMIC_MMU_LOOKUP_RW
>   #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
>   #define ATOMIC_MMU_IDX MMU_USER_IDX
>   
> @@ -1250,12 +1252,12 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>   
>   #undef EXTRA_ARGS
>   #undef ATOMIC_NAME
> -#undef ATOMIC_MMU_LOOKUP
> +#undef ATOMIC_MMU_LOOKUP_RW
>   
>   #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
>   #define ATOMIC_NAME(X) \
>       HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
> -#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
> +#define ATOMIC_MMU_LOOKUP_RW  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
>   
>   #define DATA_SIZE 16
>   #include "atomic_template.h"
> 

Tested-by: Matheus K. Ferst <matheus.ferst@eldorado.org.br>

-- 
Matheus K. Ferst
Instituto de Pesquisas ELDORADO <http://www.eldorado.org.br/>
Analista de Software Júnior
Aviso Legal - Disclaimer <https://www.eldorado.org.br/disclaimer.html>


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2021-06-21 13:27 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-06-17  1:12 [PATCH v2 0/1] accel/tcg: Fix #390 and other atomicity musings Richard Henderson
2021-06-17  1:12 ` [PATCH v2 1/1] accel/tcg: Probe the proper permissions for atomic ops Richard Henderson
2021-06-18 18:57   ` Matheus K. Ferst
2021-06-18 19:32     ` Richard Henderson
2021-06-21 13:20       ` Matheus K. Ferst
2021-06-21 13:21   ` Matheus K. Ferst

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).