* [Qemu-devel] [PATCH v2 0/3] cputlb: Adjust tlb bswap implementation
@ 2019-09-12 19:59 Richard Henderson
From: Richard Henderson @ 2019-09-12 19:59 UTC
To: qemu-devel; +Cc: tony.nguyen, mark.cave-ayland
Changes from v1:
* Move QEMU_ALWAYS_INLINE to qemu/compiler.h.
* Rename some inline wrapper functions.
* Don't break TLB_NOTDIRTY in patch 3.
Blurb from v1:
The version that Tony came up with, and I reviewed, didn't actually
work when applied to RAM; it only worked for I/O memory. This was
the root cause for
https://lists.gnu.org/archive/html/qemu-devel/2019-09/msg00036.html
I tried a couple of different approaches in load/store_helper, but
this is the one that doesn't affect the normal case -- a simple TLB
miss against (non-swapped) RAM.
r~
Richard Henderson (3):
cputlb: Disable __always_inline__ without optimization
cputlb: Replace switches in load/store_helper with callback
cputlb: Introduce TLB_BSWAP
include/exec/cpu-all.h | 2 +
include/qemu/compiler.h | 11 ++
accel/tcg/cputlb.c | 235 ++++++++++++++++++++--------------------
3 files changed, 132 insertions(+), 116 deletions(-)
--
2.17.1
* [Qemu-devel] [PATCH v2 1/3] cputlb: Disable __always_inline__ without optimization
From: Richard Henderson @ 2019-09-12 19:59 UTC
To: qemu-devel; +Cc: tony.nguyen, mark.cave-ayland
This forced inlining can result in missing symbols,
which makes a debugging build harder to follow.
Reported-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
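A standalone sketch of the problem being fixed, not part of the patch
(all names here are hypothetical): a plain "static inline" function is
emitted out of line at -O0 and can be breakpointed, but
__attribute__((always_inline)) forces inlining even then, so the symbol
vanishes. Guarding the attribute on __OPTIMIZE__, as the macro below
does, keeps debug builds followable:

    #include <stdio.h>

    /* Same guard as the macro added below, under a hypothetical name. */
    #if defined(__OPTIMIZE__) && __has_attribute(always_inline)
    #define SK_ALWAYS_INLINE __attribute__((always_inline))
    #else
    #define SK_ALWAYS_INLINE  /* -O0: plain inline, symbol is kept */
    #endif

    static inline int SK_ALWAYS_INLINE
    sk_add_one(int x)
    {
        return x + 1;   /* "break sk_add_one" works again at -O0 */
    }

    int main(void)
    {
        printf("%d\n", sk_add_one(41));
        return 0;
    }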
include/qemu/compiler.h | 11 +++++++++++
accel/tcg/cputlb.c | 4 ++--
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index 09fc44cca4..d6d400c523 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -170,6 +170,17 @@
# define QEMU_NONSTRING
#endif
+/*
+ * Forced inlining may be desired to encourage constant propagation
+ * of function parameters. However, it can also make debugging harder,
+ * so disable it for a non-optimizing build.
+ */
+#if defined(__OPTIMIZE__) && __has_attribute(always_inline)
+#define QEMU_ALWAYS_INLINE __attribute__((always_inline))
+#else
+#define QEMU_ALWAYS_INLINE
+#endif
+
/* Implement C11 _Generic via GCC builtins. Example:
*
* QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index abae79650c..2222b87764 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1281,7 +1281,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
-static inline uint64_t __attribute__((always_inline))
+static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
FullLoadHelper *full_load)
@@ -1530,7 +1530,7 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
* Store Helpers
*/
-static inline void __attribute__((always_inline))
+static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
--
2.17.1
* [Qemu-devel] [PATCH v2 2/3] cputlb: Replace switches in load/store_helper with callback
From: Richard Henderson @ 2019-09-12 19:59 UTC
To: qemu-devel; +Cc: tony.nguyen, mark.cave-ayland
Add a function parameter to perform the actual load/store to RAM.
With optimization, this results in identical code.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
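For reference, a minimal sketch of the pattern (hypothetical names; the
TLB lookup and slow paths are elided): because the common helper is
force-inlined and each caller passes the callback as a compile-time
constant, the optimizer folds the indirect call into a direct one,
which is why the generated code is unchanged:

    #include <stdint.h>
    #include <string.h>

    typedef uint64_t LoadFn(const void *);

    /* Common helper: the actual memory access is now a parameter. */
    static inline uint64_t __attribute__((always_inline))
    sk_load(const void *haddr, LoadFn *direct)
    {
        /* TLB hit/miss, I/O and unaligned handling elided */
        return direct(haddr);   /* folds to a direct call at -O2 */
    }

    static uint64_t sk_ldub(const void *p)
    {
        uint8_t v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    /* Per-width entry point, in the shape of full_ldub_mmu below. */
    uint64_t sk_ldub_mmu(const void *haddr)
    {
        return sk_load(haddr, sk_ldub);
    }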
accel/tcg/cputlb.c | 159 +++++++++++++++++++++++----------------------
1 file changed, 83 insertions(+), 76 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 2222b87764..b4a63d3928 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1280,11 +1280,38 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
+typedef uint64_t LoadHelper(const void *);
+
+/* Wrap the unaligned load helpers so that they have a common signature. */
+static inline uint64_t wrap_ldub(const void *haddr)
+{
+ return ldub_p(haddr);
+}
+
+static inline uint64_t wrap_lduw_be(const void *haddr)
+{
+ return lduw_be_p(haddr);
+}
+
+static inline uint64_t wrap_lduw_le(const void *haddr)
+{
+ return lduw_le_p(haddr);
+}
+
+static inline uint64_t wrap_ldul_be(const void *haddr)
+{
+ return (uint32_t)ldl_be_p(haddr);
+}
+
+static inline uint64_t wrap_ldul_le(const void *haddr)
+{
+ return (uint32_t)ldl_le_p(haddr);
+}
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
- FullLoadHelper *full_load)
+ FullLoadHelper *full_load, LoadHelper *direct)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1373,33 +1400,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
- switch (op) {
- case MO_UB:
- res = ldub_p(haddr);
- break;
- case MO_BEUW:
- res = lduw_be_p(haddr);
- break;
- case MO_LEUW:
- res = lduw_le_p(haddr);
- break;
- case MO_BEUL:
- res = (uint32_t)ldl_be_p(haddr);
- break;
- case MO_LEUL:
- res = (uint32_t)ldl_le_p(haddr);
- break;
- case MO_BEQ:
- res = ldq_be_p(haddr);
- break;
- case MO_LEQ:
- res = ldq_le_p(haddr);
- break;
- default:
- g_assert_not_reached();
- }
-
- return res;
+ return direct(haddr);
}
/*
@@ -1415,7 +1416,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
+ return load_helper(env, addr, oi, retaddr, MO_UB, false,
+ full_ldub_mmu, wrap_ldub);
}
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
@@ -1428,7 +1430,7 @@ static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
- full_le_lduw_mmu);
+ full_le_lduw_mmu, wrap_lduw_le);
}
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
@@ -1441,7 +1443,7 @@ static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
- full_be_lduw_mmu);
+ full_be_lduw_mmu, wrap_lduw_be);
}
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
@@ -1454,7 +1456,7 @@ static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
- full_le_ldul_mmu);
+ full_le_ldul_mmu, wrap_ldul_le);
}
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
@@ -1467,7 +1469,7 @@ static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
- full_be_ldul_mmu);
+ full_be_ldul_mmu, wrap_ldul_be);
}
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
@@ -1480,14 +1482,14 @@ uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
- helper_le_ldq_mmu);
+ helper_le_ldq_mmu, ldq_le_p);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
- helper_be_ldq_mmu);
+ helper_be_ldq_mmu, ldq_be_p);
}
/*
@@ -1530,9 +1532,38 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
* Store Helpers
*/
+typedef void StoreHelper(void *, uint64_t);
+
+/* Wrap the unaligned store helpers so that they have a common signature. */
+static inline void wrap_stb(void *haddr, uint64_t val)
+{
+ stb_p(haddr, val);
+}
+
+static inline void wrap_stw_be(void *haddr, uint64_t val)
+{
+ stw_be_p(haddr, val);
+}
+
+static inline void wrap_stw_le(void *haddr, uint64_t val)
+{
+ stw_le_p(haddr, val);
+}
+
+static inline void wrap_stl_be(void *haddr, uint64_t val)
+{
+ stl_be_p(haddr, val);
+}
+
+static inline void wrap_stl_le(void *haddr, uint64_t val)
+{
+ stl_le_p(haddr, val);
+}
+
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
+ TCGMemOpIdx oi, uintptr_t retaddr, MemOp op,
+ StoreHelper *direct)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1657,74 +1688,49 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
- switch (op) {
- case MO_UB:
- stb_p(haddr, val);
- break;
- case MO_BEUW:
- stw_be_p(haddr, val);
- break;
- case MO_LEUW:
- stw_le_p(haddr, val);
- break;
- case MO_BEUL:
- stl_be_p(haddr, val);
- break;
- case MO_LEUL:
- stl_le_p(haddr, val);
- break;
- case MO_BEQ:
- stq_be_p(haddr, val);
- break;
- case MO_LEQ:
- stq_le_p(haddr, val);
- break;
- default:
- g_assert_not_reached();
- break;
- }
+ direct(haddr, val);
}
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_UB);
+ store_helper(env, addr, val, oi, retaddr, MO_UB, wrap_stb);
}
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_LEUW);
+ store_helper(env, addr, val, oi, retaddr, MO_LEUW, wrap_stw_le);
}
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_BEUW);
+ store_helper(env, addr, val, oi, retaddr, MO_BEUW, wrap_stw_be);
}
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_LEUL);
+ store_helper(env, addr, val, oi, retaddr, MO_LEUL, wrap_stl_le);
}
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_BEUL);
+ store_helper(env, addr, val, oi, retaddr, MO_BEUL, wrap_stl_be);
}
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_LEQ);
+ store_helper(env, addr, val, oi, retaddr, MO_LEQ, stq_le_p);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_BEQ);
+ store_helper(env, addr, val, oi, retaddr, MO_BEQ, stq_be_p);
}
/* First set of helpers allows passing in of OI and RETADDR. This makes
@@ -1789,7 +1795,8 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
+ return load_helper(env, addr, oi, retaddr, MO_8, true,
+ full_ldub_cmmu, wrap_ldub);
}
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
@@ -1802,7 +1809,7 @@ static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
- full_le_lduw_cmmu);
+ full_le_lduw_cmmu, wrap_lduw_le);
}
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
@@ -1815,7 +1822,7 @@ static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
- full_be_lduw_cmmu);
+ full_be_lduw_cmmu, wrap_lduw_be);
}
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
@@ -1828,7 +1835,7 @@ static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
- full_le_ldul_cmmu);
+ full_le_ldul_cmmu, wrap_ldul_le);
}
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
@@ -1841,7 +1848,7 @@ static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
- full_be_ldul_cmmu);
+ full_be_ldul_cmmu, wrap_ldul_be);
}
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
@@ -1854,12 +1861,12 @@ uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
- helper_le_ldq_cmmu);
+ helper_le_ldq_cmmu, ldq_le_p);
}
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
- helper_be_ldq_cmmu);
+ helper_be_ldq_cmmu, ldq_be_p);
}
--
2.17.1
* [Qemu-devel] [PATCH v2 3/3] cputlb: Introduce TLB_BSWAP
From: Richard Henderson @ 2019-09-12 19:59 UTC
To: qemu-devel; +Cc: tony.nguyen, mark.cave-ayland
Handle bswap on RAM directly in load/store_helper. This fixes a
bug in the previous implementation, which could not use the
I/O path for RAM.
Fixes: a26fc6f5152b47f1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
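A condensed sketch of the new RAM dispatch (the flag value and names
are illustrative only, not QEMU's actual definitions): the flag rides
in the low bits of the TLB entry address, and a page tagged with it
simply uses the accessor of the opposite endianness instead of
detouring through the I/O path:

    #include <stdint.h>

    typedef uint64_t LoadFn(const void *);

    #define SK_BSWAP (1u << 1)   /* illustrative flag bit */

    /* RAM fast path: callers pass both accessors, e.g. the
     * little-endian 8-byte helper would pass (ldq_le_p, ldq_be_p). */
    static uint64_t sk_load_ram(uintptr_t tlb_addr, const void *haddr,
                                LoadFn *direct, LoadFn *direct_swap)
    {
        return (tlb_addr & SK_BSWAP) ? direct_swap(haddr)
                                     : direct(haddr);
    }

For I/O, the same bit instead flips the MemOp endianness
(op ^ MO_BSWAP), as the diff below shows.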
include/exec/cpu-all.h | 2 +
accel/tcg/cputlb.c | 118 ++++++++++++++++++++---------------------
2 files changed, 59 insertions(+), 61 deletions(-)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index d2d443c4f9..3928edab9a 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -331,6 +331,8 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4))
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP (1 << (TARGET_PAGE_BITS - 5))
/* Use this mask to check interception with an alignment mask
* in a TCG backend.
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index b4a63d3928..354a75927a 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -737,8 +737,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
address |= TLB_INVALID_MASK;
}
if (attrs.byte_swap) {
- /* Force the access through the I/O slow path. */
- address |= TLB_MMIO;
+ address |= TLB_BSWAP;
}
if (!memory_region_is_ram(section->mr) &&
!memory_region_is_romd(section->mr)) {
@@ -901,10 +900,6 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false;
MemTxResult r;
- if (iotlbentry->attrs.byte_swap) {
- op ^= MO_BSWAP;
- }
-
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -947,10 +942,6 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false;
MemTxResult r;
- if (iotlbentry->attrs.byte_swap) {
- op ^= MO_BSWAP;
- }
-
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -1311,7 +1302,8 @@ static inline uint64_t wrap_ldul_le(const void *haddr)
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
- FullLoadHelper *full_load, LoadHelper *direct)
+ FullLoadHelper *full_load, LoadHelper *direct,
+ LoadHelper *direct_swap)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1361,26 +1353,27 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
/* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, BP_MEM_READ, retaddr);
-
- /* The backing page may or may not require I/O. */
- tlb_addr &= ~TLB_WATCHPOINT;
- if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
- goto do_aligned_access;
- }
}
/* Handle I/O access. */
- return io_readx(env, iotlbentry, mmu_idx, addr,
- retaddr, access_type, op);
- }
+ if (likely(tlb_addr & TLB_MMIO)) {
+ return io_readx(env, iotlbentry, mmu_idx, addr,
+ retaddr, access_type,
+ op ^ (tlb_addr & TLB_BSWAP ? MO_BSWAP : 0));
+ }
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (size > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
- >= TARGET_PAGE_SIZE)) {
+ if (unlikely(tlb_addr & TLB_BSWAP)) {
+ haddr = (void *)((uintptr_t)addr + entry->addend);
+ return direct_swap(haddr);
+ }
+ } else if (size > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+ >= TARGET_PAGE_SIZE)) {
+ /* Handle slow unaligned access (it spans two pages or IO). */
target_ulong addr1, addr2;
uint64_t r1, r2;
unsigned shift;
+
do_unaligned_access:
addr1 = addr & ~((target_ulong)size - 1);
addr2 = addr1 + size;
@@ -1398,7 +1391,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
return res & MAKE_64BIT_MASK(0, size * 8);
}
- do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
return direct(haddr);
}
@@ -1417,7 +1409,7 @@ static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_UB, false,
- full_ldub_mmu, wrap_ldub);
+ full_ldub_mmu, wrap_ldub, wrap_ldub);
}
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
@@ -1430,7 +1422,7 @@ static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
- full_le_lduw_mmu, wrap_lduw_le);
+ full_le_lduw_mmu, wrap_lduw_le, wrap_lduw_be);
}
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
@@ -1443,7 +1435,7 @@ static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
- full_be_lduw_mmu, wrap_lduw_be);
+ full_be_lduw_mmu, wrap_lduw_be, wrap_lduw_le);
}
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
@@ -1456,7 +1448,7 @@ static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
- full_le_ldul_mmu, wrap_ldul_le);
+ full_le_ldul_mmu, wrap_ldul_le, wrap_ldul_be);
}
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
@@ -1469,7 +1461,7 @@ static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
- full_be_ldul_mmu, wrap_ldul_be);
+ full_be_ldul_mmu, wrap_ldul_be, wrap_ldul_le);
}
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
@@ -1482,14 +1474,14 @@ uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
- helper_le_ldq_mmu, ldq_le_p);
+ helper_le_ldq_mmu, ldq_le_p, ldq_be_p);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
- helper_be_ldq_mmu, ldq_be_p);
+ helper_be_ldq_mmu, ldq_be_p, ldq_le_p);
}
/*
@@ -1563,7 +1555,7 @@ static inline void wrap_stl_le(void *haddr, uint64_t val)
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op,
- StoreHelper *direct)
+ StoreHelper *direct, StoreHelper *direct_swap)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1608,23 +1600,24 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, BP_MEM_WRITE, retaddr);
-
- /* The backing page may or may not require I/O. */
- tlb_addr &= ~TLB_WATCHPOINT;
- if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
- goto do_aligned_access;
- }
}
/* Handle I/O access. */
- io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
- return;
- }
+ if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
+ io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+ op ^ (tlb_addr & TLB_BSWAP ? MO_BSWAP : 0));
+ return;
+ }
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (size > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
- >= TARGET_PAGE_SIZE)) {
+ if (unlikely(tlb_addr & TLB_BSWAP)) {
+ haddr = (void *)((uintptr_t)addr + entry->addend);
+ direct_swap(haddr, val);
+ return;
+ }
+ } else if (size > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+ >= TARGET_PAGE_SIZE)) {
+ /* Handle slow unaligned access (it spans two pages or IO). */
int i;
uintptr_t index2;
CPUTLBEntry *entry2;
@@ -1686,7 +1679,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
return;
}
- do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
direct(haddr, val);
}
@@ -1694,43 +1686,47 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_UB, wrap_stb);
+ store_helper(env, addr, val, oi, retaddr, MO_UB, wrap_stb, wrap_stb);
}
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_LEUW, wrap_stw_le);
+ store_helper(env, addr, val, oi, retaddr, MO_LEUW,
+ wrap_stw_le, wrap_stw_be);
}
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_BEUW, wrap_stw_be);
+ store_helper(env, addr, val, oi, retaddr, MO_BEUW,
+ wrap_stw_be, wrap_stw_le);
}
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_LEUL, wrap_stl_le);
+ store_helper(env, addr, val, oi, retaddr, MO_LEUL,
+ wrap_stl_le, wrap_stl_be);
}
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_BEUL, wrap_stl_be);
+ store_helper(env, addr, val, oi, retaddr, MO_BEUL,
+ wrap_stl_be, wrap_stl_le);
}
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_LEQ, stq_le_p);
+ store_helper(env, addr, val, oi, retaddr, MO_LEQ, stq_le_p, stq_be_p);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_BEQ, stq_be_p);
+ store_helper(env, addr, val, oi, retaddr, MO_BEQ, stq_be_p, stq_le_p);
}
/* First set of helpers allows passing in of OI and RETADDR. This makes
@@ -1796,7 +1792,7 @@ static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_8, true,
- full_ldub_cmmu, wrap_ldub);
+ full_ldub_cmmu, wrap_ldub, wrap_ldub);
}
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
@@ -1809,7 +1805,7 @@ static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
- full_le_lduw_cmmu, wrap_lduw_le);
+ full_le_lduw_cmmu, wrap_lduw_le, wrap_lduw_be);
}
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
@@ -1822,7 +1818,7 @@ static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
- full_be_lduw_cmmu, wrap_lduw_be);
+ full_be_lduw_cmmu, wrap_lduw_be, wrap_lduw_le);
}
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
@@ -1835,7 +1831,7 @@ static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
- full_le_ldul_cmmu, wrap_ldul_le);
+ full_le_ldul_cmmu, wrap_ldul_le, wrap_ldul_be);
}
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
@@ -1848,7 +1844,7 @@ static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
- full_be_ldul_cmmu, wrap_ldul_be);
+ full_be_ldul_cmmu, wrap_ldul_be, wrap_ldul_le);
}
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
@@ -1861,12 +1857,12 @@ uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
- helper_le_ldq_cmmu, ldq_le_p);
+ helper_le_ldq_cmmu, ldq_le_p, ldq_be_p);
}
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
- helper_be_ldq_cmmu, ldq_be_p);
+ helper_be_ldq_cmmu, ldq_be_p, ldq_le_p);
}
--
2.17.1