* [PATCH v8 0/4] aarch64: add support for FEAT_TLBIRANGE and FEAT_TLBIOS
From: Rebecca Cran @ 2021-05-05 3:04 UTC
To: Richard Henderson, Peter Maydell; +Cc: Rebecca Cran, qemu-devel, qemu-arm
Changes since v7: improved readability and fixed a bug in
tlb_flush_page_range_bits_by_mmuidx_async_0.
Rebecca Cran (4):
accel/tcg: Add TLB invalidation support for ranges of addresses
target/arm: Add support for FEAT_TLBIRANGE
target/arm: Add support for FEAT_TLBIOS
target/arm: set ID_AA64ISAR0.TLB to 2 for max AARCH64 CPU type
accel/tcg/cputlb.c | 128 ++++++-
include/exec/exec-all.h | 46 +++
target/arm/cpu.h | 10 +
target/arm/cpu64.c | 1 +
target/arm/helper.c | 371 ++++++++++++++++++++
5 files changed, 553 insertions(+), 3 deletions(-)
--
2.26.2
* [PATCH v8 1/4] accel/tcg: Add TLB invalidation support for ranges of addresses
From: Rebecca Cran @ 2021-05-05 3:04 UTC
To: Richard Henderson, Peter Maydell; +Cc: Rebecca Cran, qemu-devel, qemu-arm
Add functions to support the FEAT_TLBIRANGE ARMv8.4 feature that adds
TLB invalidation instructions to invalidate ranges of addresses.
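For illustration (not part of this patch): a guest TLBI-by-range covering
two 4K pages starting at 0x40000000 would reach the new API roughly as
below, where cs, mask and bits stand in for values the target/arm glue
(added later in this series) computes:
    /* Hedged sketch: flush a two-page range for the MMU indexes in mask. */
    tlb_flush_page_range_bits_by_mmuidx(cs, 0x40000000,
                                        2 * TARGET_PAGE_SIZE, mask, bits);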
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
---
accel/tcg/cputlb.c | 128 +++++++++++++++++++-
include/exec/exec-all.h | 46 +++++++
2 files changed, 171 insertions(+), 3 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 8a7b779270a4..9381745f2528 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -709,7 +709,7 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
-static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
+static bool tlb_flush_page_bits_locked(CPUArchState *env, int midx,
target_ulong page, unsigned bits)
{
CPUTLBDesc *d = &env_tlb(env)->d[midx];
@@ -729,7 +729,7 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
midx, page, mask);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
- return;
+ return true;
}
/* Check if we need to flush due to large pages. */
@@ -738,13 +738,14 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
midx, d->large_page_addr, d->large_page_mask);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
- return;
+ return true;
}
if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
tlb_n_used_entries_dec(env, midx);
}
tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
+ return false;
}
typedef struct {
@@ -943,6 +944,127 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
}
}
+typedef struct {
+ target_ulong addr;
+ target_ulong length;
+ uint16_t idxmap;
+ uint16_t bits;
+} TLBFlushPageRangeBitsByMMUIdxData;
+
+static void
+tlb_flush_page_range_bits_by_mmuidx_async_0(CPUState *cpu,
+ target_ulong addr,
+ target_ulong length,
+ uint16_t idxmap,
+ unsigned bits)
+{
+ CPUArchState *env = cpu->env_ptr;
+ bool full_flush;
+ int mmu_idx;
+ target_ulong page;
+
+ assert_cpu_is_self(cpu);
+
+ tlb_debug("page addr:" TARGET_FMT_lx "/%u len: " TARGET_FMT_lx
+ " mmu_map:0x%x\n",
+ addr, bits, length, idxmap);
+
+ qemu_spin_lock(&env_tlb(env)->c.lock);
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ if ((idxmap >> mmu_idx) & 1) {
+ for (page = addr; page < (addr + length); page += TARGET_PAGE_SIZE) {
+ full_flush = tlb_flush_page_bits_locked(env, mmu_idx,
+ page, bits);
+ if (full_flush) {
+ break;
+ }
+ }
+ }
+ }
+ qemu_spin_unlock(&env_tlb(env)->c.lock);
+
+ for (page = addr; page < (addr + length); page += TARGET_PAGE_SIZE) {
+ tb_flush_jmp_cache(cpu, page);
+ }
+}
+
+static void
+tlb_flush_page_range_bits_by_mmuidx_async_1(CPUState *cpu,
+ run_on_cpu_data data)
+{
+ TLBFlushPageRangeBitsByMMUIdxData *d = data.host_ptr;
+
+ tlb_flush_page_range_bits_by_mmuidx_async_0(cpu, d->addr, d->length,
+ d->idxmap, d->bits);
+
+ g_free(d);
+}
+
+void tlb_flush_page_range_bits_by_mmuidx(CPUState *cpu,
+ target_ulong addr,
+ target_ulong length,
+ uint16_t idxmap,
+ unsigned bits)
+{
+ TLBFlushPageRangeBitsByMMUIdxData d;
+ TLBFlushPageRangeBitsByMMUIdxData *p;
+
+ /* This should already be page aligned */
+ addr &= TARGET_PAGE_MASK;
+
+ d.addr = addr & TARGET_PAGE_MASK;
+ d.idxmap = idxmap;
+ d.bits = bits;
+ d.length = length;
+
+ if (qemu_cpu_is_self(cpu)) {
+ tlb_flush_page_range_bits_by_mmuidx_async_0(cpu, addr, length,
+ idxmap, bits);
+ } else {
+ p = g_new(TLBFlushPageRangeBitsByMMUIdxData, 1);
+
+ /* Allocate a structure, freed by the worker. */
+ *p = d;
+ async_run_on_cpu(cpu, tlb_flush_page_range_bits_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
+ }
+}
+
+void tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ target_ulong length,
+ uint16_t idxmap,
+ unsigned bits)
+{
+ TLBFlushPageRangeBitsByMMUIdxData d;
+ TLBFlushPageRangeBitsByMMUIdxData *p;
+ CPUState *dst_cpu;
+
+ /* This should already be page aligned */
+ addr &= TARGET_PAGE_MASK;
+
+ d.addr = addr;
+ d.idxmap = idxmap;
+ d.bits = bits;
+ d.length = length;
+
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ p = g_new(TLBFlushPageRangeBitsByMMUIdxData, 1);
+ *p = d;
+ async_run_on_cpu(dst_cpu,
+ tlb_flush_page_range_bits_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
+ }
+ }
+
+ p = g_new(TLBFlushPageRangeBitsByMMUIdxData, 1);
+ *p = d;
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_range_bits_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
+}
+
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 6b036cae8f65..a7ff35efb865 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -212,6 +212,37 @@ void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
*/
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
uint16_t idxmap);
+/**
+ * tlb_flush_page_range_bits_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of start of page range to be flushed
+ * @length: the number of bytes to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ * @bits: number of significant bits in the virtual address
+ * Flush a range of pages from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_range_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+ target_ulong length, uint16_t idxmap,
+ unsigned bits);
+/**
+ * tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of start of page range to be flushed
+ * @length: the number of bytes to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ * @bits: number of significant bits in the virtual address
+ * Flush a range of pages from the TLB of all CPUs, for the specified
+ * MMU indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
+ * source vCPU's work is scheduled as safe work, meaning all flushes
+ * will be complete once the source vCPU's safe work is complete. This
+ * will depend on when the guest's translation ends the TB.
+ */
+void tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ target_ulong addr,
+ target_ulong length,
+ uint16_t idxmap,
+ unsigned bits);
/**
* tlb_flush_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
@@ -313,6 +344,21 @@ static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
target_ulong addr)
{
}
+static inline void tlb_flush_page_range_bits_by_mmuidx(CPUState *cpu,
+ target_ulong addr,
+ target_ulong length,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void
+tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ target_ulong length,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
static inline void tlb_flush(CPUState *cpu)
{
}
--
2.26.2
* [PATCH v8 2/4] target/arm: Add support for FEAT_TLBIRANGE
From: Rebecca Cran @ 2021-05-05 3:04 UTC
To: Richard Henderson, Peter Maydell; +Cc: Rebecca Cran, qemu-devel, qemu-arm
ARMv8.4 adds the mandatory FEAT_TLBIRANGE. It provides TLBI
maintenance instructions that apply to a range of input addresses.
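For reference, the range ops encode their payload in Xt as follows (field
layout per the Arm ARM; the TTL hint is not used by this implementation):
    TG       [47:46]  translation granule: 1 = 4K, 2 = 16K, 3 = 64K (0 reserved)
    SCALE    [45:44]
    NUM      [43:39]
    TTL      [38:37]
    BaseADDR [36:0]
The range spans (NUM + 1) * 2^(5*SCALE + 1) granules of TG size, starting
at the address encoded in BaseADDR.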
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
---
target/arm/cpu.h | 5 +
target/arm/helper.c | 296 ++++++++++++++++++++
2 files changed, 301 insertions(+)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 616b39325347..5802798c3069 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -4071,6 +4071,11 @@ static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
}
+static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
+}
+
static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 9b1b98705f91..cb10851efda8 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -4759,6 +4759,219 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMMMUIdxBit_SE3, bits);
}
+#ifdef TARGET_AARCH64
+static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
+ uint64_t value)
+{
+ unsigned int page_shift;
+ unsigned int page_size_granule;
+ uint64_t num;
+ uint64_t scale;
+ uint64_t exponent;
+ uint64_t length;
+
+ num = extract64(value, 39, 5);
+ scale = extract64(value, 44, 2);
+ page_size_granule = extract64(value, 46, 2);
+
+ page_shift = page_size_granule * 2 + 10;
+
+ if (page_size_granule == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
+ page_size_granule);
+ return 0;
+ }
+
+ exponent = (5 * scale) + 1;
+ length = (num + 1) << (exponent + page_shift);
+
+ return length;
+}
+
+static void tlbi_aa64_rvae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL1&0.
+ * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ ARMMMUIdx mmu_idx;
+ int mask;
+ int bits;
+ uint64_t pageaddr;
+ uint64_t length;
+
+ CPUState *cs = env_cpu(env);
+ mask = vae1_tlbmask(env);
+ mmu_idx = ARM_MMU_IDX_A | ctz32(mask);
+ if (regime_has_2_ranges(mmu_idx)) {
+ pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
+ } else {
+ pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ }
+ length = tlbi_aa64_range_get_length(env, value);
+ bits = tlbbits_for_regime(env, mmu_idx, pageaddr);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ length, mask,
+ bits);
+ } else {
+ tlb_flush_page_range_bits_by_mmuidx(cs, pageaddr, length, mask,
+ bits);
+ }
+}
+
+static void tlbi_aa64_rvae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, Inner/Outer Shareable EL1&0.
+ * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
+ * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
+ * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
+ * shareable specific flushes.
+ */
+ ARMMMUIdx mmu_idx;
+ int mask;
+ int bits;
+ uint64_t pageaddr;
+ uint64_t length;
+
+ CPUState *cs = env_cpu(env);
+ mask = vae1_tlbmask(env);
+ mmu_idx = ARM_MMU_IDX_A | ctz32(mask);
+ if (regime_has_2_ranges(mmu_idx)) {
+ pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
+ } else {
+ pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ }
+ length = tlbi_aa64_range_get_length(env, value);
+ bits = tlbbits_for_regime(env, mmu_idx, pageaddr);
+
+ tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ length, mask,
+ bits);
+}
+
+static void tlbi_aa64_rvae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL2.
+ * Currently handles all of RVAE2 and RVALE2,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ bool secure;
+ int mask;
+ int bits;
+ uint64_t pageaddr;
+ uint64_t length;
+
+ CPUState *cs = env_cpu(env);
+ secure = arm_is_secure_below_el3(env);
+ pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ length = tlbi_aa64_range_get_length(env, value);
+ mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
+ bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
+ pageaddr);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ length, mask,
+ bits);
+ } else {
+ tlb_flush_page_range_bits_by_mmuidx(cs, pageaddr, length, mask,
+ bits);
+ }
+}
+
+static void tlbi_aa64_rvae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, Inner/Outer Shareable, EL2.
+ * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
+ * since we don't support flush-for-specific-ASID-only,
+ * flush-last-level-only or inner/outer shareable specific flushes.
+ */
+ bool secure;
+ int mask;
+ int bits;
+ uint64_t pageaddr;
+ uint64_t length;
+
+ CPUState *cs = env_cpu(env);
+ secure = arm_is_secure_below_el3(env);
+ pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ length = tlbi_aa64_range_get_length(env, value);
+ mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
+ bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
+ pageaddr);
+
+ tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ length, mask,
+ bits);
+}
+
+static void tlbi_aa64_rvae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL3.
+ * Currently handles all of RVAE3 and RVALE3,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ int bits;
+ uint64_t pageaddr;
+ uint64_t length;
+
+ CPUState *cs = env_cpu(env);
+ pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ length = tlbi_aa64_range_get_length(env, value);
+ bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ length,
+ ARMMMUIdxBit_SE3,
+ bits);
+ } else {
+ tlb_flush_page_range_bits_by_mmuidx(cs, pageaddr, length,
+ ARMMMUIdxBit_SE3,
+ bits);
+ }
+}
+
+static void tlbi_aa64_rvae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL3, Inner/Outer Shareable.
+ * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
+ * since we don't support flush-for-specific-ASID-only,
+ * flush-last-level-only or inner/outer specific flushes.
+ */
+ int bits;
+ uint64_t pageaddr;
+ uint64_t length;
+
+ CPUState *cs = env_cpu(env);
+ pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ length = tlbi_aa64_range_get_length(env, value);
+ bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+
+ tlb_flush_page_range_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ length,
+ ARMMMUIdxBit_SE3,
+ bits);
+}
+#endif
+
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -6920,6 +7133,86 @@ static const ARMCPRegInfo pauth_reginfo[] = {
REGINFO_SENTINEL
};
+static const ARMCPRegInfo tlbirange_reginfo[] = {
+ { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2_write },
+ { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2_write },
+ { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3_write },
+ { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3_write },
+ REGINFO_SENTINEL
+};
+
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
Error *err = NULL;
@@ -8289,6 +8582,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_rndr, cpu)) {
define_arm_cp_regs(cpu, rndr_reginfo);
}
+ if (cpu_isar_feature(aa64_tlbirange, cpu)) {
+ define_arm_cp_regs(cpu, tlbirange_reginfo);
+ }
#ifndef CONFIG_USER_ONLY
/* Data Cache clean instructions up to PoP */
if (cpu_isar_feature(aa64_dcpop, cpu)) {
--
2.26.2
* [PATCH v8 3/4] target/arm: Add support for FEAT_TLBIOS
From: Rebecca Cran @ 2021-05-05 3:04 UTC
To: Richard Henderson, Peter Maydell; +Cc: Rebecca Cran, qemu-devel, qemu-arm
ARMv8.4 adds the mandatory FEAT_TLBIOS. It provides TLBI
maintenance instructions that extend to the Outer Shareable domain.
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
---
target/arm/cpu.h | 5 ++
target/arm/helper.c | 75 ++++++++++++++++++++
2 files changed, 80 insertions(+)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 5802798c3069..7986a217acdd 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -4076,6 +4076,11 @@ static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
}
+static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0;
+}
+
static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index cb10851efda8..04c4d766adb9 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -7213,6 +7213,78 @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
REGINFO_SENTINEL
};
+static const ARMCPRegInfo tlbios_reginfo[] = {
+ { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ REGINFO_SENTINEL
+};
+
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
Error *err = NULL;
@@ -8585,6 +8657,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_tlbirange, cpu)) {
define_arm_cp_regs(cpu, tlbirange_reginfo);
}
+ if (cpu_isar_feature(aa64_tlbios, cpu)) {
+ define_arm_cp_regs(cpu, tlbios_reginfo);
+ }
#ifndef CONFIG_USER_ONLY
/* Data Cache clean instructions up to PoP */
if (cpu_isar_feature(aa64_dcpop, cpu)) {
--
2.26.2
* [PATCH v8 4/4] target/arm: set ID_AA64ISAR0.TLB to 2 for max AARCH64 CPU type
From: Rebecca Cran @ 2021-05-05 3:04 UTC
To: Richard Henderson, Peter Maydell; +Cc: Rebecca Cran, qemu-devel, qemu-arm
Indicate support for FEAT_TLBIOS and FEAT_TLBIRANGE by setting
ID_AA64ISAR0.TLB to 2 for the max AArch64 CPU type: a value of 1
advertises FEAT_TLBIOS alone, while 2 advertises both FEAT_TLBIOS
and FEAT_TLBIRANGE.
Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/cpu64.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index f0a9e968c9c1..f42803ecaf1d 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -651,6 +651,7 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
+ t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
cpu->isar.id_aa64isar0 = t;
--
2.26.2
* Re: [PATCH v8 1/4] accel/tcg: Add TLB invalidation support for ranges of addresses
From: Richard Henderson @ 2021-05-08 15:55 UTC
To: Rebecca Cran, Peter Maydell; +Cc: qemu-arm, qemu-devel
On 5/4/21 8:04 PM, Rebecca Cran wrote:
> Add functions to support the FEAT_TLBIRANGE ARMv8.4 feature that adds
> TLB invalidation instructions to invalidate ranges of addresses.
>
> Signed-off-by: Rebecca Cran <rebecca@nuviainc.com>
> ---
> accel/tcg/cputlb.c | 128 +++++++++++++++++++-
> include/exec/exec-all.h | 46 +++++++
> 2 files changed, 171 insertions(+), 3 deletions(-)
I guess this is ok.
I would have switched things around such that tlb_flush_page_bits_locked used
tlb_flush_range_locked, passing TARGET_PAGE_SIZE for the length.
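Roughly (an untested sketch; tlb_flush_range_locked is the proposed new
function, not existing code):
    static bool tlb_flush_page_bits_locked(CPUArchState *env, int midx,
                                           target_ulong page, unsigned bits)
    {
        /* A page flush is just a range flush of a single page. */
        return tlb_flush_range_locked(env, midx, page, TARGET_PAGE_SIZE, bits);
    }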
I would check for a very long length and flush the whole tlb, lest we spend too
long going round and round the same set of tlb entries.
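Something like this, say (sketch only, and the threshold is a guess):
    /*
     * If the range covers more pages than the TLB has entries, walking
     * the range one page at a time costs more than a full flush.
     */
    if (length > tlb_n_entries(&env_tlb(env)->f[midx]) << TARGET_PAGE_BITS) {
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return true;
    }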
I would not skip the third function in the triple, *_all_cpus, which is unused
by arm but which we have for every other tlb flushing function in this set.
I'll whip something up and post it.
r~
* Re: [PATCH v8 2/4] target/arm: Add support for FEAT_TLBIRANGE
From: Richard Henderson @ 2021-05-08 16:39 UTC
To: Rebecca Cran, Peter Maydell; +Cc: qemu-arm, qemu-devel
On 5/4/21 8:04 PM, Rebecca Cran wrote:
> +static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
> + uint64_t value)
> +{
> + unsigned int page_shift;
> + unsigned int page_size_granule;
> + uint64_t num;
> + uint64_t scale;
> + uint64_t exponent;
> + uint64_t length;
> +
> + num = extract64(value, 39, 5);
> + scale = extract64(value, 44, 2);
> + page_size_granule = extract64(value, 46, 2);
> +
> + page_shift = page_size_granule * 2 + 10;
This works out for TG = 1, 2, 3, but is better written as
(page_size_granule - 1) * 2 + 12, to make the sequence 12, 14, 16
(4k, 16k, 64k) explicit.
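With TG taking the values 1, 2, 3, the decode then reads (sketch, assuming
the reserved TG == 0 case has already been rejected):
    page_shift = (page_size_granule - 1) * 2 + 12; /* 12, 14, 16 */
    exponent = (5 * scale) + 1;
    length = (num + 1) << (exponent + page_shift);
e.g. NUM = 0, SCALE = 0 with a 4K granule gives 1 << 13, i.e. two 4K pages.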
> +static void tlbi_aa64_rvae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
> + uint64_t value)
> +{
> + /*
> + * Invalidate by VA range, EL1&0.
> + * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
> + * since we don't support flush-for-specific-ASID-only or
> + * flush-last-level-only.
> + */
> + ARMMMUIdx mmu_idx;
> + int mask;
> + int bits;
> + uint64_t pageaddr;
> + uint64_t length;
> +
> + CPUState *cs = env_cpu(env);
> + mask = vae1_tlbmask(env);
> + mmu_idx = ARM_MMU_IDX_A | ctz32(mask);
> + if (regime_has_2_ranges(mmu_idx)) {
> + pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
> + } else {
> + pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
> + }
Let's extract the base address via a helper as well. Add
/* TODO: ARMv8.7 FEAT_LPA2 */
as that will change the extracted base address.
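Something like this (untested; it just pulls the existing extracts out of
the write functions):
    static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
                                             bool two_ranges)
    {
        /* TODO: ARMv8.7 FEAT_LPA2 */
        uint64_t pageaddr;

        if (two_ranges) {
            pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
        } else {
            pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
        }

        return pageaddr;
    }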
I think there's enough replicated between these functions to want a common
function. Something like
static void do_rvae_write(CPUARMState *env, uint64_t value,
                          int idxmap, bool synced)
{
    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
    bool two_ranges = regime_has_2_ranges(one_idx);
    uint64_t baseaddr, length;
    int bits;

    baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
    length = tlbi_aa64_range_get_length(env, value);
    bits = tlbbits_for_regime(env, one_idx, baseaddr);

    if (synced) {
        tlb_flush_range_by_mmuidx_all_cpus_synced(...);
    } else {
        tlb_flush_range_by_mmuidx(...);
    }
}

static void tlbi_aa64_rvae1_write(...)
{
    do_rvae_write(env, value, vae1_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae1is_write(...)
{
    do_rvae_write(env, value, vae1_tlbmask(env), true);
}

static int vae2_tlbmask(CPUARMState *env)
{
    return (arm_is_secure_below_el3(env)
            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
}

static void tlbi_aa64_rvae2_write(...)
{
    do_rvae_write(env, value, vae2_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae2is_write(...)
{
    do_rvae_write(env, value, vae2_tlbmask(env), true);
}

static void tlbi_aa64_rvae3_write(...)
{
    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae3is_write(...)
{
    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
}
r~
* Re: [PATCH v8 3/4] target/arm: Add support for FEAT_TLBIOS
From: Richard Henderson @ 2021-05-08 16:46 UTC
To: Rebecca Cran, Peter Maydell; +Cc: qemu-arm, qemu-devel
On 5/4/21 8:04 PM, Rebecca Cran wrote:
> +static const ARMCPRegInfo tlbios_reginfo[] = {
> + { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
> + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
> + .access = PL1_W, .type = ARM_CP_NO_RAW,
> + .writefn = tlbi_aa64_vmalle1is_write },
> + { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
> + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
> + .access = PL1_W, .type = ARM_CP_NO_RAW,
> + .writefn = tlbi_aa64_vmalle1is_write },
> + { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
> + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
> + .access = PL1_W, .type = ARM_CP_NO_RAW,
> + .writefn = tlbi_aa64_rvae1is_write },
All of the RVAE*OS entries should be in the tlbirange table: the range
OS ops are only present when FEAT_TLBIRANGE is implemented (TLB == 2),
while FEAT_TLBIOS alone provides just the non-range OS ops.
r~