From: Alvise Rigo <a.rigo@virtualopensystems.com>
Date: Mon, 14 Dec 2015 09:41:37 +0100
Message-Id: <1450082498-27109-14-git-send-email-a.rigo@virtualopensystems.com>
In-Reply-To: <1450082498-27109-1-git-send-email-a.rigo@virtualopensystems.com>
References: <1450082498-27109-1-git-send-email-a.rigo@virtualopensystems.com>
Subject: [Qemu-devel] [RFC v6 13/14] softmmu: Include MMIO/invalid exclusive accesses
To: qemu-devel@nongnu.org, mttcg@listserver.greensocs.com
Cc: claudio.fontana@huawei.com, pbonzini@redhat.com, jani.kokkonen@huawei.com, tech@virtualopensystems.com, alex.bennee@linaro.org, rth@twiddle.net

Enable exclusive accesses when the MMIO/invalid flag is set in the TLB
entry.

When a LL access is made to MMIO memory, we treat it differently from a
RAM access: we do not rely on the EXCL bitmap to flag the page as
exclusive. In fact, we do not even need the TLB_EXCL flag to force the
slow path, since it is always taken for MMIO accesses anyway.

This commit does not take care of invalidating an MMIO exclusive range
on other, non-exclusive accesses (e.g. CPU1 issues a LoadLink to MMIO
address X and CPU2 then writes to X); this will be addressed by the
following commit.

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 cputlb.c                | 20 +++++++++++---------
 softmmu_llsc_template.h | 25 ++++++++++++++-----------
 softmmu_template.h      | 38 ++++++++++++++++++++------------------
 3 files changed, 45 insertions(+), 38 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 372877e..7c2669c 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -413,22 +413,24 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         if ((memory_region_is_ram(section->mr) && section->readonly)
             || memory_region_is_romd(section->mr)) {
             /* Write access calls the I/O callback. */
-            te->addr_write = address | TLB_MMIO;
+            address |= TLB_MMIO;
         } else if (memory_region_is_ram(section->mr)
                    && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                    + xlat)) {
-            te->addr_write = address | TLB_NOTDIRTY;
-        } else {
-            if (!(address & TLB_MMIO) &&
-                cpu_physical_memory_atleast_one_excl(section->mr->ram_addr
-                                                     + xlat)) {
+            address |= TLB_NOTDIRTY;
+        }
+
+        /* Since the MMIO accesses always follow the slow path, we do not need
+         * to set any flag to trap the access */
+        if (!(address & TLB_MMIO)) {
+            if (cpu_physical_memory_atleast_one_excl(
+                                        section->mr->ram_addr + xlat)) {
                 /* There is at least one vCPU that has flagged the address as
                  * exclusive. */
-                te->addr_write = address | TLB_EXCL;
-            } else {
-                te->addr_write = address;
+                address |= TLB_EXCL;
             }
         }
+        te->addr_write = address;
     } else {
         te->addr_write = -1;
     }
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index becb90b..bbc820e 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -71,17 +71,20 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
      * plus the offset (i.e. addr & ~TARGET_PAGE_MASK) */
     hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) + addr;
 
-    cpu_physical_memory_set_excl(hw_addr, this->cpu_index);
-    excl_history_put_addr(this, hw_addr);
-    /* If all the vCPUs have the EXCL bit set for this page there is no need
-     * to request any flush. */
-    if (cpu_physical_memory_not_excl(hw_addr, smp_cpus)) {
-        CPU_FOREACH(cpu) {
-            if (current_cpu != cpu) {
-                if (cpu_physical_memory_not_excl(hw_addr, cpu->cpu_index)) {
-                    cpu_physical_memory_set_excl(hw_addr, cpu->cpu_index);
-                    tlb_flush(cpu, 1);
-                    excl_history_put_addr(cpu, hw_addr);
+    /* No need to flush for MMIO addresses, the slow path is always used */
+    if (likely(!(env->tlb_table[mmu_idx][index].addr_read & TLB_MMIO))) {
+        cpu_physical_memory_set_excl(hw_addr, this->cpu_index);
+        excl_history_put_addr(this, hw_addr);
+        /* If all the vCPUs have the EXCL bit set for this page there is no need
+         * to request any flush. */
+        if (cpu_physical_memory_not_excl(hw_addr, smp_cpus)) {
+            CPU_FOREACH(cpu) {
+                if (current_cpu != cpu) {
+                    if (cpu_physical_memory_not_excl(hw_addr, cpu->cpu_index)) {
+                        cpu_physical_memory_set_excl(hw_addr, cpu->cpu_index);
+                        tlb_flush(cpu, 1);
+                        excl_history_put_addr(cpu, hw_addr);
+                    }
                 }
             }
         }
diff --git a/softmmu_template.h b/softmmu_template.h
index 262c95f..196beec 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -476,9 +476,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access or exclusive access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
-        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
+        if (tlb_addr & TLB_EXCL) {
+            CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
             CPUState *cpu = ENV_GET_CPU(env);
             /* The slow-path has been forced since we are writing to
              * exclusive-protected memory. */
@@ -500,12 +499,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 cpu_physical_memory_unset_excl(hw_addr, cpu->cpu_index);
             }
 
-            haddr = addr + env->tlb_table[mmu_idx][index].addend;
-        #if DATA_SIZE == 1
-            glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-        #else
-            glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-        #endif
+            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
+                glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
+                                                         mmu_idx, index,
+                                                         retaddr);
+            } else {
+                glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
+                                                        mmu_idx, index, retaddr);
+            }
 
             lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
 
@@ -624,9 +625,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access or exclusive access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
-        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
+        if (tlb_addr & TLB_EXCL) {
+            CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
             CPUState *cpu = ENV_GET_CPU(env);
             /* The slow-path has been forced since we are writing to
              * exclusive-protected memory. */
@@ -648,12 +648,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 cpu_physical_memory_unset_excl(hw_addr, cpu->cpu_index);
             }
 
-            haddr = addr + env->tlb_table[mmu_idx][index].addend;
-        #if DATA_SIZE == 1
-            glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-        #else
-            glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-        #endif
+            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
+                glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
+                                                         mmu_idx, index,
+                                                         retaddr);
+            } else {
+                glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
+                                                        mmu_idx, index, retaddr);
+            }
 
             lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
 
-- 
2.6.4
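
Not part of the patch: below is a tiny standalone C sketch of how the store
helpers dispatch on the low bits of the write TLB entry after this change,
for anyone who wants to see the flag logic in isolation. The TOY_* macros,
their values and classify_store() are made up for illustration only; the
real flag definitions live in include/exec/cpu-all.h plus this series, and
the real dispatch is the softmmu_template.h code above. Compiled with a
plain cc it just prints which path each sample address takes, which is the
commit message's point that an MMIO LL/store never needs TLB_EXCL or EXCL
bitmap bookkeeping.

/* Standalone toy model -- not QEMU code.  The TOY_* names and values are
 * illustrative only. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_MASK    (~(uint64_t)0xfff)  /* 4 KiB pages for the example */
#define TOY_TLB_NOTDIRTY ((uint64_t)1 << 4)
#define TOY_TLB_MMIO     ((uint64_t)1 << 5)
#define TOY_TLB_EXCL     ((uint64_t)1 << 6)  /* flag added by this series */

/* Mirrors the order of the checks in the patched store helpers: any low
 * bit set forces the slow path; TLB_EXCL is tested first, then the
 * remaining bits pick the MMIO or the RAM handler. */
static const char *classify_store(uint64_t tlb_addr)
{
    if (!(tlb_addr & ~TOY_PAGE_MASK)) {
        return "fast path: plain RAM store";
    }
    if (tlb_addr & TOY_TLB_EXCL) {
        /* Exclusive-protected page: the LL/SC state of the other vCPUs is
         * reset, then the store itself is either MMIO or RAM. */
        if (tlb_addr & ~(TOY_PAGE_MASK | TOY_TLB_EXCL)) {
            return "slow path: exclusive page, MMIO store";
        }
        return "slow path: exclusive page, RAM store";
    }
    return "slow path: MMIO/notdirty store, no EXCL bookkeeping needed";
}

int main(void)
{
    uint64_t samples[] = {
        0x1000,                               /* clean RAM page      */
        0x2000 | TOY_TLB_EXCL,                /* exclusive RAM page  */
        0x3000 | TOY_TLB_MMIO,                /* MMIO page           */
        0x4000 | TOY_TLB_EXCL | TOY_TLB_MMIO, /* exclusive MMIO page */
    };

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        printf("0x%" PRIx64 " -> %s\n", samples[i], classify_store(samples[i]));
    }
    return 0;
}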