From: Alvise Rigo <a.rigo@virtualopensystems.com>
To: qemu-devel@nongnu.org, mttcg@listserver.greensocs.com
Cc: claudio.fontana@huawei.com, pbonzini@redhat.com,
	jani.kokkonen@huawei.com, tech@virtualopensystems.com,
	alex.bennee@linaro.org, rth@twiddle.net
Subject: [Qemu-devel] [RFC v6 13/14] softmmu: Include MMIO/invalid exclusive accesses
Date: Mon, 14 Dec 2015 09:41:37 +0100
Message-ID: <1450082498-27109-14-git-send-email-a.rigo@virtualopensystems.com>
In-Reply-To: <1450082498-27109-1-git-send-email-a.rigo@virtualopensystems.com>

Enable exclusive accesses when the MMIO/invalid flag is set in the TLB
entry.
When an LL access is made to MMIO memory, we treat it differently from
a RAM access in that we do not rely on the EXCL bitmap to flag the page
as exclusive. In fact, we do not even need the TLB_EXCL flag to force the
slow path, since MMIO accesses always take it anyway.
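
For orientation, the essence of this arrangement can be shown with a
small self-contained toy (the helper name compose_addr_write and the
flag values are invented for illustration; QEMU's real definitions
differ):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model: flags live in the low bits of the page-aligned TLB
     * address, so any non-zero low bit forces the slow path. */
    #define TARGET_PAGE_MASK  (~(uint64_t)0xfff)
    #define TLB_NOTDIRTY      (1u << 0)
    #define TLB_MMIO          (1u << 1)
    #define TLB_EXCL          (1u << 2)

    static uint64_t compose_addr_write(uint64_t address, bool mmio,
                                       bool notdirty, bool any_vcpu_excl)
    {
        if (mmio) {
            address |= TLB_MMIO;       /* writes take the I/O callback */
        } else if (notdirty) {
            address |= TLB_NOTDIRTY;
        }
        /* MMIO always takes the slow path anyway, so TLB_EXCL is only
         * needed for RAM pages. */
        if (!(address & TLB_MMIO) && any_vcpu_excl) {
            address |= TLB_EXCL;
        }
        return address;
    }

    int main(void)
    {
        uint64_t ram  = compose_addr_write(0x1000, false, false, true);
        uint64_t mmio = compose_addr_write(0x2000, true,  false, true);
        printf("ram:  TLB_EXCL=%d\n", !!(ram & TLB_EXCL));   /* 1 */
        printf("mmio: TLB_EXCL=%d\n", !!(mmio & TLB_EXCL));  /* 0 */
        return 0;
    }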

This commit does not take care of invalidating an MMIO exclusive range
on other, non-exclusive accesses, i.e. CPU1 LoadLinks MMIO address X and
CPU2 then plainly writes to X; this will be addressed in the following
commit.
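
To make the deferred problem concrete, here is a toy model of that race
(invented names, not QEMU code; it compiles and runs standalone):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cpu1_excl_addr = UINT64_MAX; /* CPU1's exclusive range */

    static void cpu1_load_link(uint64_t x)  { cpu1_excl_addr = x; }

    static void cpu2_mmio_store(uint64_t x)
    {
        /* With this commit, a plain MMIO write does NOT reset another
         * vCPU's exclusive range: there is no EXCL bitmap for MMIO. */
        (void)x;
    }

    static bool cpu1_store_cond(uint64_t x) { return cpu1_excl_addr == x; }

    int main(void)
    {
        const uint64_t X = 0x10000000; /* some MMIO address */
        cpu1_load_link(X);
        cpu2_mmio_store(X);            /* should break CPU1's link */
        printf("SC %s\n",
               cpu1_store_cond(X) ? "succeeds (wrong!)" : "fails");
        return 0;
    }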

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 cputlb.c                | 20 +++++++++++---------
 softmmu_llsc_template.h | 25 ++++++++++++++-----------
 softmmu_template.h      | 38 ++++++++++++++++++++------------------
 3 files changed, 45 insertions(+), 38 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 372877e..7c2669c 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -413,22 +413,24 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         if ((memory_region_is_ram(section->mr) && section->readonly)
             || memory_region_is_romd(section->mr)) {
             /* Write access calls the I/O callback.  */
-            te->addr_write = address | TLB_MMIO;
+            address |= TLB_MMIO;
         } else if (memory_region_is_ram(section->mr)
                    && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                    + xlat)) {
-            te->addr_write = address | TLB_NOTDIRTY;
-        } else {
-            if (!(address & TLB_MMIO) &&
-                cpu_physical_memory_atleast_one_excl(section->mr->ram_addr
-                                                           + xlat)) {
+            address |= TLB_NOTDIRTY;
+        }
+
+        /* Since MMIO accesses always follow the slow path, we do not need
+         * to set any flag to trap the access. */
+        if (!(address & TLB_MMIO)) {
+            if (cpu_physical_memory_atleast_one_excl(
+                                        section->mr->ram_addr + xlat)) {
                 /* There is at least one vCPU that has flagged the address as
                  * exclusive. */
-                te->addr_write = address | TLB_EXCL;
-            } else {
-                te->addr_write = address;
+                address |= TLB_EXCL;
             }
         }
+        te->addr_write = address;
     } else {
         te->addr_write = -1;
     }
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index becb90b..bbc820e 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -71,17 +71,20 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
      * plus the offset (i.e. addr & ~TARGET_PAGE_MASK) */
     hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) + addr;
 
-    cpu_physical_memory_set_excl(hw_addr, this->cpu_index);
-    excl_history_put_addr(this, hw_addr);
-    /* If all the vCPUs have the EXCL bit set for this page there is no need
-     * to request any flush. */
-    if (cpu_physical_memory_not_excl(hw_addr, smp_cpus)) {
-        CPU_FOREACH(cpu) {
-            if (current_cpu != cpu) {
-                if (cpu_physical_memory_not_excl(hw_addr, cpu->cpu_index)) {
-                    cpu_physical_memory_set_excl(hw_addr, cpu->cpu_index);
-                    tlb_flush(cpu, 1);
-                    excl_history_put_addr(cpu, hw_addr);
+    /* No need to flush for MMIO addresses, the slow path is always used */
+    if (likely(!(env->tlb_table[mmu_idx][index].addr_read & TLB_MMIO))) {
+        cpu_physical_memory_set_excl(hw_addr, this->cpu_index);
+        excl_history_put_addr(this, hw_addr);
+        /* If all the vCPUs have the EXCL bit set for this page there is no need
+         * to request any flush. */
+        if (cpu_physical_memory_not_excl(hw_addr, smp_cpus)) {
+            CPU_FOREACH(cpu) {
+                if (current_cpu != cpu) {
+                    if (cpu_physical_memory_not_excl(hw_addr, cpu->cpu_index)) {
+                        cpu_physical_memory_set_excl(hw_addr, cpu->cpu_index);
+                        tlb_flush(cpu, 1);
+                        excl_history_put_addr(cpu, hw_addr);
+                    }
                 }
             }
         }
diff --git a/softmmu_template.h b/softmmu_template.h
index 262c95f..196beec 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -476,9 +476,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access or exclusive access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
-        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
+        if (tlb_addr & TLB_EXCL) {
+            CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
             CPUState *cpu = ENV_GET_CPU(env);
             /* The slow-path has been forced since we are writing to
              * exclusive-protected memory. */
@@ -500,12 +499,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 cpu_physical_memory_unset_excl(hw_addr, cpu->cpu_index);
             }
 
-            haddr = addr + env->tlb_table[mmu_idx][index].addend;
-        #if DATA_SIZE == 1
-            glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-        #else
-            glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-        #endif
+            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
+                glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
+                                                         mmu_idx, index,
+                                                         retaddr);
+            } else {
+                glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
+                                                        mmu_idx, index, retaddr);
+            }
 
             lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
 
@@ -624,9 +625,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access or exclusive access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
-        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
+        if (tlb_addr & TLB_EXCL) {
+            CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
             CPUState *cpu = ENV_GET_CPU(env);
             /* The slow-path has been forced since we are writing to
              * exclusive-protected memory. */
@@ -648,12 +648,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 cpu_physical_memory_unset_excl(hw_addr, cpu->cpu_index);
             }
 
-            haddr = addr + env->tlb_table[mmu_idx][index].addend;
-        #if DATA_SIZE == 1
-            glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-        #else
-            glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-        #endif
+            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
+                glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
+                                                         mmu_idx, index,
+                                                         retaddr);
+            } else {
+                glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
+                                                        mmu_idx, index, retaddr);
+            }
 
             lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
 
-- 
2.6.4
