From: "Alex Bennée" <alex.bennee@linaro.org>
To: Alvise Rigo <a.rigo@virtualopensystems.com>
Cc: mttcg@listserver.greensocs.com, claudio.fontana@huawei.com,
	qemu-devel@nongnu.org, pbonzini@redhat.com,
	jani.kokkonen@huawei.com, tech@virtualopensystems.com,
	rth@twiddle.net
Subject: Re: [Qemu-devel] [RFC v7 08/16] softmmu: Honor the new exclusive bitmap
Date: Tue, 16 Feb 2016 17:39:38 +0000
Message-ID: <87a8n0y2p1.fsf@linaro.org>
In-Reply-To: <1454059965-23402-9-git-send-email-a.rigo@virtualopensystems.com>


Alvise Rigo <a.rigo@virtualopensystems.com> writes:

> Pages set as exclusive (clean) in the DIRTY_MEMORY_EXCLUSIVE bitmap
> must have their TLB entries flagged with TLB_EXCL. Accesses to pages
> with the TLB_EXCL flag set must be handled properly, since they can
> potentially invalidate an open LL/SC transaction.
>
> Modify the TLB entry generation to honor the new bitmap and extend
> the softmmu_template to handle the accesses made to guest pages marked
> as exclusive.
>
> When we remove a TLB entry marked as EXCL, we unset the
> corresponding exclusive bit in the bitmap.
>
> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
> ---
>  cputlb.c           | 44 ++++++++++++++++++++++++++++--
>  softmmu_template.h | 80 ++++++++++++++++++++++++++++++++++++++++++++++++------
>  2 files changed, 113 insertions(+), 11 deletions(-)
>
> diff --git a/cputlb.c b/cputlb.c
> index ce6d720..aa9cc17 100644
> --- a/cputlb.c
> +++ b/cputlb.c
> @@ -395,6 +395,16 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>      env->tlb_v_table[mmu_idx][vidx] = *te;
>      env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
>
> +    if (unlikely(!(te->addr_write & TLB_MMIO) && (te->addr_write & TLB_EXCL))) {
> +        /* We are removing an exclusive entry, set the page to dirty. This
> +         * is not necessary if the vCPU has performed both SC and LL. */
> +        hwaddr hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) +
> +                                          (te->addr_write & TARGET_PAGE_MASK);
> +        if (!cpu->ll_sc_context) {
> +            cpu_physical_memory_unset_excl(hw_addr);
> +        }
> +    }
> +

I'm confused by the later patches that remove this code and by their
comments about the setting of flags going missing.

>      /* refill the tlb */
>      env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
>      env->iotlb[mmu_idx][index].attrs = attrs;
> @@ -418,9 +428,19 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>          } else if (memory_region_is_ram(section->mr)
>                     && cpu_physical_memory_is_clean(section->mr->ram_addr
>                                                     + xlat)) {
> -            te->addr_write = address | TLB_NOTDIRTY;
> -        } else {
> -            te->addr_write = address;
> +            address |= TLB_NOTDIRTY;
> +        }
> +
> +        /* Since MMIO accesses always follow the slow path, we do not need
> +         * to set any flag to trap the access */
> +        if (!(address & TLB_MMIO)) {
> +            if (cpu_physical_memory_is_excl(section->mr->ram_addr + xlat)) {
> +                /* There is at least one vCPU that has flagged the address as
> +                 * exclusive. */
> +                te->addr_write = address | TLB_EXCL;
> +            } else {
> +                te->addr_write = address;
> +            }

Again this is confusing when following patches blat over the code.
Perhaps this part of the patch should be:

        /* Since MMIO accesses always follow the slow path, we do not need
         * to set any flag to trap the access */
        if (!(address & TLB_MMIO)) {
            if (cpu_physical_memory_is_excl(section->mr->ram_addr + xlat)) {
                /* There is at least one vCPU that has flagged the address as
                 * exclusive. */
                address |= TLB_EXCL;
            }
        }
        te->addr_write = address;

So the future patch is clearer about what it does?

>          }
>      } else {
>          te->addr_write = -1;
> @@ -474,6 +494,24 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
>      return qemu_ram_addr_from_host_nofail(p);
>  }
>
> +/* For every vCPU compare the exclusive address and reset it in case of a
> + * match. Since only one vCPU is running at once, no lock has to be held to
> + * guard this operation. */
> +static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
> +{
> +    CPUState *cpu;
> +
> +    CPU_FOREACH(cpu) {
> +        if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
> +            ranges_overlap(cpu->excl_protected_range.begin,
> +                           cpu->excl_protected_range.end -
> +                           cpu->excl_protected_range.begin,
> +                           addr, size)) {
> +            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
> +        }
> +    }
> +}
> +
>  #define MMUSUFFIX _mmu
>
>  /* Generates LoadLink/StoreConditional helpers in softmmu_template.h */
> diff --git a/softmmu_template.h b/softmmu_template.h
> index 4332db2..267c52a 100644
> --- a/softmmu_template.h
> +++ b/softmmu_template.h
> @@ -474,11 +474,43 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>          tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
>      }
>
> -    /* Handle an IO access.  */
> +    /* Handle an IO access or exclusive access.  */
>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
> -        glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
> -                                                 mmu_idx, index, retaddr);
> -        return;
> +        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {

From here:

> +            CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
> +            CPUState *cpu = ENV_GET_CPU(env);
> +            CPUClass *cc = CPU_GET_CLASS(cpu);
> +            /* The slow-path has been forced since we are writing to
> +             * exclusive-protected memory. */
> +            hwaddr hw_addr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
> +
> +            /* The function lookup_and_reset_cpus_ll_addr could have reset the
> +             * exclusive address. Fail the SC in this case.
> +             * N.B.: here excl_succeeded == true means that the caller is
> +             * helper_stcond_name in softmmu_llsc_template.
> +             * On the contrary, excl_succeeded == false occurs when a VCPU is
> +             * writing through normal store to a page with TLB_EXCL bit set. */
> +            if (cpu->excl_succeeded) {
> +                if (!cc->cpu_valid_excl_access(cpu, hw_addr, DATA_SIZE)) {
> +                    /* The vCPU is SC-ing to an unprotected address. */
> +                    cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
> +                    cpu->excl_succeeded = false;
> +
> +                    return;
> +                }
> +            }
> +

To here is code that is repeated later on. It would be better to pull it
out into a common chunk of logic, perhaps along the lines sketched below.
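
For instance (only a rough sketch; the name cpu_excl_store_ok is made up
here, and it could sit next to lookup_and_reset_cpus_ll_addr in cputlb.c
with the access size passed in explicitly):

    /* Check whether a store to an exclusive-protected page may proceed.
     * Returns false when a SC to an unprotected address must fail, and
     * resets the vCPU's exclusive state in that case. */
    static inline bool cpu_excl_store_ok(CPUState *cpu, hwaddr hw_addr,
                                         hwaddr size)
    {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        /* excl_succeeded == true means the caller is helper_stcond_name;
         * a plain store to a TLB_EXCL page always goes ahead. */
        if (cpu->excl_succeeded &&
            !cc->cpu_valid_excl_access(cpu, hw_addr, size)) {
            /* The vCPU is SC-ing to an unprotected address. */
            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
            cpu->excl_succeeded = false;
            return false;
        }
        return true;
    }

Both store helpers would then shrink to:

    if (!cpu_excl_store_ok(ENV_GET_CPU(env), hw_addr, DATA_SIZE)) {
        return;
    }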

> +            glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
> +                                                    mmu_idx, index, retaddr);
> +
> +            lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);

In fact, if the endianness is passed to the inline function, you could
have a call like:

        if (tlb_addr & TLB_EXCL) {
            glue(helper_st_name, _do_excl)(true, env, val, addr, oi,
                                           mmu_idx, index, retaddr);
        }

and

        if (tlb_addr & TLB_EXCL) {
            glue(helper_st_name, _do_excl)(false, env, val, addr, oi,
                                           mmu_idx, index, retaddr);
        }

later. Then future patches would just extend the single helper.
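
As a rough sketch of what that single helper could look like (note this
is illustration only: helper_st_name would need defining alongside
helper_le_st_name and helper_be_st_name in softmmu_template.h, the BE
variant only exists for DATA_SIZE > 1, and cpu_excl_store_ok is the
common check sketched above):

    static inline void glue(helper_st_name, _do_excl)(bool little_endian,
                                                      CPUArchState *env,
                                                      DATA_TYPE val,
                                                      target_ulong addr,
                                                      TCGMemOpIdx oi,
                                                      unsigned mmu_idx,
                                                      int index,
                                                      uintptr_t retaddr)
    {
        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
        /* The slow-path has been forced since we are writing to
         * exclusive-protected memory. */
        hwaddr hw_addr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;

        if (!cpu_excl_store_ok(ENV_GET_CPU(env), hw_addr, DATA_SIZE)) {
            /* Failed SC: the common check has already reset the state. */
            return;
        }

        if (little_endian) {
            glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
                                                    mmu_idx, index, retaddr);
        } else {
            glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
                                                    mmu_idx, index, retaddr);
        }

        lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
    }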

> +
> +            return;
> +        } else {
> +            glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
> +                                                     mmu_idx, index, retaddr);
> +            return;
> +        }
>      }
>
>      glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi, mmu_idx, index,
> @@ -586,11 +618,43 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>          tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
>      }
>
> -    /* Handle an IO access.  */
> +    /* Handle an IO access or exclusive access.  */
>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
> -        glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
> -                                                 mmu_idx, index, retaddr);
> -        return;
> +        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
> +            CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
> +            CPUState *cpu = ENV_GET_CPU(env);
> +            CPUClass *cc = CPU_GET_CLASS(cpu);
> +            /* The slow-path has been forced since we are writing to
> +             * exclusive-protected memory. */
> +            hwaddr hw_addr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
> +
> +            /* The function lookup_and_reset_cpus_ll_addr could have reset the
> +             * exclusive address. Fail the SC in this case.
> +             * N.B.: here excl_succeeded == true means that the caller is
> +             * helper_stcond_name in softmmu_llsc_template.
> +             * On the contrary, excl_succeeded == false occurs when a VCPU is
> +             * writing through normal store to a page with TLB_EXCL bit set. */
> +            if (cpu->excl_succeeded) {
> +                if (!cc->cpu_valid_excl_access(cpu, hw_addr, DATA_SIZE)) {
> +                    /* The vCPU is SC-ing to an unprotected address. */
> +                    cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
> +                    cpu->excl_succeeded = false;
> +
> +                    return;
> +                }
> +            }
> +
> +            glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
> +                                                    mmu_idx, index, retaddr);
> +
> +            lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
> +
> +            return;
> +        } else {
> +            glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
> +                                                     mmu_idx, index, retaddr);
> +            return;
> +        }
>      }
>
>      glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi, mmu_idx, index,


--
Alex Bennée
