From: "Philippe Mathieu-Daudé" <philmd@redhat.com>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Cc: cota@braap.org
Subject: Re: [Qemu-devel] [PATCH 01/10] cputlb: Move tlb_lock to CPUTLBCommon
Date: Tue, 23 Oct 2018 13:03:49 +0200
Message-ID: <34f9dd7d-a5e2-9e71-c931-edbf42ec550d@redhat.com>
In-Reply-To: <20181023070253.6407-2-richard.henderson@linaro.org>

On 23/10/18 9:02, Richard Henderson wrote:
> This is the first of several moves to reduce the size of the
> CPU_COMMON_TLB macro and improve some locality of refernce.

"reference"

> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>

> ---
>   include/exec/cpu-defs.h | 17 ++++++++++++---
>   accel/tcg/cputlb.c      | 48 ++++++++++++++++++++---------------------
>   2 files changed, 38 insertions(+), 27 deletions(-)
> 
> diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
> index 4ff62f32bf..9005923b4d 100644
> --- a/include/exec/cpu-defs.h
> +++ b/include/exec/cpu-defs.h
> @@ -141,10 +141,21 @@ typedef struct CPUIOTLBEntry {
>       MemTxAttrs attrs;
>   } CPUIOTLBEntry;
>   
> +/*
> + * Data elements that are shared between all MMU modes.
> + */
> +typedef struct CPUTLBCommon {
> +    /* lock serializes updates to tlb_table and tlb_v_table */
> +    QemuSpin lock;
> +} CPUTLBCommon;
> +
> +/*
> + * The meaning of each of the MMU modes is defined in the target code.
> + * Note that NB_MMU_MODES is not yet defined; we can only reference it
> + * within preprocessor defines that will be expanded later.
> + */
>   #define CPU_COMMON_TLB \
> -    /* The meaning of the MMU modes is defined in the target code. */   \
> -    /* tlb_lock serializes updates to tlb_table and tlb_v_table */      \
> -    QemuSpin tlb_lock;                                                  \
> +    CPUTLBCommon tlb_c;                                                 \
>       CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
>       CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
>       CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                    \
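
To spell out, for anyone reading along, why the note about NB_MMU_MODES matters:
the macro body may reference NB_MMU_MODES even though it is not defined at this
point in cpu-defs.h, because the macro is only expanded later, inside each
target's CPUArchState, once the target has defined it. A self-contained toy
illustrating that (all the stand-in types, sizes and the "foo" target below are
invented; only the CPUTLBCommon/CPU_COMMON_TLB shape comes from this patch):

    /* Toy illustration, not QEMU code: stand-in types and sizes. */
    typedef struct QemuSpin { int value; } QemuSpin;                      /* stand-in */
    typedef struct CPUTLBEntry { unsigned long addr_write; } CPUTLBEntry; /* stand-in */
    typedef struct CPUIOTLBEntry { unsigned long addr; } CPUIOTLBEntry;   /* stand-in */
    #define CPU_TLB_SIZE  256                                             /* stand-in */
    #define CPU_VTLB_SIZE 8                                               /* stand-in */

    typedef struct CPUTLBCommon {
        QemuSpin lock;   /* serializes updates to tlb_table and tlb_v_table */
    } CPUTLBCommon;

    /* NB_MMU_MODES is *not* defined yet; it is only referenced inside the
     * macro body, which is legal because expansion happens later. */
    #define CPU_COMMON_TLB \
        CPUTLBCommon tlb_c; \
        CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
        CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
        CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];

    /* ...later, in a (made-up) target header: */
    #define NB_MMU_MODES 2

    typedef struct CPUFooState {
        CPU_COMMON_TLB   /* expanded here, where NB_MMU_MODES is visible */
    } CPUFooState;

New shared TLB state can then be added to CPUTLBCommon directly instead of
growing the macro, which is what shrinks CPU_COMMON_TLB and keeps the shared
fields adjacent in memory.
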
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index af57aca5e4..d4e07056be 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -78,7 +78,7 @@ void tlb_init(CPUState *cpu)
>   {
>       CPUArchState *env = cpu->env_ptr;
>   
> -    qemu_spin_init(&env->tlb_lock);
> +    qemu_spin_init(&env->tlb_c.lock);
>   }
>   
>   /* flush_all_helper: run fn across all cpus
> @@ -134,15 +134,15 @@ static void tlb_flush_nocheck(CPUState *cpu)
>       tlb_debug("(count: %zu)\n", tlb_flush_count());
>   
>       /*
> -     * tlb_table/tlb_v_table updates from any thread must hold tlb_lock.
> +     * tlb_table/tlb_v_table updates from any thread must hold tlb_c.lock.
>        * However, updates from the owner thread (as is the case here; see the
>        * above assert_cpu_is_self) do not need atomic_set because all reads
>        * that do not hold the lock are performed by the same owner thread.
>        */
> -    qemu_spin_lock(&env->tlb_lock);
> +    qemu_spin_lock(&env->tlb_c.lock);
>       memset(env->tlb_table, -1, sizeof(env->tlb_table));
>       memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
> -    qemu_spin_unlock(&env->tlb_lock);
> +    qemu_spin_unlock(&env->tlb_c.lock);
>   
>       cpu_tb_jmp_cache_clear(cpu);
>   
> @@ -195,7 +195,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
>   
>       tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
>   
> -    qemu_spin_lock(&env->tlb_lock);
> +    qemu_spin_lock(&env->tlb_c.lock);
>       for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
>   
>           if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
> @@ -205,7 +205,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
>               memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
>           }
>       }
> -    qemu_spin_unlock(&env->tlb_lock);
> +    qemu_spin_unlock(&env->tlb_c.lock);
>   
>       cpu_tb_jmp_cache_clear(cpu);
>   
> @@ -262,7 +262,7 @@ static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
>              tlb_hit_page(tlb_entry->addr_code, page);
>   }
>   
> -/* Called with tlb_lock held */
> +/* Called with tlb_c.lock held */
>   static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
>                                             target_ulong page)
>   {
> @@ -271,7 +271,7 @@ static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
>       }
>   }
>   
> -/* Called with tlb_lock held */
> +/* Called with tlb_c.lock held */
>   static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
>                                                 target_ulong page)
>   {
> @@ -304,12 +304,12 @@ static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
>       }
>   
>       addr &= TARGET_PAGE_MASK;
> -    qemu_spin_lock(&env->tlb_lock);
> +    qemu_spin_lock(&env->tlb_c.lock);
>       for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
>           tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr);
>           tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
>       }
> -    qemu_spin_unlock(&env->tlb_lock);
> +    qemu_spin_unlock(&env->tlb_c.lock);
>   
>       tb_flush_jmp_cache(cpu, addr);
>   }
> @@ -345,14 +345,14 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
>       tlb_debug("flush page addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
>                 addr, mmu_idx_bitmap);
>   
> -    qemu_spin_lock(&env->tlb_lock);
> +    qemu_spin_lock(&env->tlb_c.lock);
>       for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
>           if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
>               tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr);
>               tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
>           }
>       }
> -    qemu_spin_unlock(&env->tlb_lock);
> +    qemu_spin_unlock(&env->tlb_c.lock);
>   
>       tb_flush_jmp_cache(cpu, addr);
>   }
> @@ -479,7 +479,7 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
>    * te->addr_write with atomic_set. We don't need to worry about this for
>    * oversized guests as MTTCG is disabled for them.
>    *
> - * Called with tlb_lock held.
> + * Called with tlb_c.lock held.
>    */
>   static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
>                                            uintptr_t start, uintptr_t length)
> @@ -501,7 +501,7 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
>   }
>   
>   /*
> - * Called with tlb_lock held.
> + * Called with tlb_c.lock held.
>    * Called only from the vCPU context, i.e. the TLB's owner thread.
>    */
>   static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
> @@ -511,7 +511,7 @@ static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
>   
>   /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
>    * the target vCPU).
> - * We must take tlb_lock to avoid racing with another vCPU update. The only
> + * We must take tlb_c.lock to avoid racing with another vCPU update. The only
>    * thing actually updated is the target TLB entry ->addr_write flags.
>    */
>   void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
> @@ -521,7 +521,7 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
>       int mmu_idx;
>   
>       env = cpu->env_ptr;
> -    qemu_spin_lock(&env->tlb_lock);
> +    qemu_spin_lock(&env->tlb_c.lock);
>       for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
>           unsigned int i;
>   
> @@ -535,10 +535,10 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
>                                            length);
>           }
>       }
> -    qemu_spin_unlock(&env->tlb_lock);
> +    qemu_spin_unlock(&env->tlb_c.lock);
>   }
>   
> -/* Called with tlb_lock held */
> +/* Called with tlb_c.lock held */
>   static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
>                                            target_ulong vaddr)
>   {
> @@ -557,7 +557,7 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
>       assert_cpu_is_self(cpu);
>   
>       vaddr &= TARGET_PAGE_MASK;
> -    qemu_spin_lock(&env->tlb_lock);
> +    qemu_spin_lock(&env->tlb_c.lock);
>       for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
>           tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
>       }
> @@ -568,7 +568,7 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
>               tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
>           }
>       }
> -    qemu_spin_unlock(&env->tlb_lock);
> +    qemu_spin_unlock(&env->tlb_c.lock);
>   }
>   
>   /* Our TLB does not support large pages, so remember the area covered by
> @@ -669,7 +669,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>        * a longer critical section, but this is not a concern since the TLB lock
>        * is unlikely to be contended.
>        */
> -    qemu_spin_lock(&env->tlb_lock);
> +    qemu_spin_lock(&env->tlb_c.lock);
>   
>       /* Make sure there's no cached translation for the new page.  */
>       tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
> @@ -736,7 +736,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>       }
>   
>       copy_tlb_helper_locked(te, &tn);
> -    qemu_spin_unlock(&env->tlb_lock);
> +    qemu_spin_unlock(&env->tlb_c.lock);
>   }
>   
>   /* Add a new TLB entry, but without specifying the memory
> @@ -917,11 +917,11 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
>               /* Found entry in victim tlb, swap tlb and iotlb.  */
>               CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
>   
> -            qemu_spin_lock(&env->tlb_lock);
> +            qemu_spin_lock(&env->tlb_c.lock);
>               copy_tlb_helper_locked(&tmptlb, tlb);
>               copy_tlb_helper_locked(tlb, vtlb);
>               copy_tlb_helper_locked(vtlb, &tmptlb);
> -            qemu_spin_unlock(&env->tlb_lock);
> +            qemu_spin_unlock(&env->tlb_c.lock);
>   
>               CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
>               CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
> 
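One closing note, mostly restating the locking rules already spelled out in the
comments above so they sit in one place. The two helper names below are
invented for illustration; qemu_spin_lock/unlock, atomic_set and the field
names are the ones actually used by the patch, and the usual QEMU headers
(qemu/osdep.h, qemu/thread.h, qemu/atomic.h, cpu.h) are assumed:

    /* Owner vCPU thread: hold tlb_c.lock while updating, but plain stores
     * are enough -- every unlocked read of tlb_table happens on this same
     * thread (see the comment in tlb_flush_nocheck). */
    static void owner_flush_all_example(CPUArchState *env)
    {
        qemu_spin_lock(&env->tlb_c.lock);
        memset(env->tlb_table, -1, sizeof(env->tlb_table));
        qemu_spin_unlock(&env->tlb_c.lock);
    }

    /* Another vCPU's thread (the tlb_reset_dirty case): hold tlb_c.lock
     * *and* publish the new addr_write with atomic_set, because the owner
     * may read it concurrently without taking the lock. */
    static void cross_vcpu_update_example(CPUArchState *env, CPUTLBEntry *ent,
                                          target_ulong new_addr_write)
    {
        qemu_spin_lock(&env->tlb_c.lock);
        atomic_set(&ent->addr_write, new_addr_write);
        qemu_spin_unlock(&env->tlb_c.lock);
    }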

Thread overview: 18+ messages
2018-10-23  7:02 [Qemu-devel] [PATCH 00/10] cputlb: track dirty tlbs and general cleanup Richard Henderson
2018-10-23  7:02 ` [Qemu-devel] [PATCH 01/10] cputlb: Move tlb_lock to CPUTLBCommon Richard Henderson
2018-10-23 11:03   ` Philippe Mathieu-Daudé [this message]
2018-10-23  7:02 ` [Qemu-devel] [PATCH, build fix] osdep: Work around MinGW assert Richard Henderson
2018-10-23 11:02   ` Philippe Mathieu-Daudé
2018-10-23  7:02 ` [Qemu-devel] [PATCH 02/10] cputlb: Remove tcg_enabled hack from tlb_flush_nocheck Richard Henderson
2018-10-23  7:02 ` [Qemu-devel] [PATCH 03/10] cputlb: Move cpu->pending_tlb_flush to env->tlb_c.pending_flush Richard Henderson
2018-10-23  7:02 ` [Qemu-devel] [PATCH 04/10] cputlb: Split large page tracking per mmu_idx Richard Henderson
2018-10-27  0:16   ` Emilio G. Cota
2018-10-28  2:30     ` Richard Henderson
2018-10-23  7:02 ` [Qemu-devel] [PATCH 05/10] cputlb: Move env->vtlb_index to env->tlb_d.vindex Richard Henderson
2018-10-23 11:07   ` Philippe Mathieu-Daudé
2018-10-23  7:02 ` [Qemu-devel] [PATCH 06/10] cputlb: Merge tlb_flush_nocheck into tlb_flush_by_mmuidx_async_work Richard Henderson
2018-10-23  7:02 ` [Qemu-devel] [PATCH 07/10] cputlb: Merge tlb_flush_page into tlb_flush_page_by_mmuidx Richard Henderson
2018-10-23  7:02 ` [Qemu-devel] [PATCH 08/10] cputlb: Count "partial" and "elided" tlb flushes Richard Henderson
2018-10-23  7:02 ` [Qemu-devel] [PATCH 09/10] cputlb: Filter flushes on already clean tlbs Richard Henderson
2018-10-23  7:02 ` [Qemu-devel] [PATCH 10/10] cputlb: Remove tlb_c.pending_flushes Richard Henderson
2018-10-23 17:11 ` [Qemu-devel] [PATCH 00/10] cputlb: track dirty tlbs and general cleanup Emilio G. Cota
