From: Tom Lendacky <thomas.lendacky@amd.com>
To: Thomas Gleixner <tglx@linutronix.de>,
LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, Kees Cook <keescook@chromium.org>,
Paolo Bonzini <pbonzini@redhat.com>,
Juergen Gross <jgross@suse.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>
Subject: Re: [patch 05/15] x86/tlb: Move __flush_tlb() out of line
Date: Mon, 20 Apr 2020 08:48:50 -0500 [thread overview]
Message-ID: <5857df01-abeb-c6cd-8e92-64eb365dc835@amd.com> (raw)
In-Reply-To: <20200419203336.134117165@linutronix.de>
On 4/19/20 3:31 PM, Thomas Gleixner wrote:
> cpu_tlbstate is exported because various TLB related functions need access
> to it, but cpu_tlbstate is sensitive information which should only be
> accessed by well contained kernel functions and not be directly exposed to
> modules.
>
> The various TLB flush functions need access to cpu_tlbstate. As a first
> step move __flush_tlb() out of line and hide the native function. The
> latter can be static when CONFIG_PARAVIRT is disabled.
>
> Consolidate the name space while at it and remove the pointless extra
> wrapper in the paravirt code.
>
> No functional change.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
> Cc: Juergen Gross <jgross@suse.com>
> ---
> arch/x86/include/asm/paravirt.h | 4 +++-
> arch/x86/include/asm/tlbflush.h | 29 +++++------------------------
> arch/x86/kernel/cpu/mtrr/generic.c | 4 ++--
> arch/x86/kernel/paravirt.c | 7 +------
> arch/x86/mm/mem_encrypt.c | 2 +-
> arch/x86/mm/tlb.c | 33 ++++++++++++++++++++++++++++++++-
> arch/x86/platform/uv/tlb_uv.c | 2 +-
> 7 files changed, 45 insertions(+), 36 deletions(-)
>
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -47,7 +47,9 @@ static inline void slow_down_io(void)
> #endif
> }
>
> -static inline void __flush_tlb(void)
> +void native_flush_tlb_local(void);
> +
> +static inline void __flush_tlb_local(void)
> {
> PVOP_VCALL0(mmu.flush_tlb_user);
> }
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -140,12 +140,13 @@ static inline unsigned long build_cr3_no
> return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
> }
>
> +void flush_tlb_local(void);
> +
> #ifdef CONFIG_PARAVIRT
> #include <asm/paravirt.h>
> #else
> -#define __flush_tlb() __native_flush_tlb()
> -#define __flush_tlb_global() __native_flush_tlb_global()
> -#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
> +#define __flush_tlb_global() __native_flush_tlb_global()
> +#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
> #endif
>
> struct tlb_context {
> @@ -371,24 +372,6 @@ static inline void invalidate_user_asid(
> }
>
> /*
> - * flush the entire current user mapping
> - */
> -static inline void __native_flush_tlb(void)
> -{
> - /*
> - * Preemption or interrupts must be disabled to protect the access
> - * to the per CPU variable and to prevent being preempted between
> - * read_cr3() and write_cr3().
> - */
> - WARN_ON_ONCE(preemptible());
> -
> - invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
> -
> - /* If current->mm == NULL then the read_cr3() "borrows" an mm */
> - native_write_cr3(__native_read_cr3());
> -}
> -
> -/*
> * flush everything
> */
> static inline void __native_flush_tlb_global(void)
> @@ -461,7 +444,7 @@ static inline void __flush_tlb_all(void)
> /*
> * !PGE -> !PCID (setup_pcid()), thus every flush is total.
> */
> - __flush_tlb();
> + flush_tlb_local();
> }
> }
>
> @@ -537,8 +520,6 @@ struct flush_tlb_info {
> bool freed_tables;
> };
>
> -#define local_flush_tlb() __flush_tlb()
> -
> #define flush_tlb_mm(mm) \
> flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
>
> --- a/arch/x86/kernel/cpu/mtrr/generic.c
> +++ b/arch/x86/kernel/cpu/mtrr/generic.c
> @@ -761,7 +761,7 @@ static void prepare_set(void) __acquires
>
> /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
> count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
> - __flush_tlb();
> + flush_tlb_local();
>
> /* Save MTRR state */
> rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
> @@ -778,7 +778,7 @@ static void post_set(void) __releases(se
> {
> /* Flush TLBs (no need to flush caches - they are disabled) */
> count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
> - __flush_tlb();
> + flush_tlb_local();
>
> /* Intel (P6) standard MTRRs */
> mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -160,11 +160,6 @@ unsigned paravirt_patch_insns(void *insn
> return insn_len;
> }
>
> -static void native_flush_tlb(void)
> -{
> - __native_flush_tlb();
> -}
> -
> /*
> * Global pages have to be flushed a bit differently. Not a real
> * performance problem because this does not happen often.
> @@ -359,7 +354,7 @@ struct paravirt_patch_template pv_ops =
> #endif /* CONFIG_PARAVIRT_XXL */
>
> /* Mmu ops. */
> - .mmu.flush_tlb_user = native_flush_tlb,
> + .mmu.flush_tlb_user = native_flush_tlb_local,
> .mmu.flush_tlb_kernel = native_flush_tlb_global,
> .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
> .mmu.flush_tlb_others = native_flush_tlb_others,
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -134,7 +134,7 @@ static void __init __sme_early_map_unmap
> size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
> } while (size);
>
> - __native_flush_tlb();
> + flush_tlb_local();
This invoked __native_flush_tlb() directly because of how early it is
called — the paravirt ops support isn't set up yet at that point, so going
through the paravirt path results in a crash. So this call site needs a
"native" version of the TLB flush that it can invoke directly.
Thanks,
Tom
> }
>
> void __init sme_unmap_bootdata(char *real_mode_data)
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -18,6 +18,13 @@
>
> #include "mm_internal.h"
>
> +#ifdef CONFIG_PARAVIRT
> +# define STATIC_NOPV
> +#else
> +# define STATIC_NOPV static
> +# define __flush_tlb_local native_flush_tlb_local
> +#endif
> +
> /*
> * TLB flushing, formerly SMP-only
> * c/o Linus Torvalds.
> @@ -645,7 +652,7 @@ static void flush_tlb_func_common(const
> trace_tlb_flush(reason, nr_invalidate);
> } else {
> /* Full flush. */
> - local_flush_tlb();
> + flush_tlb_local();
> if (local)
> count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
> trace_tlb_flush(reason, TLB_FLUSH_ALL);
> @@ -884,6 +891,30 @@ unsigned long __get_current_cr3_fast(voi
> EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
>
> /*
> + * Flush the entire current user mapping
> + */
> +STATIC_NOPV void native_flush_tlb_local(void)
> +{
> + /*
> + * Preemption or interrupts must be disabled to protect the access
> + * to the per CPU variable and to prevent being preempted between
> + * read_cr3() and write_cr3().
> + */
> + WARN_ON_ONCE(preemptible());
> +
> + invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
> +
> + /* If current->mm == NULL then the read_cr3() "borrows" an mm */
> + native_write_cr3(__native_read_cr3());
> +}
> +
> +void flush_tlb_local(void)
> +{
> + __flush_tlb_local();
> +}
> +EXPORT_SYMBOL_GPL(flush_tlb_local);
> +
> +/*
> * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
> * This means that the 'struct flush_tlb_info' that describes which mappings to
> * flush is actually fixed. We therefore set a single fixed struct and use it in
> --- a/arch/x86/platform/uv/tlb_uv.c
> +++ b/arch/x86/platform/uv/tlb_uv.c
> @@ -293,7 +293,7 @@ static void bau_process_message(struct m
> * This must be a normal message, or retry of a normal message
> */
> if (msg->address == TLB_FLUSH_ALL) {
> - local_flush_tlb();
> + flush_tlb_local();
> stat->d_alltlb++;
> } else {
> __flush_tlb_one_user(msg->address);
>
next prev parent reply other threads:[~2020-04-20 13:51 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-04-19 20:31 [patch 00/15] x86/tlb: Unexport per-CPU tlbstate Thomas Gleixner
2020-04-19 20:31 ` [patch 01/15] x86/tlb: Uninline __get_current_cr3_fast() Thomas Gleixner
2020-04-19 20:31 ` [patch 02/15] x86/cpu: Uninline CR4 accessors Thomas Gleixner
2020-04-20 9:01 ` Christoph Hellwig
2020-04-20 9:34 ` Borislav Petkov
2020-04-20 17:25 ` Thomas Gleixner
2020-04-19 20:31 ` [patch 03/15] x86/cr4: Sanitize CR4.PCE update Thomas Gleixner
2020-04-19 20:31 ` [patch 04/15] x86/alternatives: Move temporary_mm helpers into C Thomas Gleixner
2020-04-19 20:31 ` [patch 05/15] x86/tlb: Move __flush_tlb() out of line Thomas Gleixner
2020-04-20 13:48 ` Tom Lendacky [this message]
2020-04-20 14:03 ` Jürgen Groß
2020-04-20 14:26 ` Tom Lendacky
2020-04-20 14:38 ` Jürgen Groß
2020-04-20 18:30 ` Tom Lendacky
2020-04-19 20:31 ` [patch 06/15] x86/tlb: Move __flush_tlb_global() " Thomas Gleixner
2020-04-19 20:31 ` [patch 07/15] x86/tlb: Move __flush_tlb_one_user() " Thomas Gleixner
2020-04-19 20:31 ` [patch 08/15] x86/tlb: Move __flush_tlb_one_kernel() " Thomas Gleixner
2020-04-19 20:31 ` [patch 09/15] x86/tlb: Move flush_tlb_others() " Thomas Gleixner
2020-04-19 20:31 ` [patch 10/15] x86/tlb: Move paravirt_tlb_remove_table() to the usage site Thomas Gleixner
2020-04-19 20:31 ` [patch 11/15] x86/tlb: Move cr4_set_bits_and_update_boot() " Thomas Gleixner
2020-04-19 20:31 ` [patch 12/15] x86/tlb: Uninline nmi_uaccess_okay() Thomas Gleixner
2020-04-19 20:31 ` [patch 13/15] x86/tlb: Move PCID helpers where they are used Thomas Gleixner
2020-04-19 20:31 ` [patch 14/15] xen/privcmd: Remove unneeded asm/tlb.h include Thomas Gleixner
2020-04-19 20:31 ` [patch 15/15] x86/tlb: Restrict access to tlbstate Thomas Gleixner
2020-04-20 9:20 ` [patch 00/15] x86/tlb: Unexport per-CPU tlbstate Christoph Hellwig
2020-04-20 16:58 ` Alexandre Chartre
2020-04-20 20:08 ` Thomas Gleixner
2020-04-20 17:27 ` Thomas Gleixner
2020-04-21 8:09 ` Sean Christopherson
2020-04-21 9:09 ` Thomas Gleixner
2020-04-22 0:42 ` Sean Christopherson
2020-04-20 10:25 ` Peter Zijlstra
2020-04-20 16:33 ` Alexandre Chartre
2020-04-21 17:10 ` Andy Lutomirski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5857df01-abeb-c6cd-8e92-64eb365dc835@amd.com \
--to=thomas.lendacky@amd.com \
--cc=boris.ostrovsky@oracle.com \
--cc=jgross@suse.com \
--cc=keescook@chromium.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=tglx@linutronix.de \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).