From: Andrey Konovalov <andreyknvl@gmail.com>
To: Peter Collingbourne <pcc@google.com>
Cc: Alexander Potapenko <glider@google.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	 Vincenzo Frascino <vincenzo.frascino@arm.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	 Jann Horn <jannh@google.com>,
	Evgenii Stepanov <eugenis@google.com>,
	 Linux Memory Management List <linux-mm@kvack.org>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>
Subject: Re: [PATCH v4 2/4] kasan: use separate (un)poison implementation for integrated init
Date: Wed, 2 Jun 2021 15:19:54 +0300
Message-ID: <CA+fCnZf=BKRMwuLkqEpVAp=L8c87=PjFdxajAuKNOtU7KgnLQw@mail.gmail.com>
In-Reply-To: <CAMn1gO7HbTZXRJtpdGugoK8GbnBWM2ewBrAStAt4bPb1czFstA@mail.gmail.com>

On Tue, Jun 1, 2021 at 10:29 PM Peter Collingbourne <pcc@google.com> wrote:
>
> On Fri, May 28, 2021 at 3:25 AM Andrey Konovalov <andreyknvl@gmail.com> wrote:
> >
> > On Fri, May 28, 2021 at 4:04 AM Peter Collingbourne <pcc@google.com> wrote:
> > >
> > > Currently, with integrated init, page_alloc.c needs to know whether
> > > kasan_alloc_pages() will zero-initialize memory, but this will become
> > > more complicated once we add tag initialization support for user
> > > pages. To avoid page_alloc.c needing to know more details of what
> > > integrated init will do, move the unpoisoning logic for integrated
> > > init into the HW tags implementation. Currently the logic is
> > > identical, but it will diverge in subsequent patches.
> > >
> > > For symmetry, do the same for poisoning, although this logic will
> > > be unaffected by subsequent patches.
> > >
> > > Signed-off-by: Peter Collingbourne <pcc@google.com>
> > > Link: https://linux-review.googlesource.com/id/I2c550234c6c4a893c48c18ff0c6ce658c7c67056
> > > ---
> > > v4:
> > > - use IS_ENABLED(CONFIG_KASAN)
> > > - add comments to kasan_alloc_pages and kasan_free_pages
> > > - remove line break
> > >
> > > v3:
> > > - use BUILD_BUG()
> > >
> > > v2:
> > > - fix build with KASAN disabled
> > >
> > >  include/linux/kasan.h | 64 +++++++++++++++++++++++++------------------
> > >  mm/kasan/common.c     |  4 +--
> > >  mm/kasan/hw_tags.c    | 22 +++++++++++++++
> > >  mm/mempool.c          |  6 ++--
> > >  mm/page_alloc.c       | 55 +++++++++++++++++++------------------
> > >  5 files changed, 95 insertions(+), 56 deletions(-)
> > >
> > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > index b1678a61e6a7..a1c7ce5f3e4f 100644
> > > --- a/include/linux/kasan.h
> > > +++ b/include/linux/kasan.h
> > > @@ -2,6 +2,7 @@
> > >  #ifndef _LINUX_KASAN_H
> > >  #define _LINUX_KASAN_H
> > >
> > > +#include <linux/bug.h>
> > >  #include <linux/static_key.h>
> > >  #include <linux/types.h>
> > >
> > > @@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}
> > >
> > >  #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
> > >
> > > -#ifdef CONFIG_KASAN
> > > -
> > > -struct kasan_cache {
> > > -       int alloc_meta_offset;
> > > -       int free_meta_offset;
> > > -       bool is_kmalloc;
> > > -};
> > > -
> > >  #ifdef CONFIG_KASAN_HW_TAGS
> > >
> > >  DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
> > > @@ -101,11 +94,14 @@ static inline bool kasan_has_integrated_init(void)
> > >         return kasan_enabled();
> > >  }
> > >
> > > +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
> > > +void kasan_free_pages(struct page *page, unsigned int order);
> > > +
> > >  #else /* CONFIG_KASAN_HW_TAGS */
> > >
> > >  static inline bool kasan_enabled(void)
> > >  {
> > > -       return true;
> > > +       return IS_ENABLED(CONFIG_KASAN);
> > >  }
> > >
> > >  static inline bool kasan_has_integrated_init(void)
> > > @@ -113,8 +109,30 @@ static inline bool kasan_has_integrated_init(void)
> > >         return false;
> > >  }
> > >
> > > +static __always_inline void kasan_alloc_pages(struct page *page,
> > > +                                             unsigned int order, gfp_t flags)
> > > +{
> > > +       /* Only available for integrated init. */
> > > +       BUILD_BUG();
> > > +}
> > > +
> > > +static __always_inline void kasan_free_pages(struct page *page,
> > > +                                            unsigned int order)
> > > +{
> > > +       /* Only available for integrated init. */
> > > +       BUILD_BUG();
> > > +}
> > > +
> > >  #endif /* CONFIG_KASAN_HW_TAGS */
> > >
> > > +#ifdef CONFIG_KASAN
> > > +
> > > +struct kasan_cache {
> > > +       int alloc_meta_offset;
> > > +       int free_meta_offset;
> > > +       bool is_kmalloc;
> > > +};
> > > +
> > >  slab_flags_t __kasan_never_merge(void);
> > >  static __always_inline slab_flags_t kasan_never_merge(void)
> > >  {
> > > @@ -130,20 +148,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
> > >                 __kasan_unpoison_range(addr, size);
> > >  }
> > >
> > > -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
> > > -static __always_inline void kasan_alloc_pages(struct page *page,
> > > +void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
> > > +static __always_inline void kasan_poison_pages(struct page *page,
> > >                                                 unsigned int order, bool init)
> > >  {
> > >         if (kasan_enabled())
> > > -               __kasan_alloc_pages(page, order, init);
> > > +               __kasan_poison_pages(page, order, init);
> > >  }
> > >
> > > -void __kasan_free_pages(struct page *page, unsigned int order, bool init);
> > > -static __always_inline void kasan_free_pages(struct page *page,
> > > -                                               unsigned int order, bool init)
> > > +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
> > > +static __always_inline void kasan_unpoison_pages(struct page *page,
> > > +                                                unsigned int order, bool init)
> > >  {
> > >         if (kasan_enabled())
> > > -               __kasan_free_pages(page, order, init);
> > > +               __kasan_unpoison_pages(page, order, init);
> > >  }
> > >
> > >  void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
> > > @@ -285,21 +303,15 @@ void kasan_restore_multi_shot(bool enabled);
> > >
> > >  #else /* CONFIG_KASAN */
> > >
> > > -static inline bool kasan_enabled(void)
> > > -{
> > > -       return false;
> > > -}
> > > -static inline bool kasan_has_integrated_init(void)
> > > -{
> > > -       return false;
> > > -}
> > >  static inline slab_flags_t kasan_never_merge(void)
> > >  {
> > >         return 0;
> > >  }
> > >  static inline void kasan_unpoison_range(const void *address, size_t size) {}
> > > -static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
> > > -static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
> > > +static inline void kasan_poison_pages(struct page *page, unsigned int order,
> > > +                                     bool init) {}
> > > +static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
> > > +                                       bool init) {}
> > >  static inline void kasan_cache_create(struct kmem_cache *cache,
> > >                                       unsigned int *size,
> > >                                       slab_flags_t *flags) {}
> > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > > index 6bb87f2acd4e..0ecd293af344 100644
> > > --- a/mm/kasan/common.c
> > > +++ b/mm/kasan/common.c
> > > @@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
> > >         return 0;
> > >  }
> > >
> > > -void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
> > > +void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
> > >  {
> > >         u8 tag;
> > >         unsigned long i;
> > > @@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
> > >         kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
> > >  }
> > >
> > > -void __kasan_free_pages(struct page *page, unsigned int order, bool init)
> > > +void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
> > >  {
> > >         if (likely(!PageHighMem(page)))
> > >                 kasan_poison(page_address(page), PAGE_SIZE << order,
> > > diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
> > > index 4004388b4e4b..9d0f6f934016 100644
> > > --- a/mm/kasan/hw_tags.c
> > > +++ b/mm/kasan/hw_tags.c
> > > @@ -238,6 +238,28 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
> > >         return &alloc_meta->free_track[0];
> > >  }
> > >
> > > +void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
> > > +{
> > > +       /*
> > > +        * This condition should match the one in post_alloc_hook() in
> > > +        * page_alloc.c.
> > > +        */
> > > +       bool init = !want_init_on_free() && want_init_on_alloc(flags);
> >
> > Now we have a comment here ...
> >
> > > +
> > > +       kasan_unpoison_pages(page, order, init);
> > > +}
> > > +
> > > +void kasan_free_pages(struct page *page, unsigned int order)
> > > +{
> > > +       /*
> > > +        * This condition should match the one in free_pages_prepare() in
> > > +        * page_alloc.c.
> > > +        */
> > > +       bool init = want_init_on_free();
> >
> > and here, ...
> >
> > > +
> > > +       kasan_poison_pages(page, order, init);
> > > +}
> > > +
> > >  #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
> > >
> > >  void kasan_set_tagging_report_once(bool state)
> > > diff --git a/mm/mempool.c b/mm/mempool.c
> > > index a258cf4de575..0b8afbec3e35 100644
> > > --- a/mm/mempool.c
> > > +++ b/mm/mempool.c
> > > @@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
> > >         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> > >                 kasan_slab_free_mempool(element);
> > >         else if (pool->alloc == mempool_alloc_pages)
> > > -               kasan_free_pages(element, (unsigned long)pool->pool_data, false);
> > > +               kasan_poison_pages(element, (unsigned long)pool->pool_data,
> > > +                                  false);
> > >  }
> > >
> > >  static void kasan_unpoison_element(mempool_t *pool, void *element)
> > > @@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
> > >         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> > >                 kasan_unpoison_range(element, __ksize(element));
> > >         else if (pool->alloc == mempool_alloc_pages)
> > > -               kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
> > > +               kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
> > > +                                    false);
> > >  }
> > >
> > >  static __always_inline void add_element(mempool_t *pool, void *element)
> > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> > > index aaa1655cf682..4fddb7cac3c6 100644
> > > --- a/mm/page_alloc.c
> > > +++ b/mm/page_alloc.c
> > > @@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
> > >  static DEFINE_STATIC_KEY_TRUE(deferred_pages);
> > >
> > >  /*
> > > - * Calling kasan_free_pages() only after deferred memory initialization
> > > + * Calling kasan_poison_pages() only after deferred memory initialization
> > >   * has completed. Poisoning pages during deferred memory init will greatly
> > >   * lengthen the process and cause problem in large memory systems as the
> > >   * deferred pages initialization is done with interrupt disabled.
> > > @@ -394,15 +394,11 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
> > >   * on-demand allocation and then freed again before the deferred pages
> > >   * initialization is done, but this is not likely to happen.
> > >   */
> > > -static inline void kasan_free_nondeferred_pages(struct page *page, int order,
> > > -                                               bool init, fpi_t fpi_flags)
> > > +static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
> > >  {
> > > -       if (static_branch_unlikely(&deferred_pages))
> > > -               return;
> > > -       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> > > -                       (fpi_flags & FPI_SKIP_KASAN_POISON))
> > > -               return;
> > > -       kasan_free_pages(page, order, init);
> > > +       return static_branch_unlikely(&deferred_pages) ||
> > > +              (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> > > +               (fpi_flags & FPI_SKIP_KASAN_POISON));
> > >  }
> > >
> > >  /* Returns true if the struct page for the pfn is uninitialised */
> > > @@ -453,13 +449,10 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
> > >         return false;
> > >  }
> > >  #else
> > > -static inline void kasan_free_nondeferred_pages(struct page *page, int order,
> > > -                                               bool init, fpi_t fpi_flags)
> > > +static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
> > >  {
> > > -       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> > > -                       (fpi_flags & FPI_SKIP_KASAN_POISON))
> > > -               return;
> > > -       kasan_free_pages(page, order, init);
> > > +       return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> > > +               (fpi_flags & FPI_SKIP_KASAN_POISON));
> > >  }
> > >
> > >  static inline bool early_page_uninitialised(unsigned long pfn)
> > > @@ -1245,7 +1238,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
> > >                         unsigned int order, bool check_free, fpi_t fpi_flags)
> > >  {
> > >         int bad = 0;
> > > -       bool init;
> > > +       bool skip_kasan_poison = should_skip_kasan_poison(fpi_flags);
> > >
> > >         VM_BUG_ON_PAGE(PageTail(page), page);
> > >
> > > @@ -1314,10 +1307,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
> > >          * With hardware tag-based KASAN, memory tags must be set before the
> > >          * page becomes unavailable via debug_pagealloc or arch_free_page.
> > >          */
> > > -       init = want_init_on_free();
> > > -       if (init && !kasan_has_integrated_init())
> > > -               kernel_init_free_pages(page, 1 << order);
> > > -       kasan_free_nondeferred_pages(page, order, init, fpi_flags);
> > > +       if (kasan_has_integrated_init()) {
> > > +               if (!skip_kasan_poison)
> > > +                       kasan_free_pages(page, order);
> > > +       } else {
> > > +               bool init = want_init_on_free();
> >
> > ... but not here ...
> >
> > > +
> > > +               if (init)
> > > +                       kernel_init_free_pages(page, 1 << order);
> > > +               if (!skip_kasan_poison)
> > > +                       kasan_poison_pages(page, order, init);
> > > +       }
> > >
> > >         /*
> > >          * arch_free_page() can make the page's contents inaccessible.  s390
> > > @@ -2324,8 +2324,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
> > >  inline void post_alloc_hook(struct page *page, unsigned int order,
> > >                                 gfp_t gfp_flags)
> > >  {
> > > -       bool init;
> > > -
> > >         set_page_private(page, 0);
> > >         set_page_refcounted(page);
> > >
> > > @@ -2344,10 +2342,15 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
> > >          * kasan_alloc_pages and kernel_init_free_pages must be
> > >          * kept together to avoid discrepancies in behavior.
> > >          */
> > > -       init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
> > > -       kasan_alloc_pages(page, order, init);
> > > -       if (init && !kasan_has_integrated_init())
> > > -               kernel_init_free_pages(page, 1 << order);
> > > +       if (kasan_has_integrated_init()) {
> > > +               kasan_alloc_pages(page, order, gfp_flags);
> > > +       } else {
> > > +               bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
> >
> > ... or here.
> >
> > So if someone updates one of these conditions, they might forget the
> > ones in KASAN code.
> >
> > Is there a strong reason not to use a macro or static inline helper?
> > If not, let's do that.
>
> I'm not sure that it would accomplish much. It isn't much code, after
> all, and it would add another level of indirection that readers need
> to look through in order to understand what is going on.
>
> We already have this comment in free_pages_prepare:
>
>         /*
>          * As memory initialization might be integrated into KASAN,
>          * kasan_free_pages and kernel_init_free_pages must be
>          * kept together to avoid discrepancies in behavior.
>          *
>          * With hardware tag-based KASAN, memory tags must be set before the
>          * page becomes unavailable via debug_pagealloc or arch_free_page.
>          */
>
> and this one in post_alloc_hook:
>
>         /*
>          * As memory initialization might be integrated into KASAN,
>          * kasan_alloc_pages and kernel_init_free_pages must be
>          * kept together to avoid discrepancies in behavior.
>          */
>
> Is that not enough?

Ah, I forgot about those two. Alright, let's keep this version with the comments then.
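
For the record, the shared helper I had in mind would have been
something along these lines (an untested sketch; the helper name is
hypothetical and not part of the patch, and it assumes the
want_init_on_free()/want_init_on_alloc() declarations from
include/linux/mm.h are visible):

        /*
         * Hypothetical helper keeping the "should this allocation be
         * zero-initialized?" condition in one place. It only wraps the
         * existing checks that the patch spells out in both
         * post_alloc_hook() and kasan_alloc_pages().
         */
        static inline bool want_zero_on_alloc(gfp_t flags)
        {
                return !want_init_on_free() && want_init_on_alloc(flags);
        }

But with the cross-referencing comments in place, keeping the
conditions open-coded is fine.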

