All of lore.kernel.org
 help / color / mirror / Atom feed
From: Alexander Potapenko <glider@google.com>
To: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Vincenzo Frascino <vincenzo.frascino@arm.com>,
	Dmitry Vyukov <dvyukov@google.com>,
	Andrey Ryabinin <aryabinin@virtuozzo.com>,
	Marco Elver <elver@google.com>,
	Evgenii Stepanov <eugenis@google.com>,
	Branislav Rankov <Branislav.Rankov@arm.com>,
	Kevin Brodsky <kevin.brodsky@arm.com>,
	kasan-dev <kasan-dev@googlegroups.com>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Linux Memory Management List <linux-mm@kvack.org>,
	LKML <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH mm v10 05/42] kasan: rename (un)poison_shadow to (un)poison_range
Date: Wed, 18 Nov 2020 16:30:15 +0100	[thread overview]
Message-ID: <CAG_fn=Wy7fLJd46=N9U-yQAQreioEf2ny+CGNmhUVYpbWiXA1Q@mail.gmail.com> (raw)
In-Reply-To: <c305a433db6fe8ef194cddf8615db0ef7a3b0355.1605305705.git.andreyknvl@google.com>

On Fri, Nov 13, 2020 at 11:16 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> This is a preparatory commit for the upcoming addition of a new hardware
> tag-based (MTE-based) KASAN mode.
>
> The new mode won't be using shadow memory. Rename external annotation
> kasan_unpoison_shadow() to kasan_unpoison_range(), and introduce internal
> functions (un)poison_range() (without kasan_ prefix).
>
> Co-developed-by: Marco Elver <elver@google.com>
> Signed-off-by: Marco Elver <elver@google.com>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Alexander Potapenko <glider@google.com>

> ---
> Change-Id: Ia359f32815242c4704e49a5f1639ca2d2f8cba69
> ---
>  include/linux/kasan.h |  6 +++---
>  kernel/fork.c         |  4 ++--
>  mm/kasan/common.c     | 49 ++++++++++++++++++++++++-------------------
>  mm/kasan/generic.c    | 23 ++++++++++----------
>  mm/kasan/kasan.h      |  3 ++-
>  mm/kasan/tags.c       |  2 +-
>  mm/slab_common.c      |  2 +-
>  7 files changed, 47 insertions(+), 42 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 26f2ab92e7ca..d237051dca58 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -71,7 +71,7 @@ extern void kasan_enable_current(void);
>  /* Disable reporting bugs for current task */
>  extern void kasan_disable_current(void);
>
> -void kasan_unpoison_shadow(const void *address, size_t size);
> +void kasan_unpoison_range(const void *address, size_t size);
>
>  void kasan_unpoison_task_stack(struct task_struct *task);
>
> @@ -108,7 +108,7 @@ struct kasan_cache {
>  size_t __ksize(const void *);
>  static inline void kasan_unpoison_slab(const void *ptr)
>  {
> -       kasan_unpoison_shadow(ptr, __ksize(ptr));
> +       kasan_unpoison_range(ptr, __ksize(ptr));
>  }
>  size_t kasan_metadata_size(struct kmem_cache *cache);
>
> @@ -117,7 +117,7 @@ void kasan_restore_multi_shot(bool enabled);
>
>  #else /* CONFIG_KASAN */
>
> -static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
> +static inline void kasan_unpoison_range(const void *address, size_t size) {}
>
>  static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
>
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 15f189bb8ec4..bee52236f09b 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -225,8 +225,8 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
>                 if (!s)
>                         continue;
>
> -               /* Clear the KASAN shadow of the stack. */
> -               kasan_unpoison_shadow(s->addr, THREAD_SIZE);
> +               /* Mark stack accessible for KASAN. */
> +               kasan_unpoison_range(s->addr, THREAD_SIZE);
>
>                 /* Clear stale pointers from reused stack. */
>                 memset(s->addr, 0, THREAD_SIZE);
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index f5739be60edc..6adbf5891aff 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -109,7 +109,7 @@ void *memcpy(void *dest, const void *src, size_t len)
>   * Poisons the shadow memory for 'size' bytes starting from 'addr'.
>   * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
>   */
> -void kasan_poison_shadow(const void *address, size_t size, u8 value)
> +void poison_range(const void *address, size_t size, u8 value)
>  {
>         void *shadow_start, *shadow_end;
>
> @@ -130,7 +130,7 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
>         __memset(shadow_start, value, shadow_end - shadow_start);
>  }
>
> -void kasan_unpoison_shadow(const void *address, size_t size)
> +void unpoison_range(const void *address, size_t size)
>  {
>         u8 tag = get_tag(address);
>
> @@ -149,7 +149,7 @@ void kasan_unpoison_shadow(const void *address, size_t size)
>         if (is_kfence_address(address))
>                 return;
>
> -       kasan_poison_shadow(address, size, tag);
> +       poison_range(address, size, tag);
>
>         if (size & KASAN_SHADOW_MASK) {
>                 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
> @@ -161,12 +161,17 @@ void kasan_unpoison_shadow(const void *address, size_t size)
>         }
>  }
>
> +void kasan_unpoison_range(const void *address, size_t size)
> +{
> +       unpoison_range(address, size);
> +}
> +
>  static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
>  {
>         void *base = task_stack_page(task);
>         size_t size = sp - base;
>
> -       kasan_unpoison_shadow(base, size);
> +       unpoison_range(base, size);
>  }
>
>  /* Unpoison the entire stack for a task. */
> @@ -185,7 +190,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
>          */
>         void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
>
> -       kasan_unpoison_shadow(base, watermark - base);
> +       unpoison_range(base, watermark - base);
>  }
>
>  void kasan_alloc_pages(struct page *page, unsigned int order)
> @@ -199,13 +204,13 @@ void kasan_alloc_pages(struct page *page, unsigned int order)
>         tag = random_tag();
>         for (i = 0; i < (1 << order); i++)
>                 page_kasan_tag_set(page + i, tag);
> -       kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
> +       unpoison_range(page_address(page), PAGE_SIZE << order);
>  }
>
>  void kasan_free_pages(struct page *page, unsigned int order)
>  {
>         if (likely(!PageHighMem(page)))
> -               kasan_poison_shadow(page_address(page),
> +               poison_range(page_address(page),
>                                 PAGE_SIZE << order,
>                                 KASAN_FREE_PAGE);
>  }
> @@ -297,18 +302,18 @@ void kasan_poison_slab(struct page *page)
>
>         for (i = 0; i < compound_nr(page); i++)
>                 page_kasan_tag_reset(page + i);
> -       kasan_poison_shadow(page_address(page), page_size(page),
> -                       KASAN_KMALLOC_REDZONE);
> +       poison_range(page_address(page), page_size(page),
> +                    KASAN_KMALLOC_REDZONE);
>  }
>
>  void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
>  {
> -       kasan_unpoison_shadow(object, cache->object_size);
> +       unpoison_range(object, cache->object_size);
>  }
>
>  void kasan_poison_object_data(struct kmem_cache *cache, void *object)
>  {
> -       kasan_poison_shadow(object,
> +       poison_range(object,
>                         round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
>                         KASAN_KMALLOC_REDZONE);
>  }
> @@ -424,7 +429,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>         }
>
>         rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
> -       kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
> +       poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);
>
>         if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
>                         unlikely(!(cache->flags & SLAB_KASAN)))
> @@ -467,9 +472,9 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
>                 tag = assign_tag(cache, object, false, keep_tag);
>
>         /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
> -       kasan_unpoison_shadow(set_tag(object, tag), size);
> -       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
> -               KASAN_KMALLOC_REDZONE);
> +       unpoison_range(set_tag(object, tag), size);
> +       poison_range((void *)redzone_start, redzone_end - redzone_start,
> +                    KASAN_KMALLOC_REDZONE);
>
>         if (cache->flags & SLAB_KASAN)
>                 kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);
> @@ -508,9 +513,9 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
>                                 KASAN_SHADOW_SCALE_SIZE);
>         redzone_end = (unsigned long)ptr + page_size(page);
>
> -       kasan_unpoison_shadow(ptr, size);
> -       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
> -               KASAN_PAGE_REDZONE);
> +       unpoison_range(ptr, size);
> +       poison_range((void *)redzone_start, redzone_end - redzone_start,
> +                    KASAN_PAGE_REDZONE);
>
>         return (void *)ptr;
>  }
> @@ -542,7 +547,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
>                         kasan_report_invalid_free(ptr, ip);
>                         return;
>                 }
> -               kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
> +               poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
>         } else {
>                 __kasan_slab_free(page->slab_cache, ptr, ip, false);
>         }
> @@ -728,7 +733,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
>          * // vmalloc() allocates memory
>          * // let a = area->addr
>          * // we reach kasan_populate_vmalloc
> -        * // and call kasan_unpoison_shadow:
> +        * // and call unpoison_range:
>          * STORE shadow(a), unpoison_val
>          * ...
>          * STORE shadow(a+99), unpoison_val     x = LOAD p
> @@ -763,7 +768,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size)
>                 return;
>
>         size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
> -       kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
> +       poison_range(start, size, KASAN_VMALLOC_INVALID);
>  }
>
>  void kasan_unpoison_vmalloc(const void *start, unsigned long size)
> @@ -771,7 +776,7 @@ void kasan_unpoison_vmalloc(const void *start, unsigned long size)
>         if (!is_vmalloc_or_module_addr(start))
>                 return;
>
> -       kasan_unpoison_shadow(start, size);
> +       unpoison_range(start, size);
>  }
>
>  static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index d6a386255007..cdc2d8112f3e 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -203,11 +203,11 @@ static void register_global(struct kasan_global *global)
>  {
>         size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
>
> -       kasan_unpoison_shadow(global->beg, global->size);
> +       unpoison_range(global->beg, global->size);
>
> -       kasan_poison_shadow(global->beg + aligned_size,
> -               global->size_with_redzone - aligned_size,
> -               KASAN_GLOBAL_REDZONE);
> +       poison_range(global->beg + aligned_size,
> +                    global->size_with_redzone - aligned_size,
> +                    KASAN_GLOBAL_REDZONE);
>  }
>
>  void __asan_register_globals(struct kasan_global *globals, size_t size)
> @@ -286,13 +286,12 @@ void __asan_alloca_poison(unsigned long addr, size_t size)
>
>         WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
>
> -       kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
> -                             size - rounded_down_size);
> -       kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
> -                       KASAN_ALLOCA_LEFT);
> -       kasan_poison_shadow(right_redzone,
> -                       padding_size + KASAN_ALLOCA_REDZONE_SIZE,
> -                       KASAN_ALLOCA_RIGHT);
> +       unpoison_range((const void *)(addr + rounded_down_size),
> +                      size - rounded_down_size);
> +       poison_range(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
> +                    KASAN_ALLOCA_LEFT);
> +       poison_range(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
> +                    KASAN_ALLOCA_RIGHT);
>  }
>  EXPORT_SYMBOL(__asan_alloca_poison);
>
> @@ -302,7 +301,7 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
>         if (unlikely(!stack_top || stack_top > stack_bottom))
>                 return;
>
> -       kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
> +       unpoison_range(stack_top, stack_bottom - stack_top);
>  }
>  EXPORT_SYMBOL(__asan_allocas_unpoison);
>
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index ac499456740f..42ab02c61331 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -150,7 +150,8 @@ static inline bool addr_has_shadow(const void *addr)
>         return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
>  }
>
> -void kasan_poison_shadow(const void *address, size_t size, u8 value);
> +void poison_range(const void *address, size_t size, u8 value);
> +void unpoison_range(const void *address, size_t size);
>
>  /**
>   * check_memory_region - Check memory region, and report if invalid access.
> diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
> index 5c8b08a25715..c0b3f327812b 100644
> --- a/mm/kasan/tags.c
> +++ b/mm/kasan/tags.c
> @@ -153,7 +153,7 @@ EXPORT_SYMBOL(__hwasan_storeN_noabort);
>
>  void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
>  {
> -       kasan_poison_shadow((void *)addr, size, tag);
> +       poison_range((void *)addr, size, tag);
>  }
>  EXPORT_SYMBOL(__hwasan_tag_memory);
>
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 479d17b90155..0b5ae1819a8b 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1179,7 +1179,7 @@ size_t ksize(const void *objp)
>          * We assume that ksize callers could use whole allocated area,
>          * so we need to unpoison this area.
>          */
> -       kasan_unpoison_shadow(objp, size);
> +       kasan_unpoison_range(objp, size);
>         return size;
>  }
>  EXPORT_SYMBOL(ksize);
> --
> 2.29.2.299.gdc1121823c-goog
>


-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Paul Manicle, Halimah DeLaine Prado
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg

WARNING: multiple messages have this Message-ID (diff)
From: Alexander Potapenko <glider@google.com>
To: Andrey Konovalov <andreyknvl@google.com>
Cc: Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Marco Elver <elver@google.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Kevin Brodsky <kevin.brodsky@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Branislav Rankov <Branislav.Rankov@arm.com>,
	kasan-dev <kasan-dev@googlegroups.com>,
	LKML <linux-kernel@vger.kernel.org>,
	Linux Memory Management List <linux-mm@kvack.org>,
	Evgenii Stepanov <eugenis@google.com>,
	Andrey Ryabinin <aryabinin@virtuozzo.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Vincenzo Frascino <vincenzo.frascino@arm.com>,
	Dmitry Vyukov <dvyukov@google.com>
Subject: Re: [PATCH mm v10 05/42] kasan: rename (un)poison_shadow to (un)poison_range
Date: Wed, 18 Nov 2020 16:30:15 +0100	[thread overview]
Message-ID: <CAG_fn=Wy7fLJd46=N9U-yQAQreioEf2ny+CGNmhUVYpbWiXA1Q@mail.gmail.com> (raw)
In-Reply-To: <c305a433db6fe8ef194cddf8615db0ef7a3b0355.1605305705.git.andreyknvl@google.com>

On Fri, Nov 13, 2020 at 11:16 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> This is a preparatory commit for the upcoming addition of a new hardware
> tag-based (MTE-based) KASAN mode.
>
> The new mode won't be using shadow memory. Rename external annotation
> kasan_unpoison_shadow() to kasan_unpoison_range(), and introduce internal
> functions (un)poison_range() (without kasan_ prefix).
>
> Co-developed-by: Marco Elver <elver@google.com>
> Signed-off-by: Marco Elver <elver@google.com>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Alexander Potapenko <glider@google.com>

> ---
> Change-Id: Ia359f32815242c4704e49a5f1639ca2d2f8cba69
> ---
>  include/linux/kasan.h |  6 +++---
>  kernel/fork.c         |  4 ++--
>  mm/kasan/common.c     | 49 ++++++++++++++++++++++++-------------------
>  mm/kasan/generic.c    | 23 ++++++++++----------
>  mm/kasan/kasan.h      |  3 ++-
>  mm/kasan/tags.c       |  2 +-
>  mm/slab_common.c      |  2 +-
>  7 files changed, 47 insertions(+), 42 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 26f2ab92e7ca..d237051dca58 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -71,7 +71,7 @@ extern void kasan_enable_current(void);
>  /* Disable reporting bugs for current task */
>  extern void kasan_disable_current(void);
>
> -void kasan_unpoison_shadow(const void *address, size_t size);
> +void kasan_unpoison_range(const void *address, size_t size);
>
>  void kasan_unpoison_task_stack(struct task_struct *task);
>
> @@ -108,7 +108,7 @@ struct kasan_cache {
>  size_t __ksize(const void *);
>  static inline void kasan_unpoison_slab(const void *ptr)
>  {
> -       kasan_unpoison_shadow(ptr, __ksize(ptr));
> +       kasan_unpoison_range(ptr, __ksize(ptr));
>  }
>  size_t kasan_metadata_size(struct kmem_cache *cache);
>
> @@ -117,7 +117,7 @@ void kasan_restore_multi_shot(bool enabled);
>
>  #else /* CONFIG_KASAN */
>
> -static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
> +static inline void kasan_unpoison_range(const void *address, size_t size) {}
>
>  static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
>
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 15f189bb8ec4..bee52236f09b 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -225,8 +225,8 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
>                 if (!s)
>                         continue;
>
> -               /* Clear the KASAN shadow of the stack. */
> -               kasan_unpoison_shadow(s->addr, THREAD_SIZE);
> +               /* Mark stack accessible for KASAN. */
> +               kasan_unpoison_range(s->addr, THREAD_SIZE);
>
>                 /* Clear stale pointers from reused stack. */
>                 memset(s->addr, 0, THREAD_SIZE);
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index f5739be60edc..6adbf5891aff 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -109,7 +109,7 @@ void *memcpy(void *dest, const void *src, size_t len)
>   * Poisons the shadow memory for 'size' bytes starting from 'addr'.
>   * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
>   */
> -void kasan_poison_shadow(const void *address, size_t size, u8 value)
> +void poison_range(const void *address, size_t size, u8 value)
>  {
>         void *shadow_start, *shadow_end;
>
> @@ -130,7 +130,7 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
>         __memset(shadow_start, value, shadow_end - shadow_start);
>  }
>
> -void kasan_unpoison_shadow(const void *address, size_t size)
> +void unpoison_range(const void *address, size_t size)
>  {
>         u8 tag = get_tag(address);
>
> @@ -149,7 +149,7 @@ void kasan_unpoison_shadow(const void *address, size_t size)
>         if (is_kfence_address(address))
>                 return;
>
> -       kasan_poison_shadow(address, size, tag);
> +       poison_range(address, size, tag);
>
>         if (size & KASAN_SHADOW_MASK) {
>                 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
> @@ -161,12 +161,17 @@ void kasan_unpoison_shadow(const void *address, size_t size)
>         }
>  }
>
> +void kasan_unpoison_range(const void *address, size_t size)
> +{
> +       unpoison_range(address, size);
> +}
> +
>  static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
>  {
>         void *base = task_stack_page(task);
>         size_t size = sp - base;
>
> -       kasan_unpoison_shadow(base, size);
> +       unpoison_range(base, size);
>  }
>
>  /* Unpoison the entire stack for a task. */
> @@ -185,7 +190,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
>          */
>         void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
>
> -       kasan_unpoison_shadow(base, watermark - base);
> +       unpoison_range(base, watermark - base);
>  }
>
>  void kasan_alloc_pages(struct page *page, unsigned int order)
> @@ -199,13 +204,13 @@ void kasan_alloc_pages(struct page *page, unsigned int order)
>         tag = random_tag();
>         for (i = 0; i < (1 << order); i++)
>                 page_kasan_tag_set(page + i, tag);
> -       kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
> +       unpoison_range(page_address(page), PAGE_SIZE << order);
>  }
>
>  void kasan_free_pages(struct page *page, unsigned int order)
>  {
>         if (likely(!PageHighMem(page)))
> -               kasan_poison_shadow(page_address(page),
> +               poison_range(page_address(page),
>                                 PAGE_SIZE << order,
>                                 KASAN_FREE_PAGE);
>  }
> @@ -297,18 +302,18 @@ void kasan_poison_slab(struct page *page)
>
>         for (i = 0; i < compound_nr(page); i++)
>                 page_kasan_tag_reset(page + i);
> -       kasan_poison_shadow(page_address(page), page_size(page),
> -                       KASAN_KMALLOC_REDZONE);
> +       poison_range(page_address(page), page_size(page),
> +                    KASAN_KMALLOC_REDZONE);
>  }
>
>  void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
>  {
> -       kasan_unpoison_shadow(object, cache->object_size);
> +       unpoison_range(object, cache->object_size);
>  }
>
>  void kasan_poison_object_data(struct kmem_cache *cache, void *object)
>  {
> -       kasan_poison_shadow(object,
> +       poison_range(object,
>                         round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
>                         KASAN_KMALLOC_REDZONE);
>  }
> @@ -424,7 +429,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>         }
>
>         rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
> -       kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
> +       poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);
>
>         if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
>                         unlikely(!(cache->flags & SLAB_KASAN)))
> @@ -467,9 +472,9 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
>                 tag = assign_tag(cache, object, false, keep_tag);
>
>         /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
> -       kasan_unpoison_shadow(set_tag(object, tag), size);
> -       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
> -               KASAN_KMALLOC_REDZONE);
> +       unpoison_range(set_tag(object, tag), size);
> +       poison_range((void *)redzone_start, redzone_end - redzone_start,
> +                    KASAN_KMALLOC_REDZONE);
>
>         if (cache->flags & SLAB_KASAN)
>                 kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);
> @@ -508,9 +513,9 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
>                                 KASAN_SHADOW_SCALE_SIZE);
>         redzone_end = (unsigned long)ptr + page_size(page);
>
> -       kasan_unpoison_shadow(ptr, size);
> -       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
> -               KASAN_PAGE_REDZONE);
> +       unpoison_range(ptr, size);
> +       poison_range((void *)redzone_start, redzone_end - redzone_start,
> +                    KASAN_PAGE_REDZONE);
>
>         return (void *)ptr;
>  }
> @@ -542,7 +547,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
>                         kasan_report_invalid_free(ptr, ip);
>                         return;
>                 }
> -               kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
> +               poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
>         } else {
>                 __kasan_slab_free(page->slab_cache, ptr, ip, false);
>         }
> @@ -728,7 +733,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
>          * // vmalloc() allocates memory
>          * // let a = area->addr
>          * // we reach kasan_populate_vmalloc
> -        * // and call kasan_unpoison_shadow:
> +        * // and call unpoison_range:
>          * STORE shadow(a), unpoison_val
>          * ...
>          * STORE shadow(a+99), unpoison_val     x = LOAD p
> @@ -763,7 +768,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size)
>                 return;
>
>         size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
> -       kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
> +       poison_range(start, size, KASAN_VMALLOC_INVALID);
>  }
>
>  void kasan_unpoison_vmalloc(const void *start, unsigned long size)
> @@ -771,7 +776,7 @@ void kasan_unpoison_vmalloc(const void *start, unsigned long size)
>         if (!is_vmalloc_or_module_addr(start))
>                 return;
>
> -       kasan_unpoison_shadow(start, size);
> +       unpoison_range(start, size);
>  }
>
>  static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index d6a386255007..cdc2d8112f3e 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -203,11 +203,11 @@ static void register_global(struct kasan_global *global)
>  {
>         size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
>
> -       kasan_unpoison_shadow(global->beg, global->size);
> +       unpoison_range(global->beg, global->size);
>
> -       kasan_poison_shadow(global->beg + aligned_size,
> -               global->size_with_redzone - aligned_size,
> -               KASAN_GLOBAL_REDZONE);
> +       poison_range(global->beg + aligned_size,
> +                    global->size_with_redzone - aligned_size,
> +                    KASAN_GLOBAL_REDZONE);
>  }
>
>  void __asan_register_globals(struct kasan_global *globals, size_t size)
> @@ -286,13 +286,12 @@ void __asan_alloca_poison(unsigned long addr, size_t size)
>
>         WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
>
> -       kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
> -                             size - rounded_down_size);
> -       kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
> -                       KASAN_ALLOCA_LEFT);
> -       kasan_poison_shadow(right_redzone,
> -                       padding_size + KASAN_ALLOCA_REDZONE_SIZE,
> -                       KASAN_ALLOCA_RIGHT);
> +       unpoison_range((const void *)(addr + rounded_down_size),
> +                      size - rounded_down_size);
> +       poison_range(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
> +                    KASAN_ALLOCA_LEFT);
> +       poison_range(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
> +                    KASAN_ALLOCA_RIGHT);
>  }
>  EXPORT_SYMBOL(__asan_alloca_poison);
>
> @@ -302,7 +301,7 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
>         if (unlikely(!stack_top || stack_top > stack_bottom))
>                 return;
>
> -       kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
> +       unpoison_range(stack_top, stack_bottom - stack_top);
>  }
>  EXPORT_SYMBOL(__asan_allocas_unpoison);
>
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index ac499456740f..42ab02c61331 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -150,7 +150,8 @@ static inline bool addr_has_shadow(const void *addr)
>         return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
>  }
>
> -void kasan_poison_shadow(const void *address, size_t size, u8 value);
> +void poison_range(const void *address, size_t size, u8 value);
> +void unpoison_range(const void *address, size_t size);
>
>  /**
>   * check_memory_region - Check memory region, and report if invalid access.
> diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
> index 5c8b08a25715..c0b3f327812b 100644
> --- a/mm/kasan/tags.c
> +++ b/mm/kasan/tags.c
> @@ -153,7 +153,7 @@ EXPORT_SYMBOL(__hwasan_storeN_noabort);
>
>  void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
>  {
> -       kasan_poison_shadow((void *)addr, size, tag);
> +       poison_range((void *)addr, size, tag);
>  }
>  EXPORT_SYMBOL(__hwasan_tag_memory);
>
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 479d17b90155..0b5ae1819a8b 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1179,7 +1179,7 @@ size_t ksize(const void *objp)
>          * We assume that ksize callers could use whole allocated area,
>          * so we need to unpoison this area.
>          */
> -       kasan_unpoison_shadow(objp, size);
> +       kasan_unpoison_range(objp, size);
>         return size;
>  }
>  EXPORT_SYMBOL(ksize);
> --
> 2.29.2.299.gdc1121823c-goog
>


-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Paul Manicle, Halimah DeLaine Prado
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

  reply	other threads:[~2020-11-18 15:30 UTC|newest]

Thread overview: 155+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-11-13 22:15 [PATCH mm v10 00/42] kasan: add hardware tag-based mode for arm64 Andrey Konovalov
2020-11-13 22:15 ` Andrey Konovalov
2020-11-13 22:15 ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 01/42] kasan: drop unnecessary GPL text from comment headers Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 02/42] kasan: KASAN_VMALLOC depends on KASAN_GENERIC Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 03/42] kasan: group vmalloc code Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 04/42] kasan: shadow declarations only for software modes Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 05/42] kasan: rename (un)poison_shadow to (un)poison_range Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-18 15:30   ` Alexander Potapenko [this message]
2020-11-18 15:30     ` Alexander Potapenko
2020-11-18 15:30     ` Alexander Potapenko
2020-11-13 22:15 ` [PATCH mm v10 06/42] kasan: rename KASAN_SHADOW_* to KASAN_GRANULE_* Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 07/42] kasan: only build init.c for software modes Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 08/42] kasan: split out shadow.c from common.c Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 09/42] kasan: define KASAN_MEMORY_PER_SHADOW_PAGE Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-18 15:36   ` Alexander Potapenko
2020-11-18 15:36     ` Alexander Potapenko
2020-11-18 15:36     ` Alexander Potapenko
2020-11-13 22:15 ` [PATCH mm v10 10/42] kasan: rename report and tags files Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 11/42] kasan: don't duplicate config dependencies Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 12/42] kasan: hide invalid free check implementation Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 13/42] kasan: decode stack frame only with KASAN_STACK_ENABLE Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 14/42] kasan, arm64: only init shadow for software modes Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 15/42] kasan, arm64: only use kasan_depth " Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 16/42] kasan, arm64: move initialization message Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 17/42] kasan, arm64: rename kasan_init_tags and mark as __init Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 18/42] kasan: rename addr_has_shadow to addr_has_metadata Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 19/42] kasan: rename print_shadow_for_address to print_memory_metadata Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 20/42] kasan: rename SHADOW layout macros to META Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 21/42] kasan: separate metadata_fetch_row for each mode Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 22/42] kasan, arm64: don't allow SW_TAGS with ARM64_MTE Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 23/42] kasan: introduce CONFIG_KASAN_HW_TAGS Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-18 16:04   ` Alexander Potapenko
2020-11-18 16:04     ` Alexander Potapenko
2020-11-18 16:04     ` Alexander Potapenko
2020-11-13 22:15 ` [PATCH mm v10 24/42] arm64: Enable armv8.5-a asm-arch option Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-18 15:48   ` Alexander Potapenko
2020-11-18 15:48     ` Alexander Potapenko
2020-11-18 15:48     ` Alexander Potapenko
2020-11-13 22:15 ` [PATCH mm v10 25/42] arm64: mte: Add in-kernel MTE helpers Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 26/42] arm64: mte: Reset the page tag in page->flags Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-14 12:43   ` Catalin Marinas
2020-11-14 12:43     ` Catalin Marinas
2020-11-13 22:15 ` [PATCH mm v10 27/42] arm64: mte: Add in-kernel tag fault handler Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 28/42] arm64: kasan: Allow enabling in-kernel MTE Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-14 12:47   ` Catalin Marinas
2020-11-14 12:47     ` Catalin Marinas
2020-11-13 22:15 ` [PATCH mm v10 29/42] arm64: mte: Convert gcr_user into an exclude mask Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 30/42] arm64: mte: Switch GCR_EL1 in kernel entry and exit Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15 ` [PATCH mm v10 31/42] kasan, mm: untag page address in free_reserved_area Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-13 22:15   ` Andrey Konovalov
2020-11-18 16:07   ` Alexander Potapenko
2020-11-18 16:07     ` Alexander Potapenko
2020-11-18 16:07     ` Alexander Potapenko
2020-11-13 22:16 ` [PATCH mm v10 32/42] arm64: kasan: Align allocations for HW_TAGS Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 33/42] arm64: kasan: Add arch layer for memory tagging helpers Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 34/42] kasan: define KASAN_GRANULE_SIZE for HW_TAGS Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 35/42] kasan, x86, s390: update undef CONFIG_KASAN Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 36/42] kasan, arm64: expand CONFIG_KASAN checks Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 37/42] kasan, arm64: implement HW_TAGS runtime Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 38/42] kasan, arm64: print report from tag fault handler Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 39/42] kasan, mm: reset tags when accessing metadata Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 40/42] kasan, arm64: enable CONFIG_KASAN_HW_TAGS Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16 ` [PATCH mm v10 41/42] kasan: add documentation for hardware tag-based mode Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-18 15:43   ` Alexander Potapenko
2020-11-18 15:43     ` Alexander Potapenko
2020-11-18 15:43     ` Alexander Potapenko
2020-11-13 22:16 ` [PATCH mm v10 42/42] kselftest/arm64: Check GCR_EL1 after context switch Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 22:16   ` Andrey Konovalov
2020-11-13 23:35 ` [PATCH mm v10 00/42] kasan: add hardware tag-based mode for arm64 Andrew Morton
2020-11-13 23:35   ` Andrew Morton
2020-11-16 14:48 ` Vincenzo Frascino
2020-11-16 14:48   ` Vincenzo Frascino

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CAG_fn=Wy7fLJd46=N9U-yQAQreioEf2ny+CGNmhUVYpbWiXA1Q@mail.gmail.com' \
    --to=glider@google.com \
    --cc=Branislav.Rankov@arm.com \
    --cc=akpm@linux-foundation.org \
    --cc=andreyknvl@google.com \
    --cc=aryabinin@virtuozzo.com \
    --cc=catalin.marinas@arm.com \
    --cc=dvyukov@google.com \
    --cc=elver@google.com \
    --cc=eugenis@google.com \
    --cc=kasan-dev@googlegroups.com \
    --cc=kevin.brodsky@arm.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=vincenzo.frascino@arm.com \
    --cc=will.deacon@arm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes;
see the mirroring instructions for how to clone and mirror
all data and code used by this external index.