All of lore.kernel.org
 help / color / mirror / Atom feed
From: Andrey Konovalov <andreyknvl@gmail.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>,
	Christoph Lameter <cl@linux.com>,
	 David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	 Pekka Enberg <penberg@kernel.org>,
	Linux Memory Management List <linux-mm@kvack.org>,
	 Andrew Morton <akpm@linux-foundation.org>,
	patches@lists.linux.dev,
	 Andrey Ryabinin <ryabinin.a.a@gmail.com>,
	Alexander Potapenko <glider@google.com>,
	 Dmitry Vyukov <dvyukov@google.com>,
	kasan-dev <kasan-dev@googlegroups.com>
Subject: Re: [PATCH v2 25/33] mm/kasan: Convert to struct folio and struct slab
Date: Thu, 2 Dec 2021 18:16:50 +0100	[thread overview]
Message-ID: <CA+fCnZd8oD2nEB0C+D73mQqJobaVY_82gnU9Lfu_JydDZ21sQQ@mail.gmail.com> (raw)
In-Reply-To: <20211201181510.18784-26-vbabka@suse.cz>

On Wed, Dec 1, 2021 at 7:15 PM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> KASAN accesses some slab-related struct page fields, so we need to convert it
> to struct slab. Some places are a bit simplified thanks to kasan_addr_to_slab()
> encapsulating the PageSlab flag check through virt_to_slab().
> When resolving object address to either a real slab or a large kmalloc, use
> struct folio as the intermediate type for testing the slab flag to avoid
> unnecessary implicit compound_head().
>
> [ vbabka@suse.cz: use struct folio, adjust to differences in previous patches ]
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> Cc: Alexander Potapenko <glider@google.com>
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: <kasan-dev@googlegroups.com>
> ---
>  include/linux/kasan.h  |  9 +++++----
>  mm/kasan/common.c      | 23 +++++++++++++----------
>  mm/kasan/generic.c     |  8 ++++----
>  mm/kasan/kasan.h       |  1 +
>  mm/kasan/quarantine.c  |  2 +-
>  mm/kasan/report.c      | 13 +++++++++++--
>  mm/kasan/report_tags.c | 10 +++++-----
>  mm/slab.c              |  2 +-
>  mm/slub.c              |  2 +-
>  9 files changed, 42 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index d8783b682669..fb78108d694e 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -9,6 +9,7 @@
>
>  struct kmem_cache;
>  struct page;
> +struct slab;
>  struct vm_struct;
>  struct task_struct;
>
> @@ -193,11 +194,11 @@ static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
>         return 0;
>  }
>
> -void __kasan_poison_slab(struct page *page);
> -static __always_inline void kasan_poison_slab(struct page *page)
> +void __kasan_poison_slab(struct slab *slab);
> +static __always_inline void kasan_poison_slab(struct slab *slab)
>  {
>         if (kasan_enabled())
> -               __kasan_poison_slab(page);
> +               __kasan_poison_slab(slab);
>  }
>
>  void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
> @@ -322,7 +323,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
>                                       slab_flags_t *flags) {}
>  static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
>  static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> -static inline void kasan_poison_slab(struct page *page) {}
> +static inline void kasan_poison_slab(struct slab *slab) {}
>  static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
>                                         void *object) {}
>  static inline void kasan_poison_object_data(struct kmem_cache *cache,
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 6a1cd2d38bff..7c06db78a76c 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -247,8 +247,9 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
>  }
>  #endif
>
> -void __kasan_poison_slab(struct page *page)
> +void __kasan_poison_slab(struct slab *slab)
>  {
> +       struct page *page = slab_page(slab);
>         unsigned long i;
>
>         for (i = 0; i < compound_nr(page); i++)
> @@ -401,9 +402,9 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
>
>  void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
>  {
> -       struct page *page;
> +       struct folio *folio;
>
> -       page = virt_to_head_page(ptr);
> +       folio = virt_to_folio(ptr);
>
>         /*
>          * Even though this function is only called for kmem_cache_alloc and
> @@ -411,12 +412,14 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
>          * !PageSlab() when the size provided to kmalloc is larger than
>          * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
>          */
> -       if (unlikely(!PageSlab(page))) {
> +       if (unlikely(!folio_test_slab(folio))) {
>                 if (____kasan_kfree_large(ptr, ip))
>                         return;
> -               kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
> +               kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
>         } else {
> -               ____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
> +               struct slab *slab = folio_slab(folio);
> +
> +               ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
>         }
>  }
>
> @@ -560,7 +563,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
>
>  void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
>  {
> -       struct page *page;
> +       struct slab *slab;
>
>         if (unlikely(object == ZERO_SIZE_PTR))
>                 return (void *)object;
> @@ -572,13 +575,13 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
>          */
>         kasan_unpoison(object, size, false);
>
> -       page = virt_to_head_page(object);
> +       slab = virt_to_slab(object);
>
>         /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
> -       if (unlikely(!PageSlab(page)))
> +       if (unlikely(!slab))
>                 return __kasan_kmalloc_large(object, size, flags);
>         else
> -               return ____kasan_kmalloc(page->slab_cache, object, size, flags);
> +               return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
>  }
>
>  bool __kasan_check_byte(const void *address, unsigned long ip)
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 5d0b79416c4e..a25ad4090615 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -330,16 +330,16 @@ DEFINE_ASAN_SET_SHADOW(f8);
>
>  static void __kasan_record_aux_stack(void *addr, bool can_alloc)
>  {
> -       struct page *page = kasan_addr_to_page(addr);
> +       struct slab *slab = kasan_addr_to_slab(addr);
>         struct kmem_cache *cache;
>         struct kasan_alloc_meta *alloc_meta;
>         void *object;
>
> -       if (is_kfence_address(addr) || !(page && PageSlab(page)))
> +       if (is_kfence_address(addr) || !slab)
>                 return;
>
> -       cache = page->slab_cache;
> -       object = nearest_obj(cache, page_slab(page), addr);
> +       cache = slab->slab_cache;
> +       object = nearest_obj(cache, slab, addr);
>         alloc_meta = kasan_get_alloc_meta(cache, object);
>         if (!alloc_meta)
>                 return;
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index aebd8df86a1f..c17fa8d26ffe 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -265,6 +265,7 @@ bool kasan_report(unsigned long addr, size_t size,
>  void kasan_report_invalid_free(void *object, unsigned long ip);
>
>  struct page *kasan_addr_to_page(const void *addr);
> +struct slab *kasan_addr_to_slab(const void *addr);
>
>  depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
>  void kasan_set_track(struct kasan_track *track, gfp_t flags);
> diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
> index d8ccff4c1275..587da8995f2d 100644
> --- a/mm/kasan/quarantine.c
> +++ b/mm/kasan/quarantine.c
> @@ -117,7 +117,7 @@ static unsigned long quarantine_batch_size;
>
>  static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
>  {
> -       return virt_to_head_page(qlink)->slab_cache;
> +       return virt_to_slab(qlink)->slab_cache;
>  }
>
>  static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index e00999dc6499..3ad9624dcc56 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -150,6 +150,14 @@ struct page *kasan_addr_to_page(const void *addr)
>         return NULL;
>  }
>
> +struct slab *kasan_addr_to_slab(const void *addr)
> +{
> +       if ((addr >= (void *)PAGE_OFFSET) &&
> +                       (addr < high_memory))
> +               return virt_to_slab(addr);
> +       return NULL;
> +}
> +
>  static void describe_object_addr(struct kmem_cache *cache, void *object,
>                                 const void *addr)
>  {
> @@ -248,8 +256,9 @@ static void print_address_description(void *addr, u8 tag)
>         pr_err("\n");
>
>         if (page && PageSlab(page)) {
> -               struct kmem_cache *cache = page->slab_cache;
> -               void *object = nearest_obj(cache, page_slab(page),      addr);
> +               struct slab *slab = page_slab(page);
> +               struct kmem_cache *cache = slab->slab_cache;
> +               void *object = nearest_obj(cache, slab, addr);
>
>                 describe_object(cache, object, addr, tag);
>         }
> diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
> index 06c21dd77493..1b41de88c53e 100644
> --- a/mm/kasan/report_tags.c
> +++ b/mm/kasan/report_tags.c
> @@ -12,7 +12,7 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
>  #ifdef CONFIG_KASAN_TAGS_IDENTIFY
>         struct kasan_alloc_meta *alloc_meta;
>         struct kmem_cache *cache;
> -       struct page *page;
> +       struct slab *slab;
>         const void *addr;
>         void *object;
>         u8 tag;
> @@ -20,10 +20,10 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
>
>         tag = get_tag(info->access_addr);
>         addr = kasan_reset_tag(info->access_addr);
> -       page = kasan_addr_to_page(addr);
> -       if (page && PageSlab(page)) {
> -               cache = page->slab_cache;
> -               object = nearest_obj(cache, page_slab(page), (void *)addr);
> +       slab = kasan_addr_to_slab(addr);
> +       if (slab) {
> +               cache = slab->slab_cache;
> +               object = nearest_obj(cache, slab, (void *)addr);
>                 alloc_meta = kasan_get_alloc_meta(cache, object);
>
>                 if (alloc_meta) {
> diff --git a/mm/slab.c b/mm/slab.c
> index 785fffd527fe..fed55fa1b7d0 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -2605,7 +2605,7 @@ static struct slab *cache_grow_begin(struct kmem_cache *cachep,
>          * page_address() in the latter returns a non-tagged pointer,
>          * as it should be for slab pages.
>          */
> -       kasan_poison_slab(slab_page(slab));
> +       kasan_poison_slab(slab);
>
>         /* Get slab management. */
>         freelist = alloc_slabmgmt(cachep, slab, offset,
> diff --git a/mm/slub.c b/mm/slub.c
> index 61aaaa662c5e..58f0d499a293 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1961,7 +1961,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
>
>         slab->slab_cache = s;
>
> -       kasan_poison_slab(slab_page(slab));
> +       kasan_poison_slab(slab);
>
>         start = slab_address(slab);
>
> --
> 2.33.1
>

Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>

Great job with the overall struct page refactoring!

Thanks!

  reply	other threads:[~2021-12-02 17:17 UTC|newest]

Thread overview: 148+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-12-01 18:14 [PATCH v2 00/33] Separate struct slab from struct page Vlastimil Babka
2021-12-01 18:14 ` Vlastimil Babka
2021-12-01 18:14 ` Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 01/33] mm: add virt_to_folio() and folio_address() Vlastimil Babka
2021-12-14 14:20   ` Johannes Weiner
2021-12-14 14:27     ` Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 02/33] mm/slab: Dissolve slab_map_pages() in its caller Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 03/33] mm/slub: Make object_err() static Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 04/33] mm: Split slab into its own type Vlastimil Babka
2021-12-14 14:24   ` Johannes Weiner
2021-12-01 18:14 ` [PATCH v2 05/33] mm: Add account_slab() and unaccount_slab() Vlastimil Babka
2021-12-14 14:25   ` Johannes Weiner
2021-12-01 18:14 ` [PATCH v2 06/33] mm: Convert virt_to_cache() to use struct slab Vlastimil Babka
2021-12-14 14:26   ` Johannes Weiner
2021-12-01 18:14 ` [PATCH v2 07/33] mm: Convert __ksize() to " Vlastimil Babka
2021-12-14 14:28   ` Johannes Weiner
2021-12-01 18:14 ` [PATCH v2 08/33] mm: Use struct slab in kmem_obj_info() Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 09/33] mm: Convert check_heap_object() to use struct slab Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 10/33] mm/slub: Convert detached_freelist to use a " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 11/33] mm/slub: Convert kfree() " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 12/33] mm/slub: Convert __slab_lock() and __slab_unlock() to " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 13/33] mm/slub: Convert print_page_info() to print_slab_info() Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 14/33] mm/slub: Convert alloc_slab_page() to return a struct slab Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 15/33] mm/slub: Convert __free_slab() to use " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 16/33] mm/slub: Convert pfmemalloc_match() to take a " Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 17/33] mm/slub: Convert most struct page to struct slab by spatch Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 18/33] mm/slub: Finish struct page to struct slab conversion Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 19/33] mm/slab: Convert kmem_getpages() and kmem_freepages() to struct slab Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 20/33] mm/slab: Convert most struct page to struct slab by spatch Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 21/33] mm/slab: Finish struct page to struct slab conversion Vlastimil Babka
2021-12-01 18:14 ` [PATCH v2 22/33] mm: Convert struct page to struct slab in functions used by other subsystems Vlastimil Babka
2021-12-01 18:14   ` Vlastimil Babka
2021-12-02 17:16   ` Andrey Konovalov
2021-12-02 17:16     ` Andrey Konovalov
2021-12-14 14:31   ` Johannes Weiner
2021-12-14 14:31     ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 23/33] mm/memcg: Convert slab objcgs from struct page to struct slab Vlastimil Babka
2021-12-01 18:15   ` Vlastimil Babka
2021-12-14 14:43   ` Johannes Weiner
2021-12-14 14:43     ` Johannes Weiner
2021-12-20 23:31     ` Vlastimil Babka
2021-12-20 23:31       ` Vlastimil Babka
2021-12-01 18:15 ` [PATCH v2 24/33] mm/slob: Convert SLOB to use " Vlastimil Babka
2021-12-10 10:44   ` Hyeonggon Yoo
2021-12-10 11:44     ` Vlastimil Babka
2021-12-10 15:29       ` Hyeonggon Yoo
2021-12-10 18:09         ` Vlastimil Babka
2021-12-11 10:54           ` Hyeonggon Yoo
2021-12-01 18:15 ` [PATCH v2 25/33] mm/kasan: Convert to struct folio and " Vlastimil Babka
2021-12-02 17:16   ` Andrey Konovalov [this message]
2021-12-01 18:15 ` [PATCH v2 26/33] mm/kfence: Convert kfence_guarded_alloc() to " Vlastimil Babka
2021-12-01 18:15 ` [PATCH v2 27/33] zsmalloc: Stop using slab fields in struct page Vlastimil Babka
2021-12-01 23:34   ` Minchan Kim
2021-12-14 14:58   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 28/33] bootmem: Use page->index instead of page->freelist Vlastimil Babka
2021-12-14 14:59   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 29/33] iommu: Use put_pages_list Vlastimil Babka
2021-12-01 18:15   ` Vlastimil Babka
2021-12-01 19:07   ` Matthew Wilcox
2021-12-01 19:07     ` Matthew Wilcox
2021-12-01 19:45     ` Robin Murphy
2021-12-01 19:45       ` Robin Murphy
2021-12-01 18:15 ` [PATCH v2 30/33] mm: Remove slab from struct page Vlastimil Babka
2021-12-14 14:46   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 31/33] mm/sl*b: Differentiate struct slab fields by sl*b implementations Vlastimil Babka
2021-12-10 16:37   ` Hyeonggon Yoo
2021-12-10 18:26     ` Vlastimil Babka
2021-12-11 11:55       ` Hyeonggon Yoo
2021-12-11 16:52         ` Matthew Wilcox
2021-12-12  5:54           ` Hyeonggon Yoo
2021-12-11 16:23       ` Matthew Wilcox
2021-12-12  6:00         ` Hyeonggon Yoo
2021-12-12  6:52   ` [PATCH] mm/slob: Remove unnecessary page_mapcount_reset() function call Hyeonggon Yoo
2021-12-14 11:51     ` Vlastimil Babka
2021-12-01 18:15 ` [PATCH v2 32/33] mm/slub: Simplify struct slab slabs field definition Vlastimil Babka
2021-12-14 15:06   ` Johannes Weiner
2021-12-01 18:15 ` [PATCH v2 33/33] mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled Vlastimil Babka
2021-12-01 18:39 ` slab tree for next Vlastimil Babka
2021-12-01 20:34   ` Vlastimil Babka
2021-12-02 16:36     ` Vlastimil Babka
2021-12-02 20:39       ` Stephen Rothwell
2022-01-04  0:21   ` Vlastimil Babka
2022-01-04  8:44     ` Stephen Rothwell
2023-08-29  9:55     ` Vlastimil Babka
2023-08-29 21:33       ` Stephen Rothwell
2021-12-02 12:25 ` [PATCH v2 00/33] Separate struct slab from struct page Vlastimil Babka
2021-12-02 12:25   ` Vlastimil Babka
2021-12-02 12:25   ` Vlastimil Babka
2021-12-14 12:57 ` Vlastimil Babka
2021-12-14 12:57   ` Vlastimil Babka
2021-12-14 12:57   ` Vlastimil Babka
2021-12-14 14:38   ` Hyeonggon Yoo
2021-12-14 14:38     ` Hyeonggon Yoo
2021-12-14 14:38     ` Hyeonggon Yoo
2021-12-14 14:43     ` Vlastimil Babka
2021-12-14 14:43       ` Vlastimil Babka
2021-12-14 14:43       ` Vlastimil Babka
2021-12-15  3:47       ` Hyeonggon Yoo
2021-12-15  3:47         ` Hyeonggon Yoo
2021-12-15  3:47         ` Hyeonggon Yoo
2021-12-15  1:03   ` Roman Gushchin
2021-12-15  1:03     ` Roman Gushchin via iommu
2021-12-15  1:03     ` Roman Gushchin via iommu
2021-12-15 23:38     ` Roman Gushchin
2021-12-15 23:38       ` Roman Gushchin
2021-12-15 23:38       ` Roman Gushchin via iommu
2021-12-16  9:19       ` Vlastimil Babka
2021-12-16  9:19         ` Vlastimil Babka
2021-12-16  9:19         ` Vlastimil Babka
2021-12-20  0:47       ` Vlastimil Babka
2021-12-20  0:47         ` Vlastimil Babka
2021-12-20  0:47         ` Vlastimil Babka
2021-12-20  1:42         ` Matthew Wilcox
2021-12-20  1:42           ` Matthew Wilcox
2021-12-20  1:42           ` Matthew Wilcox
2021-12-20  0:24     ` Vlastimil Babka
2021-12-20  0:24       ` Vlastimil Babka
2021-12-20  0:24       ` Vlastimil Babka
2021-12-16 15:00   ` Hyeonggon Yoo
2021-12-16 15:00     ` Hyeonggon Yoo
2021-12-16 15:00     ` Hyeonggon Yoo
2021-12-20 23:58     ` Vlastimil Babka
2021-12-20 23:58       ` Vlastimil Babka
2021-12-20 23:58       ` Vlastimil Babka
2021-12-21 17:25       ` Robin Murphy
2021-12-21 17:25         ` Robin Murphy
2021-12-21 17:25         ` Robin Murphy
2021-12-22  7:36       ` Hyeonggon Yoo
2021-12-22  7:36         ` Hyeonggon Yoo
2021-12-22  7:36         ` Hyeonggon Yoo
2021-12-22 16:56   ` Vlastimil Babka
2021-12-22 16:56     ` Vlastimil Babka
2021-12-22 16:56     ` Vlastimil Babka
2021-12-25  9:16     ` Hyeonggon Yoo
2021-12-25  9:16       ` Hyeonggon Yoo
2021-12-25  9:16       ` Hyeonggon Yoo
2021-12-25 17:53       ` Matthew Wilcox
2021-12-25 17:53         ` Matthew Wilcox
2021-12-25 17:53         ` Matthew Wilcox
2021-12-27  2:43         ` Hyeonggon Yoo
2021-12-27  2:43           ` Hyeonggon Yoo
2021-12-27  2:43           ` Hyeonggon Yoo
2021-12-29 11:22     ` Hyeonggon Yoo
2021-12-29 11:22       ` Hyeonggon Yoo
2021-12-29 11:22       ` Hyeonggon Yoo
2022-01-03 17:56       ` Vlastimil Babka
2022-01-03 17:56         ` Vlastimil Babka
2022-01-03 17:56         ` Vlastimil Babka

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CA+fCnZd8oD2nEB0C+D73mQqJobaVY_82gnU9Lfu_JydDZ21sQQ@mail.gmail.com \
    --to=andreyknvl@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=cl@linux.com \
    --cc=dvyukov@google.com \
    --cc=glider@google.com \
    --cc=iamjoonsoo.kim@lge.com \
    --cc=kasan-dev@googlegroups.com \
    --cc=linux-mm@kvack.org \
    --cc=patches@lists.linux.dev \
    --cc=penberg@kernel.org \
    --cc=rientjes@google.com \
    --cc=ryabinin.a.a@gmail.com \
    --cc=vbabka@suse.cz \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.