From: Marco Elver <elver@google.com>
To: elver@google.com, glider@google.com, akpm@linux-foundation.org,
	catalin.marinas@arm.com, cl@linux.com, rientjes@google.com,
	iamjoonsoo.kim@lge.com, mark.rutland@arm.com, penberg@kernel.org
Cc: hpa@zytor.com, paulmck@kernel.org, andreyknvl@google.com,
	aryabinin@virtuozzo.com, luto@kernel.org, bp@alien8.de,
	dave.hansen@linux.intel.com, dvyukov@google.com, edumazet@google.com,
	gregkh@linuxfoundation.org, mingo@redhat.com, jannh@google.com,
	corbet@lwn.net, keescook@chromium.org, peterz@infradead.org,
	cai@lca.pw, tglx@linutronix.de, will@kernel.org, x86@kernel.org,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	kasan-dev@googlegroups.com, linux-arm-kernel@lists.infradead.org,
	linux-mm@kvack.org
Subject: [PATCH RFC 05/10] mm, kfence: insert KFENCE hooks for SLUB
Date: Mon, 7 Sep 2020 15:40:50 +0200
Message-ID: <20200907134055.2878499-6-elver@google.com>
In-Reply-To: <20200907134055.2878499-1-elver@google.com>

From: Alexander Potapenko <glider@google.com>

Inserts KFENCE hooks into the SLUB allocator.

Note the addition of an 'orig_size' argument to the slab_alloc*()
functions, so that the originally requested allocation size can be
passed to KFENCE.

When KFENCE is disabled, there is no additional overhead, since these
functions are __always_inline.

Co-developed-by: Marco Elver <elver@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Alexander Potapenko <glider@google.com>
---
 mm/slub.c | 72 ++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 53 insertions(+), 19 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d4177aecedf6..5c5a13a7857c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
+#include <linux/kfence.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
@@ -1557,6 +1558,11 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 	void *old_tail = *tail ? *tail : *head;
 	int rsize;
 
+	if (is_kfence_address(next)) {
+		slab_free_hook(s, next);
+		return true;
+	}
+
 	/* Head and tail of the reconstructed freelist */
 	*head = NULL;
 	*tail = NULL;
@@ -2660,7 +2666,8 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
+			  unsigned long addr, struct kmem_cache_cpu *c,
+			  size_t orig_size)
 {
 	void *freelist;
 	struct page *page;
@@ -2763,7 +2770,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  * cpu changes by refetching the per cpu area pointer.
  */
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
+			  unsigned long addr, struct kmem_cache_cpu *c,
+			  size_t orig_size)
 {
 	void *p;
 	unsigned long flags;
@@ -2778,7 +2786,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	p = ___slab_alloc(s, gfpflags, node, addr, c);
+	p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
 	local_irq_restore(flags);
 	return p;
 }
@@ -2805,7 +2813,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
-		gfp_t gfpflags, int node, unsigned long addr)
+		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
 	void *object;
 	struct kmem_cache_cpu *c;
@@ -2816,6 +2824,11 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 	if (!s)
 		return NULL;
+
+	object = kfence_alloc(s, orig_size, gfpflags);
+	if (unlikely(object))
+		goto out;
+
 redo:
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -2853,7 +2866,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	object = c->freelist;
 	page = c->page;
 	if (unlikely(!object || !node_match(page, node))) {
-		object = __slab_alloc(s, gfpflags, node, addr, c);
+		object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
 		stat(s, ALLOC_SLOWPATH);
 	} else {
 		void *next_object = get_freepointer_safe(s, object);
@@ -2889,20 +2902,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(object, 0, s->object_size);
 
+out:
 	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 
 	return object;
 }
 
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, unsigned long addr)
+		gfp_t gfpflags, unsigned long addr, size_t orig_size)
 {
-	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
 				s->size, gfpflags);
@@ -2914,7 +2928,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
@@ -2925,7 +2939,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    s->object_size, s->size, gfpflags, node);
@@ -2939,7 +2953,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
 				    int node, size_t size)
 {
-	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size);
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
@@ -2973,6 +2987,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	stat(s, FREE_SLOWPATH);
 
+	if (kfence_free(head))
+		return;
+
 	if (kmem_cache_debug(s) &&
 	    !free_debug_processing(s, page, head, tail, cnt, addr))
 		return;
@@ -3216,6 +3233,13 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
 
+	if (is_kfence_address(object)) {
+		slab_free_hook(df->s, object);
+		WARN_ON(!kfence_free(object));
+		p[size] = NULL; /* mark object processed */
+		return size;
+	}
+
 	/* Start new detached freelist */
 	df->page = page;
 	set_freepointer(df->s, object, NULL);
@@ -3290,8 +3314,14 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	c = this_cpu_ptr(s->cpu_slab);
 
 	for (i = 0; i < size; i++) {
-		void *object = c->freelist;
+		void *object = kfence_alloc(s, s->object_size, flags);
+		if (unlikely(object)) {
+			p[i] = object;
+			continue;
+		}
+
+		object = c->freelist;
 
 		if (unlikely(!object)) {
 			/*
 			 * We may have removed an object from c->freelist using
@@ -3307,7 +3337,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			 * of re-populating per CPU c->freelist
 			 */
 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
-					    _RET_IP_, c);
+					    _RET_IP_, c, size);
 
 			if (unlikely(!p[i]))
 				goto error;
@@ -3962,7 +3992,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, _RET_IP_);
+	ret = slab_alloc(s, flags, _RET_IP_, size);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -4010,7 +4040,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc_node(s, flags, node, _RET_IP_);
+	ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
@@ -4036,6 +4066,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	struct kmem_cache *s;
 	unsigned int offset;
 	size_t object_size;
+	bool is_kfence = is_kfence_address(ptr);
 
 	ptr = kasan_reset_tag(ptr);
 
@@ -4048,10 +4079,13 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 			       to_user, 0, n);
 
 	/* Find offset within object. */
-	offset = (ptr - page_address(page)) % s->size;
+	if (is_kfence)
+		offset = ptr - kfence_object_start(ptr);
+	else
+		offset = (ptr - page_address(page)) % s->size;
 
 	/* Adjust for redzone and reject if within the redzone. */
-	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
+	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
 		if (offset < s->red_left_pad)
 			usercopy_abort("SLUB object in left red zone",
 				       s->name, to_user, offset, n);
@@ -4460,7 +4494,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, caller);
+	ret = slab_alloc(s, gfpflags, caller, size);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4491,7 +4525,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc_node(s, gfpflags, node, caller);
+	ret = slab_alloc_node(s, gfpflags, node, caller, size);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
-- 
2.28.0.526.ge36021eeef-goog
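A standalone sketch may help readers see why the hooks above cost
nothing when KFENCE is compiled out, and why 'orig_size' has to be
threaded through the __always_inline slab_alloc*() wrappers. This is
userspace C with invented names (sample_alloc, my_alloc_node) and an
invented modulo-64 sampling policy; it is not the real KFENCE API,
whose kfence_alloc() is gated by its own sampling machinery and serves
objects from a dedicated pool of guarded pages:

/*
 * Standalone userspace sketch; NOT kernel code and NOT the real KFENCE
 * API. Names (sample_alloc, my_alloc_node) and the modulo-64 sampling
 * policy are hypothetical, for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

/* Compile-time switch playing the role of CONFIG_KFENCE. */
#define SAMPLE_HOOK_ENABLED 1

static unsigned long allocs, sampled;

/*
 * Stand-in for kfence_alloc(): occasionally satisfy an allocation from
 * a "guarded" pool. It takes the originally requested size, which is
 * the reason the patch threads orig_size through slab_alloc*().
 */
static inline __attribute__((__always_inline__))
void *sample_alloc(size_t orig_size)
{
#if SAMPLE_HOOK_ENABLED
	allocs++;
	if (allocs % 64 == 0) {
		sampled++;
		return malloc(orig_size); /* pretend: a guarded object */
	}
#endif
	/*
	 * With the switch off, the body folds to "return NULL" after
	 * inlining, and the caller's NULL test is optimized away too.
	 * That is the zero-overhead-when-disabled property the commit
	 * message claims for the __always_inline slab_alloc*() path.
	 */
	return NULL;
}

/* Stand-in for slab_alloc_node(): try the hook, else the fast path. */
static inline __attribute__((__always_inline__))
void *my_alloc_node(size_t orig_size)
{
	void *object = sample_alloc(orig_size);

	if (object) /* the unlikely(), sampled case */
		return object;
	return malloc(orig_size); /* normal allocator fast path */
}

int main(void)
{
	unsigned long i;

	for (i = 0; i < 256; i++) {
		void *p = my_alloc_node(32);

		if (!p)
			return 1;
		free(p);
	}
	printf("%lu allocations seen, %lu served by the hook\n",
	       allocs, sampled);
	return 0;
}

The same split explains the free-path hunks in the patch: an object
handed out by the hook does not live in a slab page, so
slab_free_freelist_hook() and build_detached_freelist() first test
is_kfence_address() and route the object to kfence_free() rather than
SLUB's freelist bookkeeping. In the sketch both pools come from
malloc(), so a plain free() suffices.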