* [PATCH RFC 04/10] mm, kfence: insert KFENCE hooks for SLAB
@ 2020-09-07 13:40 ` Marco Elver
0 siblings, 0 replies; 13+ messages in thread
From: Marco Elver @ 2020-09-07 13:40 UTC (permalink / raw)
To: elver, glider, akpm, catalin.marinas, cl, rientjes,
iamjoonsoo.kim, mark.rutland, penberg
Cc: linux-doc, peterz, dave.hansen, linux-mm, edumazet, hpa, will,
corbet, x86, kasan-dev, mingo, linux-arm-kernel, aryabinin,
keescook, paulmck, jannh, andreyknvl, cai, luto, tglx, dvyukov,
gregkh, linux-kernel, bp
From: Alexander Potapenko <glider@google.com>
Inserts KFENCE hooks into the SLAB allocator.
We note the addition of the 'orig_size' argument to slab_alloc*()
functions, to be able to pass the originally requested size to KFENCE.
When KFENCE is disabled, there is no additional overhead, since these
functions are __always_inline.
Co-developed-by: Marco Elver <elver@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Alexander Potapenko <glider@google.com>
---
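Note: a rough sketch of why the disabled case is free follows. The actual stubs
live in the KFENCE core patch of this series; the names below (the static key
and __kfence_alloc()) are only assumed here to illustrate the shape of the hook:

    #ifdef CONFIG_KFENCE
    DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);  /* assumed gate in the KFENCE core */
    void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);

    static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
    {
            /* Take the out-of-line slow path only when this allocation is sampled. */
            if (!static_branch_unlikely(&kfence_allocation_key))
                    return NULL;
            return __kfence_alloc(s, size, flags);
    }
    #else
    static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
    {
            /* Constant NULL: the branch at the call sites and 'orig_size' fold away. */
            return NULL;
    }
    #endif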
mm/slab.c | 46 ++++++++++++++++++++++++++++++++++------------
mm/slab_common.c | 6 +++++-
2 files changed, 39 insertions(+), 13 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 3160dff6fd76..30aba06ae02b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -100,6 +100,7 @@
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
+#include <linux/kfence.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
@@ -3206,7 +3207,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
}
static __always_inline void *
-slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
unsigned long caller)
{
unsigned long save_flags;
@@ -3219,6 +3220,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
if (unlikely(!cachep))
return NULL;
+ ptr = kfence_alloc(cachep, orig_size, flags);
+ if (unlikely(ptr))
+ goto out_hooks;
+
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
@@ -3251,6 +3256,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
memset(ptr, 0, cachep->object_size);
+out_hooks:
slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
return ptr;
}
@@ -3288,7 +3294,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
#endif /* CONFIG_NUMA */
static __always_inline void *
-slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
{
unsigned long save_flags;
void *objp;
@@ -3299,6 +3305,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
if (unlikely(!cachep))
return NULL;
+ objp = kfence_alloc(cachep, orig_size, flags);
+ if (unlikely(objp))
+ goto leave;
+
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
objp = __do_cache_alloc(cachep, flags);
@@ -3309,6 +3319,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
memset(objp, 0, cachep->object_size);
+leave:
slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
return objp;
}
@@ -3414,6 +3425,11 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
unsigned long caller)
{
+ if (kfence_free(objp)) {
+ kmemleak_free_recursive(objp, cachep->flags);
+ return;
+ }
+
/* Put the object into the quarantine, don't touch it for now. */
if (kasan_slab_free(cachep, objp, _RET_IP_))
return;
@@ -3479,7 +3495,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
*/
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- void *ret = slab_alloc(cachep, flags, _RET_IP_);
+ void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
@@ -3512,7 +3528,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
local_irq_disable();
for (i = 0; i < size; i++) {
- void *objp = __do_cache_alloc(s, flags);
+ void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
if (unlikely(!objp))
goto error;
@@ -3545,7 +3561,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
void *ret;
- ret = slab_alloc(cachep, flags, _RET_IP_);
+ ret = slab_alloc(cachep, flags, size, _RET_IP_);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(_RET_IP_, ret,
@@ -3571,7 +3587,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
*/
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
- void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+ void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
@@ -3589,7 +3605,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
{
void *ret;
- ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+ ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc_node(_RET_IP_, ret,
@@ -3650,7 +3666,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- ret = slab_alloc(cachep, flags, caller);
+ ret = slab_alloc(cachep, flags, size, caller);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(caller, ret,
@@ -4138,18 +4154,24 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
bool to_user)
{
struct kmem_cache *cachep;
- unsigned int objnr;
+ unsigned int objnr = 0;
unsigned long offset;
+ bool is_kfence = is_kfence_address(ptr);
ptr = kasan_reset_tag(ptr);
/* Find and validate object. */
cachep = page->slab_cache;
- objnr = obj_to_index(cachep, page, (void *)ptr);
- BUG_ON(objnr >= cachep->num);
+ if (!is_kfence) {
+ objnr = obj_to_index(cachep, page, (void *)ptr);
+ BUG_ON(objnr >= cachep->num);
+ }
/* Find offset within object. */
- offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+ if (is_kfence_address(ptr))
+ offset = ptr - kfence_object_start(ptr);
+ else
+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
/* Allow address range falling entirely within usercopy region. */
if (offset >= cachep->useroffset &&
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f9ccd5dc13f3..6e35e273681a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -12,6 +12,7 @@
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
+#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
@@ -448,6 +449,9 @@ static int shutdown_cache(struct kmem_cache *s)
/* free asan quarantined objects */
kasan_cache_shutdown(s);
+ if (!kfence_shutdown_cache(s))
+ return -EBUSY;
+
if (__kmem_cache_shutdown(s) != 0)
return -EBUSY;
@@ -1171,7 +1175,7 @@ size_t ksize(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
return 0;
- size = __ksize(objp);
+ size = kfence_ksize(objp) ?: __ksize(objp);
/*
* We assume that ksize callers could use whole allocated area,
* so we need to unpoison this area.
--
2.28.0.526.ge36021eeef-goog
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH RFC 04/10] mm, kfence: insert KFENCE hooks for SLAB
2020-09-07 13:40 ` Marco Elver
@ 2020-09-11 7:17 ` Dmitry Vyukov
-1 siblings, 0 replies; 13+ messages in thread
From: Dmitry Vyukov @ 2020-09-11 7:17 UTC (permalink / raw)
To: Marco Elver
Cc: Alexander Potapenko, Andrew Morton, Catalin Marinas,
Christoph Lameter, David Rientjes, Joonsoo Kim, Mark Rutland,
Pekka Enberg, H. Peter Anvin, Paul E. McKenney, Andrey Konovalov,
Andrey Ryabinin, Andy Lutomirski, Borislav Petkov, Dave Hansen,
Eric Dumazet, Greg Kroah-Hartman, Ingo Molnar, Jann Horn,
Jonathan Corbet, Kees Cook, Peter Zijlstra, Qian Cai,
Thomas Gleixner, Will Deacon, the arch/x86 maintainers,
open list:DOCUMENTATION, LKML, kasan-dev, Linux ARM, Linux-MM
On Mon, Sep 7, 2020 at 3:41 PM Marco Elver <elver@google.com> wrote:
>
> From: Alexander Potapenko <glider@google.com>
>
> Inserts KFENCE hooks into the SLAB allocator.
>
> We note the addition of the 'orig_size' argument to slab_alloc*()
> functions, to be able to pass the originally requested size to KFENCE.
> When KFENCE is disabled, there is no additional overhead, since these
> functions are __always_inline.
>
> Co-developed-by: Marco Elver <elver@google.com>
> Signed-off-by: Marco Elver <elver@google.com>
> Signed-off-by: Alexander Potapenko <glider@google.com>
> ---
> mm/slab.c | 46 ++++++++++++++++++++++++++++++++++------------
> mm/slab_common.c | 6 +++++-
> 2 files changed, 39 insertions(+), 13 deletions(-)
>
> diff --git a/mm/slab.c b/mm/slab.c
> index 3160dff6fd76..30aba06ae02b 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -100,6 +100,7 @@
> #include <linux/seq_file.h>
> #include <linux/notifier.h>
> #include <linux/kallsyms.h>
> +#include <linux/kfence.h>
> #include <linux/cpu.h>
> #include <linux/sysctl.h>
> #include <linux/module.h>
> @@ -3206,7 +3207,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
> }
>
> static __always_inline void *
> -slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
> unsigned long caller)
> {
> unsigned long save_flags;
> @@ -3219,6 +3220,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> if (unlikely(!cachep))
> return NULL;
>
> + ptr = kfence_alloc(cachep, orig_size, flags);
> + if (unlikely(ptr))
> + goto out_hooks;
> +
> cache_alloc_debugcheck_before(cachep, flags);
> local_irq_save(save_flags);
>
> @@ -3251,6 +3256,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
> memset(ptr, 0, cachep->object_size);
>
> +out_hooks:
> slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
> return ptr;
> }
> @@ -3288,7 +3294,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> #endif /* CONFIG_NUMA */
>
> static __always_inline void *
> -slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> {
> unsigned long save_flags;
> void *objp;
> @@ -3299,6 +3305,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> if (unlikely(!cachep))
> return NULL;
>
> + objp = kfence_alloc(cachep, orig_size, flags);
> + if (unlikely(objp))
> + goto leave;
> +
> cache_alloc_debugcheck_before(cachep, flags);
> local_irq_save(save_flags);
> objp = __do_cache_alloc(cachep, flags);
> @@ -3309,6 +3319,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
> memset(objp, 0, cachep->object_size);
>
> +leave:
> slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
> return objp;
> }
> @@ -3414,6 +3425,11 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
> static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
> unsigned long caller)
> {
> + if (kfence_free(objp)) {
> + kmemleak_free_recursive(objp, cachep->flags);
> + return;
> + }
> +
> /* Put the object into the quarantine, don't touch it for now. */
> if (kasan_slab_free(cachep, objp, _RET_IP_))
> return;
> @@ -3479,7 +3495,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
> */
> void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> {
> - void *ret = slab_alloc(cachep, flags, _RET_IP_);
> + void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
It's kinda minor, but since we are talking about malloc fast path:
will passing 0 instead of cachep->object_size (here and everywhere
else) and then using cachep->object_size on the slow path if 0 is
passed as size improve codegen?
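Concretely (just a sketch, untested; __kfence_alloc() here stands in for the
out-of-line KFENCE entry point, which is not part of this patch), the call
sites would become e.g.:

    -       void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
    +       void *ret = slab_alloc(cachep, flags, 0, _RET_IP_);

and the slow path would resolve the size itself:

    /* in __kfence_alloc(), before 'size' is used: */
    if (!size)
            size = s->object_size;  /* 0 from the fast path means "whole object" */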
> trace_kmem_cache_alloc(_RET_IP_, ret,
> cachep->object_size, cachep->size, flags);
> @@ -3512,7 +3528,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
>
> local_irq_disable();
> for (i = 0; i < size; i++) {
> - void *objp = __do_cache_alloc(s, flags);
> + void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
>
> if (unlikely(!objp))
> goto error;
> @@ -3545,7 +3561,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
> {
> void *ret;
>
> - ret = slab_alloc(cachep, flags, _RET_IP_);
> + ret = slab_alloc(cachep, flags, size, _RET_IP_);
>
> ret = kasan_kmalloc(cachep, ret, size, flags);
> trace_kmalloc(_RET_IP_, ret,
> @@ -3571,7 +3587,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
> */
> void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
> {
> - void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
> + void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
>
> trace_kmem_cache_alloc_node(_RET_IP_, ret,
> cachep->object_size, cachep->size,
> @@ -3589,7 +3605,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
> {
> void *ret;
>
> - ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
> + ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
>
> ret = kasan_kmalloc(cachep, ret, size, flags);
> trace_kmalloc_node(_RET_IP_, ret,
> @@ -3650,7 +3666,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
> cachep = kmalloc_slab(size, flags);
> if (unlikely(ZERO_OR_NULL_PTR(cachep)))
> return cachep;
> - ret = slab_alloc(cachep, flags, caller);
> + ret = slab_alloc(cachep, flags, size, caller);
>
> ret = kasan_kmalloc(cachep, ret, size, flags);
> trace_kmalloc(caller, ret,
> @@ -4138,18 +4154,24 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
> bool to_user)
> {
> struct kmem_cache *cachep;
> - unsigned int objnr;
> + unsigned int objnr = 0;
> unsigned long offset;
> + bool is_kfence = is_kfence_address(ptr);
>
> ptr = kasan_reset_tag(ptr);
>
> /* Find and validate object. */
> cachep = page->slab_cache;
> - objnr = obj_to_index(cachep, page, (void *)ptr);
> - BUG_ON(objnr >= cachep->num);
> + if (!is_kfence) {
> + objnr = obj_to_index(cachep, page, (void *)ptr);
> + BUG_ON(objnr >= cachep->num);
> + }
>
> /* Find offset within object. */
> - offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
> + if (is_kfence_address(ptr))
> + offset = ptr - kfence_object_start(ptr);
> + else
> + offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
>
> /* Allow address range falling entirely within usercopy region. */
> if (offset >= cachep->useroffset &&
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index f9ccd5dc13f3..6e35e273681a 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -12,6 +12,7 @@
> #include <linux/memory.h>
> #include <linux/cache.h>
> #include <linux/compiler.h>
> +#include <linux/kfence.h>
> #include <linux/module.h>
> #include <linux/cpu.h>
> #include <linux/uaccess.h>
> @@ -448,6 +449,9 @@ static int shutdown_cache(struct kmem_cache *s)
> /* free asan quarantined objects */
> kasan_cache_shutdown(s);
>
> + if (!kfence_shutdown_cache(s))
> + return -EBUSY;
> +
> if (__kmem_cache_shutdown(s) != 0)
> return -EBUSY;
>
> @@ -1171,7 +1175,7 @@ size_t ksize(const void *objp)
> if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
> return 0;
>
> - size = __ksize(objp);
> + size = kfence_ksize(objp) ?: __ksize(objp);
> /*
> * We assume that ksize callers could use whole allocated area,
> * so we need to unpoison this area.
> --
> 2.28.0.526.ge36021eeef-goog
>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH RFC 04/10] mm, kfence: insert KFENCE hooks for SLAB
2020-09-11 7:17 ` Dmitry Vyukov
@ 2020-09-11 12:24 ` Marco Elver
-1 siblings, 0 replies; 13+ messages in thread
From: Marco Elver @ 2020-09-11 12:24 UTC (permalink / raw)
To: Dmitry Vyukov
Cc: Alexander Potapenko, Andrew Morton, Catalin Marinas,
Christoph Lameter, David Rientjes, Joonsoo Kim, Mark Rutland,
Pekka Enberg, H. Peter Anvin, Paul E. McKenney, Andrey Konovalov,
Andrey Ryabinin, Andy Lutomirski, Borislav Petkov, Dave Hansen,
Eric Dumazet, Greg Kroah-Hartman, Ingo Molnar, Jann Horn,
Jonathan Corbet, Kees Cook, Peter Zijlstra, Qian Cai,
Thomas Gleixner, Will Deacon, the arch/x86 maintainers,
open list:DOCUMENTATION, LKML, kasan-dev, Linux ARM, Linux-MM
On Fri, 11 Sep 2020 at 09:17, Dmitry Vyukov <dvyukov@google.com> wrote:
>
> On Mon, Sep 7, 2020 at 3:41 PM Marco Elver <elver@google.com> wrote:
> >
> > From: Alexander Potapenko <glider@google.com>
> >
> > Inserts KFENCE hooks into the SLAB allocator.
> >
> > We note the addition of the 'orig_size' argument to slab_alloc*()
> > functions, to be able to pass the originally requested size to KFENCE.
> > When KFENCE is disabled, there is no additional overhead, since these
> > functions are __always_inline.
> >
> > Co-developed-by: Marco Elver <elver@google.com>
> > Signed-off-by: Marco Elver <elver@google.com>
> > Signed-off-by: Alexander Potapenko <glider@google.com>
> > ---
> > mm/slab.c | 46 ++++++++++++++++++++++++++++++++++------------
> > mm/slab_common.c | 6 +++++-
> > 2 files changed, 39 insertions(+), 13 deletions(-)
> >
> > diff --git a/mm/slab.c b/mm/slab.c
> > index 3160dff6fd76..30aba06ae02b 100644
> > --- a/mm/slab.c
> > +++ b/mm/slab.c
> > @@ -100,6 +100,7 @@
> > #include <linux/seq_file.h>
> > #include <linux/notifier.h>
> > #include <linux/kallsyms.h>
> > +#include <linux/kfence.h>
> > #include <linux/cpu.h>
> > #include <linux/sysctl.h>
> > #include <linux/module.h>
> > @@ -3206,7 +3207,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
> > }
> >
> > static __always_inline void *
> > -slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
> > unsigned long caller)
> > {
> > unsigned long save_flags;
> > @@ -3219,6 +3220,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > if (unlikely(!cachep))
> > return NULL;
> >
> > + ptr = kfence_alloc(cachep, orig_size, flags);
> > + if (unlikely(ptr))
> > + goto out_hooks;
> > +
> > cache_alloc_debugcheck_before(cachep, flags);
> > local_irq_save(save_flags);
> >
> > @@ -3251,6 +3256,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
> > memset(ptr, 0, cachep->object_size);
> >
> > +out_hooks:
> > slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
> > return ptr;
> > }
> > @@ -3288,7 +3294,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> > #endif /* CONFIG_NUMA */
> >
> > static __always_inline void *
> > -slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> > {
> > unsigned long save_flags;
> > void *objp;
> > @@ -3299,6 +3305,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > if (unlikely(!cachep))
> > return NULL;
> >
> > + objp = kfence_alloc(cachep, orig_size, flags);
> > + if (unlikely(objp))
> > + goto leave;
> > +
> > cache_alloc_debugcheck_before(cachep, flags);
> > local_irq_save(save_flags);
> > objp = __do_cache_alloc(cachep, flags);
> > @@ -3309,6 +3319,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
> > memset(objp, 0, cachep->object_size);
> >
> > +leave:
> > slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
> > return objp;
> > }
> > @@ -3414,6 +3425,11 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
> > static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
> > unsigned long caller)
> > {
> > + if (kfence_free(objp)) {
> > + kmemleak_free_recursive(objp, cachep->flags);
> > + return;
> > + }
> > +
> > /* Put the object into the quarantine, don't touch it for now. */
> > if (kasan_slab_free(cachep, objp, _RET_IP_))
> > return;
> > @@ -3479,7 +3495,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
> > */
> > void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> > {
> > - void *ret = slab_alloc(cachep, flags, _RET_IP_);
> > + void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
>
>
> It's kinda minor, but since we are talking about malloc fast path:
> will passing 0 instead of cachep->object_size (here and everywhere
> else) and then using cachep->object_size on the slow path if 0 is
> passed as size improve codegen?
It doesn't save us much, maybe 1 instruction based on what I'm looking
at right now. The main worry I have is that the 'orig_size' argument
is now part of slab_alloc, and changing its semantics may cause
problems in future if it's no longer just passed to kfence_alloc().
Today, we can do the 'size = size ?: cache->object_size' trick inside
kfence_alloc(), but at the cost breaking the intuitive semantics of
slab_alloc's orig_size argument for future users. Is it worth it?
Thanks,
-- Marco
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH RFC 04/10] mm, kfence: insert KFENCE hooks for SLAB
@ 2020-09-11 12:24 ` Marco Elver
0 siblings, 0 replies; 13+ messages in thread
From: Marco Elver @ 2020-09-11 12:24 UTC (permalink / raw)
To: Dmitry Vyukov
Cc: Mark Rutland, open list:DOCUMENTATION, Peter Zijlstra,
Catalin Marinas, Dave Hansen, Linux-MM, Eric Dumazet,
Alexander Potapenko, H. Peter Anvin, Christoph Lameter,
Will Deacon, Jonathan Corbet, the arch/x86 maintainers,
kasan-dev, Ingo Molnar, David Rientjes, Andrey Ryabinin,
Kees Cook, Paul E. McKenney, Jann Horn, Andrey Konovalov,
Borislav Petkov, Andy Lutomirski, Thomas Gleixner, Andrew Morton,
Linux ARM, Greg Kroah-Hartman, LKML, Pekka Enberg, Qian Cai,
Joonsoo Kim
On Fri, 11 Sep 2020 at 09:17, Dmitry Vyukov <dvyukov@google.com> wrote:
>
> On Mon, Sep 7, 2020 at 3:41 PM Marco Elver <elver@google.com> wrote:
> >
> > From: Alexander Potapenko <glider@google.com>
> >
> > Inserts KFENCE hooks into the SLAB allocator.
> >
> > We note the addition of the 'orig_size' argument to slab_alloc*()
> > functions, to be able to pass the originally requested size to KFENCE.
> > When KFENCE is disabled, there is no additional overhead, since these
> > functions are __always_inline.
> >
> > Co-developed-by: Marco Elver <elver@google.com>
> > Signed-off-by: Marco Elver <elver@google.com>
> > Signed-off-by: Alexander Potapenko <glider@google.com>
> > ---
> > mm/slab.c | 46 ++++++++++++++++++++++++++++++++++------------
> > mm/slab_common.c | 6 +++++-
> > 2 files changed, 39 insertions(+), 13 deletions(-)
> >
> > diff --git a/mm/slab.c b/mm/slab.c
> > index 3160dff6fd76..30aba06ae02b 100644
> > --- a/mm/slab.c
> > +++ b/mm/slab.c
> > @@ -100,6 +100,7 @@
> > #include <linux/seq_file.h>
> > #include <linux/notifier.h>
> > #include <linux/kallsyms.h>
> > +#include <linux/kfence.h>
> > #include <linux/cpu.h>
> > #include <linux/sysctl.h>
> > #include <linux/module.h>
> > @@ -3206,7 +3207,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
> > }
> >
> > static __always_inline void *
> > -slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
> > unsigned long caller)
> > {
> > unsigned long save_flags;
> > @@ -3219,6 +3220,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > if (unlikely(!cachep))
> > return NULL;
> >
> > + ptr = kfence_alloc(cachep, orig_size, flags);
> > + if (unlikely(ptr))
> > + goto out_hooks;
> > +
> > cache_alloc_debugcheck_before(cachep, flags);
> > local_irq_save(save_flags);
> >
> > @@ -3251,6 +3256,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
> > memset(ptr, 0, cachep->object_size);
> >
> > +out_hooks:
> > slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
> > return ptr;
> > }
> > @@ -3288,7 +3294,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> > #endif /* CONFIG_NUMA */
> >
> > static __always_inline void *
> > -slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> > {
> > unsigned long save_flags;
> > void *objp;
> > @@ -3299,6 +3305,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > if (unlikely(!cachep))
> > return NULL;
> >
> > + objp = kfence_alloc(cachep, orig_size, flags);
> > + if (unlikely(objp))
> > + goto leave;
> > +
> > cache_alloc_debugcheck_before(cachep, flags);
> > local_irq_save(save_flags);
> > objp = __do_cache_alloc(cachep, flags);
> > @@ -3309,6 +3319,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
> > memset(objp, 0, cachep->object_size);
> >
> > +leave:
> > slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
> > return objp;
> > }
> > @@ -3414,6 +3425,11 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
> > static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
> > unsigned long caller)
> > {
> > + if (kfence_free(objp)) {
> > + kmemleak_free_recursive(objp, cachep->flags);
> > + return;
> > + }
> > +
> > /* Put the object into the quarantine, don't touch it for now. */
> > if (kasan_slab_free(cachep, objp, _RET_IP_))
> > return;
> > @@ -3479,7 +3495,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
> > */
> > void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> > {
> > - void *ret = slab_alloc(cachep, flags, _RET_IP_);
> > + void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
>
>
> It's kinda minor, but since we are talking about malloc fast path:
> will passing 0 instead of cachep->object_size (here and everywhere
> else) and then using cachep->object_size on the slow path if 0 is
> passed as size improve codegen?
It doesn't save us much, maybe 1 instruction based on what I'm looking
at right now. The main worry I have is that the 'orig_size' argument
is now part of slab_alloc, and changing its semantics may cause
problems in future if it's no longer just passed to kfence_alloc().
Today, we can do the 'size = size ?: cache->object_size' trick inside
kfence_alloc(), but at the cost breaking the intuitive semantics of
slab_alloc's orig_size argument for future users. Is it worth it?
Thanks,
-- Marco
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH RFC 04/10] mm, kfence: insert KFENCE hooks for SLAB
@ 2020-09-11 12:24 ` Marco Elver
0 siblings, 0 replies; 13+ messages in thread
From: Marco Elver @ 2020-09-11 12:24 UTC (permalink / raw)
To: Dmitry Vyukov
Cc: Alexander Potapenko, Andrew Morton, Catalin Marinas,
Christoph Lameter, David Rientjes, Joonsoo Kim, Mark Rutland,
Pekka Enberg, H. Peter Anvin, Paul E. McKenney, Andrey Konovalov,
Andrey Ryabinin, Andy Lutomirski, Borislav Petkov, Dave Hansen,
Eric Dumazet, Greg Kroah-Hartman, Ingo Molnar, Jann Horn,
Jonathan Corbet, Kees Cook, Peter Zijlstra, Qian Cai,
Thomas Gleixner, Will Deacon, the arch/x86 maintainers,
open list:DOCUMENTATION, LKML, kasan-dev, Linux ARM, Linux-MM
On Fri, 11 Sep 2020 at 09:17, Dmitry Vyukov <dvyukov@google.com> wrote:
>
> On Mon, Sep 7, 2020 at 3:41 PM Marco Elver <elver@google.com> wrote:
> >
> > From: Alexander Potapenko <glider@google.com>
> >
> > Inserts KFENCE hooks into the SLAB allocator.
> >
> > We note the addition of the 'orig_size' argument to slab_alloc*()
> > functions, to be able to pass the originally requested size to KFENCE.
> > When KFENCE is disabled, there is no additional overhead, since these
> > functions are __always_inline.
> >
> > Co-developed-by: Marco Elver <elver@google.com>
> > Signed-off-by: Marco Elver <elver@google.com>
> > Signed-off-by: Alexander Potapenko <glider@google.com>
> > ---
> > mm/slab.c | 46 ++++++++++++++++++++++++++++++++++------------
> > mm/slab_common.c | 6 +++++-
> > 2 files changed, 39 insertions(+), 13 deletions(-)
> >
> > diff --git a/mm/slab.c b/mm/slab.c
> > index 3160dff6fd76..30aba06ae02b 100644
> > --- a/mm/slab.c
> > +++ b/mm/slab.c
> > @@ -100,6 +100,7 @@
> > #include <linux/seq_file.h>
> > #include <linux/notifier.h>
> > #include <linux/kallsyms.h>
> > +#include <linux/kfence.h>
> > #include <linux/cpu.h>
> > #include <linux/sysctl.h>
> > #include <linux/module.h>
> > @@ -3206,7 +3207,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
> > }
> >
> > static __always_inline void *
> > -slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
> > unsigned long caller)
> > {
> > unsigned long save_flags;
> > @@ -3219,6 +3220,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > if (unlikely(!cachep))
> > return NULL;
> >
> > + ptr = kfence_alloc(cachep, orig_size, flags);
> > + if (unlikely(ptr))
> > + goto out_hooks;
> > +
> > cache_alloc_debugcheck_before(cachep, flags);
> > local_irq_save(save_flags);
> >
> > @@ -3251,6 +3256,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
> > memset(ptr, 0, cachep->object_size);
> >
> > +out_hooks:
> > slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
> > return ptr;
> > }
> > @@ -3288,7 +3294,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> > #endif /* CONFIG_NUMA */
> >
> > static __always_inline void *
> > -slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> > {
> > unsigned long save_flags;
> > void *objp;
> > @@ -3299,6 +3305,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > if (unlikely(!cachep))
> > return NULL;
> >
> > + objp = kfence_alloc(cachep, orig_size, flags);
> > + if (unlikely(objp))
> > + goto leave;
> > +
> > cache_alloc_debugcheck_before(cachep, flags);
> > local_irq_save(save_flags);
> > objp = __do_cache_alloc(cachep, flags);
> > @@ -3309,6 +3319,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
> > memset(objp, 0, cachep->object_size);
> >
> > +leave:
> > slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
> > return objp;
> > }
> > @@ -3414,6 +3425,11 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
> > static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
> > unsigned long caller)
> > {
> > + if (kfence_free(objp)) {
> > + kmemleak_free_recursive(objp, cachep->flags);
> > + return;
> > + }
> > +
> > /* Put the object into the quarantine, don't touch it for now. */
> > if (kasan_slab_free(cachep, objp, _RET_IP_))
> > return;
> > @@ -3479,7 +3495,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
> > */
> > void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> > {
> > - void *ret = slab_alloc(cachep, flags, _RET_IP_);
> > + void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
>
>
> It's kinda minor, but since we are talking about malloc fast path:
> will passing 0 instead of cachep->object_size (here and everywhere
> else) and then using cachep->object_size on the slow path if 0 is
> passed as size improve codegen?
It doesn't save us much, maybe 1 instruction based on what I'm looking
at right now. The main worry I have is that the 'orig_size' argument
is now part of slab_alloc, and changing its semantics may cause
problems in future if it's no longer just passed to kfence_alloc().
Today, we can do the 'size = size ?: cache->object_size' trick inside
kfence_alloc(), but at the cost of breaking the intuitive semantics of
slab_alloc's orig_size argument for future users. Is it worth it?
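For concreteness, a rough sketch of what the 0-size variant could look
like on the KFENCE side (hypothetical code: the gate check and the
slow-path helper name below are placeholders, not what this series
actually implements):

	/*
	 * Hypothetical sketch only: allocator fast paths pass 0, and the
	 * rarely-taken KFENCE path falls back to the cache's object size.
	 */
	static __always_inline void *
	kfence_alloc(struct kmem_cache *s, size_t orig_size, gfp_t flags)
	{
		if (likely(!kfence_allocation_gate()))	/* placeholder gate check */
			return NULL;			/* common case: serve from SLAB */
		if (!orig_size)
			orig_size = s->object_size;	/* 0 means "use the cache's object size" */
		return __kfence_alloc(s, orig_size, flags);
	}

Call sites such as kmem_cache_alloc() could then pass a literal 0
instead of loading cachep->object_size, which is where the (small)
codegen saving would come from.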
Thanks,
-- Marco
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH RFC 04/10] mm, kfence: insert KFENCE hooks for SLAB
2020-09-11 12:24 ` Marco Elver
@ 2020-09-11 13:03 ` Dmitry Vyukov
0 siblings, 0 replies; 13+ messages in thread
From: Dmitry Vyukov @ 2020-09-11 13:03 UTC (permalink / raw)
To: Marco Elver
Cc: Alexander Potapenko, Andrew Morton, Catalin Marinas,
Christoph Lameter, David Rientjes, Joonsoo Kim, Mark Rutland,
Pekka Enberg, H. Peter Anvin, Paul E. McKenney, Andrey Konovalov,
Andrey Ryabinin, Andy Lutomirski, Borislav Petkov, Dave Hansen,
Eric Dumazet, Greg Kroah-Hartman, Ingo Molnar, Jann Horn,
Jonathan Corbet, Kees Cook, Peter Zijlstra, Qian Cai,
Thomas Gleixner, Will Deacon, the arch/x86 maintainers,
open list:DOCUMENTATION, LKML, kasan-dev, Linux ARM, Linux-MM
On Fri, Sep 11, 2020 at 2:24 PM Marco Elver <elver@google.com> wrote:
> > > From: Alexander Potapenko <glider@google.com>
> > >
> > > Inserts KFENCE hooks into the SLAB allocator.
> > >
> > > We note the addition of the 'orig_size' argument to slab_alloc*()
> > > functions, to be able to pass the originally requested size to KFENCE.
> > > When KFENCE is disabled, there is no additional overhead, since these
> > > functions are __always_inline.
> > >
> > > Co-developed-by: Marco Elver <elver@google.com>
> > > Signed-off-by: Marco Elver <elver@google.com>
> > > Signed-off-by: Alexander Potapenko <glider@google.com>
> > > ---
> > > mm/slab.c | 46 ++++++++++++++++++++++++++++++++++------------
> > > mm/slab_common.c | 6 +++++-
> > > 2 files changed, 39 insertions(+), 13 deletions(-)
> > >
> > > diff --git a/mm/slab.c b/mm/slab.c
> > > index 3160dff6fd76..30aba06ae02b 100644
> > > --- a/mm/slab.c
> > > +++ b/mm/slab.c
> > > @@ -100,6 +100,7 @@
> > > #include <linux/seq_file.h>
> > > #include <linux/notifier.h>
> > > #include <linux/kallsyms.h>
> > > +#include <linux/kfence.h>
> > > #include <linux/cpu.h>
> > > #include <linux/sysctl.h>
> > > #include <linux/module.h>
> > > @@ -3206,7 +3207,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
> > > }
> > >
> > > static __always_inline void *
> > > -slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > > +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
> > > unsigned long caller)
> > > {
> > > unsigned long save_flags;
> > > @@ -3219,6 +3220,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > > if (unlikely(!cachep))
> > > return NULL;
> > >
> > > + ptr = kfence_alloc(cachep, orig_size, flags);
> > > + if (unlikely(ptr))
> > > + goto out_hooks;
> > > +
> > > cache_alloc_debugcheck_before(cachep, flags);
> > > local_irq_save(save_flags);
> > >
> > > @@ -3251,6 +3256,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
> > > if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
> > > memset(ptr, 0, cachep->object_size);
> > >
> > > +out_hooks:
> > > slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
> > > return ptr;
> > > }
> > > @@ -3288,7 +3294,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> > > #endif /* CONFIG_NUMA */
> > >
> > > static __always_inline void *
> > > -slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > > +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> > > {
> > > unsigned long save_flags;
> > > void *objp;
> > > @@ -3299,6 +3305,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > > if (unlikely(!cachep))
> > > return NULL;
> > >
> > > + objp = kfence_alloc(cachep, orig_size, flags);
> > > + if (unlikely(objp))
> > > + goto leave;
> > > +
> > > cache_alloc_debugcheck_before(cachep, flags);
> > > local_irq_save(save_flags);
> > > objp = __do_cache_alloc(cachep, flags);
> > > @@ -3309,6 +3319,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
> > > if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
> > > memset(objp, 0, cachep->object_size);
> > >
> > > +leave:
> > > slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
> > > return objp;
> > > }
> > > @@ -3414,6 +3425,11 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
> > > static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
> > > unsigned long caller)
> > > {
> > > + if (kfence_free(objp)) {
> > > + kmemleak_free_recursive(objp, cachep->flags);
> > > + return;
> > > + }
> > > +
> > > /* Put the object into the quarantine, don't touch it for now. */
> > > if (kasan_slab_free(cachep, objp, _RET_IP_))
> > > return;
> > > @@ -3479,7 +3495,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
> > > */
> > > void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> > > {
> > > - void *ret = slab_alloc(cachep, flags, _RET_IP_);
> > > + void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
> >
> >
> > It's kinda minor, but since we are talking about malloc fast path:
> > will passing 0 instead of cachep->object_size (here and everywhere
> > else) and then using cachep->object_size on the slow path if 0 is
> > passed as size improve codegen?
>
> It doesn't save us much, maybe 1 instruction based on what I'm looking
> at right now. The main worry I have is that the 'orig_size' argument
> is now part of slab_alloc, and changing its semantics may cause
> problems in future if it's no longer just passed to kfence_alloc().
> Today, we can do the 'size = size ?: cache->object_size' trick inside
> kfence_alloc(), but at the cost of breaking the intuitive semantics of
> slab_alloc's orig_size argument for future users. Is it worth it?
I don't have an answer to this question. I will leave this to others.
If nobody has strong support for changing the semantics, let's leave it
as is. Maybe keep it in mind as potential ballast.
FWIW, a misuse of the 0 size by other future users would most likely
manifest itself in a fairly straightforward way.
^ permalink raw reply [flat|nested] 13+ messages in thread