linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3 0/2] provide the flexibility to enable KFENCE
@ 2022-03-07  7:45 Tianchen Ding
  2022-03-07  7:45 ` [PATCH v3 1/2] kfence: Allow re-enabling KFENCE after system startup Tianchen Ding
  2022-03-07  7:45 ` [PATCH v3 2/2] kfence: Alloc kfence_pool " Tianchen Ding
  0 siblings, 2 replies; 6+ messages in thread
From: Tianchen Ding @ 2022-03-07  7:45 UTC (permalink / raw)
  To: Alexander Potapenko, Marco Elver, Dmitry Vyukov, Andrew Morton
  Cc: kasan-dev, linux-mm, linux-kernel

This is v3 for (re-)enabling KFENCE.

If CONFIG_CONTIG_ALLOC is not supported, we fall back to trying
alloc_pages_exact(). Allocating pages in this way is limited by
MAX_ORDER (default 11). So we will not support allocating the kfence pool
after system startup with a large KFENCE_NUM_OBJECTS.

When handling failures in kfence_init_pool_late(), we pair
free_pages_exact() to alloc_pages_exact() for compatibility
consideration, though it actually does the same as free_contig_range().

v3:
Use alloc_pages_exact() instead of alloc_contig_pages()
if CONFIG_CONTIG_ALLOC is not defined.

v2: https://lore.kernel.org/all/20220305144858.17040-1-dtcccc@linux.alibaba.com/
Take KFENCE_WARN_ON() into account. Do not allow re-enabling KFENCE
if it was once disabled by a warning.
Modify func names and comments.

RFC/v1: https://lore.kernel.org/all/20220303031505.28495-1-dtcccc@linux.alibaba.com/

Tianchen Ding (2):
  kfence: Allow re-enabling KFENCE after system startup
  kfence: Alloc kfence_pool after system startup

 mm/kfence/core.c | 126 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 105 insertions(+), 21 deletions(-)

-- 
2.27.0



^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v3 1/2] kfence: Allow re-enabling KFENCE after system startup
  2022-03-07  7:45 [PATCH v3 0/2] provide the flexibility to enable KFENCE Tianchen Ding
@ 2022-03-07  7:45 ` Tianchen Ding
  2022-03-07 14:08   ` Marco Elver
  2022-03-07  7:45 ` [PATCH v3 2/2] kfence: Alloc kfence_pool " Tianchen Ding
  1 sibling, 1 reply; 6+ messages in thread
From: Tianchen Ding @ 2022-03-07  7:45 UTC (permalink / raw)
  To: Alexander Potapenko, Marco Elver, Dmitry Vyukov, Andrew Morton
  Cc: kasan-dev, linux-mm, linux-kernel

Once KFENCE is disabled by:
echo 0 > /sys/module/kfence/parameters/sample_interval
KFENCE can never be re-enabled until the next reboot.

Allow re-enabling it by writing a positive number to sample_interval.

Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
---
 mm/kfence/core.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 13128fa13062..caa4e84c8b79 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -38,14 +38,17 @@
 #define KFENCE_WARN_ON(cond)                                                   \
 	({                                                                     \
 		const bool __cond = WARN_ON(cond);                             \
-		if (unlikely(__cond))                                          \
+		if (unlikely(__cond)) {                                        \
 			WRITE_ONCE(kfence_enabled, false);                     \
+			disabled_by_warn = true;                               \
+		}                                                              \
 		__cond;                                                        \
 	})
 
 /* === Data ================================================================= */
 
 static bool kfence_enabled __read_mostly;
+static bool disabled_by_warn __read_mostly;
 
 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
@@ -55,6 +58,7 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
 #endif
 #define MODULE_PARAM_PREFIX "kfence."
 
+static int kfence_enable_late(void);
 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
 {
 	unsigned long num;
@@ -65,10 +69,11 @@ static int param_set_sample_interval(const char *val, const struct kernel_param
 
 	if (!num) /* Using 0 to indicate KFENCE is disabled. */
 		WRITE_ONCE(kfence_enabled, false);
-	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
-		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */
 
 	*((unsigned long *)kp->arg) = num;
+
+	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
+		return disabled_by_warn ? -EINVAL : kfence_enable_late();
 	return 0;
 }
 
@@ -787,6 +792,16 @@ void __init kfence_init(void)
 		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
 }
 
+static int kfence_enable_late(void)
+{
+	if (!__kfence_pool)
+		return -EINVAL;
+
+	WRITE_ONCE(kfence_enabled, true);
+	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	return 0;
+}
+
 void kfence_shutdown_cache(struct kmem_cache *s)
 {
 	unsigned long flags;
-- 
2.27.0



^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v3 2/2] kfence: Alloc kfence_pool after system startup
  2022-03-07  7:45 [PATCH v3 0/2] provide the flexibility to enable KFENCE Tianchen Ding
  2022-03-07  7:45 ` [PATCH v3 1/2] kfence: Allow re-enabling KFENCE after system startup Tianchen Ding
@ 2022-03-07  7:45 ` Tianchen Ding
  2022-03-07 14:08   ` Marco Elver
  2022-03-10  1:43   ` liupeng (DM)
  1 sibling, 2 replies; 6+ messages in thread
From: Tianchen Ding @ 2022-03-07  7:45 UTC (permalink / raw)
  To: Alexander Potapenko, Marco Elver, Dmitry Vyukov, Andrew Morton
  Cc: kasan-dev, linux-mm, linux-kernel

Allow enabling KFENCE after system startup by allocating its pool via the
page allocator. This provides the flexibility to enable KFENCE even if it
wasn't enabled at boot time.

Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
---
 mm/kfence/core.c | 111 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 90 insertions(+), 21 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index caa4e84c8b79..f126b53b9b85 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -96,7 +96,7 @@ static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
 
 /* The pool of pages used for guard pages and objects. */
-char *__kfence_pool __ro_after_init;
+char *__kfence_pool __read_mostly;
 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
 
 /*
@@ -537,17 +537,19 @@ static void rcu_guarded_free(struct rcu_head *h)
 	kfence_guarded_free((void *)meta->addr, meta, false);
 }
 
-static bool __init kfence_init_pool(void)
+/*
+ * Initialization of the KFENCE pool after its allocation.
+ * Returns 0 on success; otherwise returns the address up to
+ * which partial initialization succeeded.
+ */
+static unsigned long kfence_init_pool(void)
 {
 	unsigned long addr = (unsigned long)__kfence_pool;
 	struct page *pages;
 	int i;
 
-	if (!__kfence_pool)
-		return false;
-
 	if (!arch_kfence_init_pool())
-		goto err;
+		return addr;
 
 	pages = virt_to_page(addr);
 
@@ -565,7 +567,7 @@ static bool __init kfence_init_pool(void)
 
 		/* Verify we do not have a compound head page. */
 		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
-			goto err;
+			return addr;
 
 		__SetPageSlab(&pages[i]);
 	}
@@ -578,7 +580,7 @@ static bool __init kfence_init_pool(void)
 	 */
 	for (i = 0; i < 2; i++) {
 		if (unlikely(!kfence_protect(addr)))
-			goto err;
+			return addr;
 
 		addr += PAGE_SIZE;
 	}
@@ -595,7 +597,7 @@ static bool __init kfence_init_pool(void)
 
 		/* Protect the right redzone. */
 		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
-			goto err;
+			return addr;
 
 		addr += 2 * PAGE_SIZE;
 	}
@@ -608,9 +610,21 @@ static bool __init kfence_init_pool(void)
 	 */
 	kmemleak_free(__kfence_pool);
 
-	return true;
+	return 0;
+}
+
+static bool __init kfence_init_pool_early(void)
+{
+	unsigned long addr;
+
+	if (!__kfence_pool)
+		return false;
+
+	addr = kfence_init_pool();
+
+	if (!addr)
+		return true;
 
-err:
 	/*
 	 * Only release unprotected pages, and do not try to go back and change
 	 * page attributes due to risk of failing to do so as well. If changing
@@ -623,6 +637,26 @@ static bool __init kfence_init_pool(void)
 	return false;
 }
 
+static bool kfence_init_pool_late(void)
+{
+	unsigned long addr, free_size;
+
+	addr = kfence_init_pool();
+
+	if (!addr)
+		return true;
+
+	/* Same as above. */
+	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
+#ifdef CONFIG_CONTIG_ALLOC
+	free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE);
+#else
+	free_pages_exact((void *)addr, free_size);
+#endif
+	__kfence_pool = NULL;
+	return false;
+}
+
 /* === DebugFS Interface ==================================================== */
 
 static int stats_show(struct seq_file *seq, void *v)
@@ -771,31 +805,66 @@ void __init kfence_alloc_pool(void)
 		pr_err("failed to allocate pool\n");
 }
 
+static void kfence_init_enable(void)
+{
+	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
+		static_branch_enable(&kfence_allocation_key);
+	WRITE_ONCE(kfence_enabled, true);
+	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
+		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
+		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
+}
+
 void __init kfence_init(void)
 {
+	stack_hash_seed = (u32)random_get_entropy();
+
 	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
 	if (!kfence_sample_interval)
 		return;
 
-	stack_hash_seed = (u32)random_get_entropy();
-	if (!kfence_init_pool()) {
+	if (!kfence_init_pool_early()) {
 		pr_err("%s failed\n", __func__);
 		return;
 	}
 
-	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
-		static_branch_enable(&kfence_allocation_key);
-	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
-	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
-		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
-		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
+	kfence_init_enable();
+}
+
+static int kfence_init_late(void)
+{
+	const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
+#ifdef CONFIG_CONTIG_ALLOC
+	struct page *pages;
+
+	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
+	if (!pages)
+		return -ENOMEM;
+	__kfence_pool = page_to_virt(pages);
+#else
+	if (nr_pages > MAX_ORDER_NR_PAGES) {
+		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
+		return -EINVAL;
+	}
+	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+	if (!__kfence_pool)
+		return -ENOMEM;
+#endif
+
+	if (!kfence_init_pool_late()) {
+		pr_err("%s failed\n", __func__);
+		return -EBUSY;
+	}
+
+	kfence_init_enable();
+	return 0;
 }
 
 static int kfence_enable_late(void)
 {
 	if (!__kfence_pool)
-		return -EINVAL;
+		return kfence_init_late();
 
 	WRITE_ONCE(kfence_enabled, true);
 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
-- 
2.27.0



^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] kfence: Allow re-enabling KFENCE after system startup
  2022-03-07  7:45 ` [PATCH v3 1/2] kfence: Allow re-enabling KFENCE after system startup Tianchen Ding
@ 2022-03-07 14:08   ` Marco Elver
  0 siblings, 0 replies; 6+ messages in thread
From: Marco Elver @ 2022-03-07 14:08 UTC (permalink / raw)
  To: Tianchen Ding
  Cc: Alexander Potapenko, Dmitry Vyukov, Andrew Morton, kasan-dev,
	linux-mm, linux-kernel

On Mon, 7 Mar 2022 at 08:45, Tianchen Ding <dtcccc@linux.alibaba.com> wrote:
>
> If once KFENCE is disabled by:
> echo 0 > /sys/module/kfence/parameters/sample_interval
> KFENCE could never be re-enabled until next rebooting.
>
> Allow re-enabling it by writing a positive num to sample_interval.
>
> Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>

Reviewed-by: Marco Elver <elver@google.com>


> ---
>  mm/kfence/core.c | 21 ++++++++++++++++++---
>  1 file changed, 18 insertions(+), 3 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 13128fa13062..caa4e84c8b79 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -38,14 +38,17 @@
>  #define KFENCE_WARN_ON(cond)                                                   \
>         ({                                                                     \
>                 const bool __cond = WARN_ON(cond);                             \
> -               if (unlikely(__cond))                                          \
> +               if (unlikely(__cond)) {                                        \
>                         WRITE_ONCE(kfence_enabled, false);                     \
> +                       disabled_by_warn = true;                               \
> +               }                                                              \
>                 __cond;                                                        \
>         })
>
>  /* === Data ================================================================= */
>
>  static bool kfence_enabled __read_mostly;
> +static bool disabled_by_warn __read_mostly;
>
>  unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
>  EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
> @@ -55,6 +58,7 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
>  #endif
>  #define MODULE_PARAM_PREFIX "kfence."
>
> +static int kfence_enable_late(void);
>  static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
>  {
>         unsigned long num;
> @@ -65,10 +69,11 @@ static int param_set_sample_interval(const char *val, const struct kernel_param
>
>         if (!num) /* Using 0 to indicate KFENCE is disabled. */
>                 WRITE_ONCE(kfence_enabled, false);
> -       else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
> -               return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */
>
>         *((unsigned long *)kp->arg) = num;
> +
> +       if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
> +               return disabled_by_warn ? -EINVAL : kfence_enable_late();
>         return 0;
>  }
>
> @@ -787,6 +792,16 @@ void __init kfence_init(void)
>                 (void *)(__kfence_pool + KFENCE_POOL_SIZE));
>  }
>
> +static int kfence_enable_late(void)
> +{
> +       if (!__kfence_pool)
> +               return -EINVAL;
> +
> +       WRITE_ONCE(kfence_enabled, true);
> +       queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
> +       return 0;
> +}
> +
>  void kfence_shutdown_cache(struct kmem_cache *s)
>  {
>         unsigned long flags;
> --
> 2.27.0
>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 2/2] kfence: Alloc kfence_pool after system startup
  2022-03-07  7:45 ` [PATCH v3 2/2] kfence: Alloc kfence_pool " Tianchen Ding
@ 2022-03-07 14:08   ` Marco Elver
  2022-03-10  1:43   ` liupeng (DM)
  1 sibling, 0 replies; 6+ messages in thread
From: Marco Elver @ 2022-03-07 14:08 UTC (permalink / raw)
  To: Tianchen Ding
  Cc: Alexander Potapenko, Dmitry Vyukov, Andrew Morton, kasan-dev,
	linux-mm, linux-kernel

On Mon, 7 Mar 2022 at 08:45, Tianchen Ding <dtcccc@linux.alibaba.com> wrote:
>
> Allow enabling KFENCE after system startup by allocating its pool via the
> page allocator. This provides the flexibility to enable KFENCE even if it
> wasn't enabled at boot time.
>
> Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>

This looks good, thanks!

Reviewed-by: Marco Elver <elver@google.com>
Tested-by: Marco Elver <elver@google.com>


> ---
>  mm/kfence/core.c | 111 ++++++++++++++++++++++++++++++++++++++---------
>  1 file changed, 90 insertions(+), 21 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index caa4e84c8b79..f126b53b9b85 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -96,7 +96,7 @@ static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
>  module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
>
>  /* The pool of pages used for guard pages and objects. */
> -char *__kfence_pool __ro_after_init;
> +char *__kfence_pool __read_mostly;
>  EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
>
>  /*
> @@ -537,17 +537,19 @@ static void rcu_guarded_free(struct rcu_head *h)
>         kfence_guarded_free((void *)meta->addr, meta, false);
>  }
>
> -static bool __init kfence_init_pool(void)
> +/*
> + * Initialization of the KFENCE pool after its allocation.
> + * Returns 0 on success; otherwise returns the address up to
> + * which partial initialization succeeded.
> + */
> +static unsigned long kfence_init_pool(void)
>  {
>         unsigned long addr = (unsigned long)__kfence_pool;
>         struct page *pages;
>         int i;
>
> -       if (!__kfence_pool)
> -               return false;
> -
>         if (!arch_kfence_init_pool())
> -               goto err;
> +               return addr;
>
>         pages = virt_to_page(addr);
>
> @@ -565,7 +567,7 @@ static bool __init kfence_init_pool(void)
>
>                 /* Verify we do not have a compound head page. */
>                 if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
> -                       goto err;
> +                       return addr;
>
>                 __SetPageSlab(&pages[i]);
>         }
> @@ -578,7 +580,7 @@ static bool __init kfence_init_pool(void)
>          */
>         for (i = 0; i < 2; i++) {
>                 if (unlikely(!kfence_protect(addr)))
> -                       goto err;
> +                       return addr;
>
>                 addr += PAGE_SIZE;
>         }
> @@ -595,7 +597,7 @@ static bool __init kfence_init_pool(void)
>
>                 /* Protect the right redzone. */
>                 if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
> -                       goto err;
> +                       return addr;
>
>                 addr += 2 * PAGE_SIZE;
>         }
> @@ -608,9 +610,21 @@ static bool __init kfence_init_pool(void)
>          */
>         kmemleak_free(__kfence_pool);
>
> -       return true;
> +       return 0;
> +}
> +
> +static bool __init kfence_init_pool_early(void)
> +{
> +       unsigned long addr;
> +
> +       if (!__kfence_pool)
> +               return false;
> +
> +       addr = kfence_init_pool();
> +
> +       if (!addr)
> +               return true;
>
> -err:
>         /*
>          * Only release unprotected pages, and do not try to go back and change
>          * page attributes due to risk of failing to do so as well. If changing
> @@ -623,6 +637,26 @@ static bool __init kfence_init_pool(void)
>         return false;
>  }
>
> +static bool kfence_init_pool_late(void)
> +{
> +       unsigned long addr, free_size;
> +
> +       addr = kfence_init_pool();
> +
> +       if (!addr)
> +               return true;
> +
> +       /* Same as above. */
> +       free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
> +#ifdef CONFIG_CONTIG_ALLOC
> +       free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE);
> +#else
> +       free_pages_exact((void *)addr, free_size);
> +#endif
> +       __kfence_pool = NULL;
> +       return false;
> +}
> +
>  /* === DebugFS Interface ==================================================== */
>
>  static int stats_show(struct seq_file *seq, void *v)
> @@ -771,31 +805,66 @@ void __init kfence_alloc_pool(void)
>                 pr_err("failed to allocate pool\n");
>  }
>
> +static void kfence_init_enable(void)
> +{
> +       if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
> +               static_branch_enable(&kfence_allocation_key);
> +       WRITE_ONCE(kfence_enabled, true);
> +       queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
> +       pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
> +               CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
> +               (void *)(__kfence_pool + KFENCE_POOL_SIZE));
> +}
> +
>  void __init kfence_init(void)
>  {
> +       stack_hash_seed = (u32)random_get_entropy();
> +
>         /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
>         if (!kfence_sample_interval)
>                 return;
>
> -       stack_hash_seed = (u32)random_get_entropy();
> -       if (!kfence_init_pool()) {
> +       if (!kfence_init_pool_early()) {
>                 pr_err("%s failed\n", __func__);
>                 return;
>         }
>
> -       if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
> -               static_branch_enable(&kfence_allocation_key);
> -       WRITE_ONCE(kfence_enabled, true);
> -       queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
> -       pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
> -               CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
> -               (void *)(__kfence_pool + KFENCE_POOL_SIZE));
> +       kfence_init_enable();
> +}
> +
> +static int kfence_init_late(void)
> +{
> +       const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
> +#ifdef CONFIG_CONTIG_ALLOC
> +       struct page *pages;
> +
> +       pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
> +       if (!pages)
> +               return -ENOMEM;
> +       __kfence_pool = page_to_virt(pages);
> +#else
> +       if (nr_pages > MAX_ORDER_NR_PAGES) {
> +               pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
> +               return -EINVAL;
> +       }
> +       __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
> +       if (!__kfence_pool)
> +               return -ENOMEM;
> +#endif
> +
> +       if (!kfence_init_pool_late()) {
> +               pr_err("%s failed\n", __func__);
> +               return -EBUSY;
> +       }
> +
> +       kfence_init_enable();
> +       return 0;
>  }
>
>  static int kfence_enable_late(void)
>  {
>         if (!__kfence_pool)
> -               return -EINVAL;
> +               return kfence_init_late();
>
>         WRITE_ONCE(kfence_enabled, true);
>         queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
> --
> 2.27.0
>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 2/2] kfence: Alloc kfence_pool after system startup
  2022-03-07  7:45 ` [PATCH v3 2/2] kfence: Alloc kfence_pool " Tianchen Ding
  2022-03-07 14:08   ` Marco Elver
@ 2022-03-10  1:43   ` liupeng (DM)
  1 sibling, 0 replies; 6+ messages in thread
From: liupeng (DM) @ 2022-03-10  1:43 UTC (permalink / raw)
  To: Tianchen Ding, Alexander Potapenko, Marco Elver, Dmitry Vyukov,
	Andrew Morton
  Cc: kasan-dev, linux-mm, linux-kernel

On 2022/3/7 15:45, Tianchen Ding wrote:
> Allow enabling KFENCE after system startup by allocating its pool via the
> page allocator. This provides the flexibility to enable KFENCE even if it
> wasn't enabled at boot time.
>
> Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Tested-by: Peng Liu <liupeng256@huawei.com>
> ---
>   mm/kfence/core.c | 111 ++++++++++++++++++++++++++++++++++++++---------
>   1 file changed, 90 insertions(+), 21 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index caa4e84c8b79..f126b53b9b85 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -96,7 +96,7 @@ static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
>   module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
>   
>   /* The pool of pages used for guard pages and objects. */
> -char *__kfence_pool __ro_after_init;
> +char *__kfence_pool __read_mostly;
>   EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
>   
>   /*
> @@ -537,17 +537,19 @@ static void rcu_guarded_free(struct rcu_head *h)
>   	kfence_guarded_free((void *)meta->addr, meta, false);
>   }
>   
> -static bool __init kfence_init_pool(void)
> +/*
> + * Initialization of the KFENCE pool after its allocation.
> + * Returns 0 on success; otherwise returns the address up to
> + * which partial initialization succeeded.
> + */
> +static unsigned long kfence_init_pool(void)
>   {
>   	unsigned long addr = (unsigned long)__kfence_pool;
>   	struct page *pages;
>   	int i;
>   
> -	if (!__kfence_pool)
> -		return false;
> -
>   	if (!arch_kfence_init_pool())
> -		goto err;
> +		return addr;
>   
>   	pages = virt_to_page(addr);
>   
> @@ -565,7 +567,7 @@ static bool __init kfence_init_pool(void)
>   
>   		/* Verify we do not have a compound head page. */
>   		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
> -			goto err;
> +			return addr;
>   
>   		__SetPageSlab(&pages[i]);
>   	}
> @@ -578,7 +580,7 @@ static bool __init kfence_init_pool(void)
>   	 */
>   	for (i = 0; i < 2; i++) {
>   		if (unlikely(!kfence_protect(addr)))
> -			goto err;
> +			return addr;
>   
>   		addr += PAGE_SIZE;
>   	}
> @@ -595,7 +597,7 @@ static bool __init kfence_init_pool(void)
>   
>   		/* Protect the right redzone. */
>   		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
> -			goto err;
> +			return addr;
>   
>   		addr += 2 * PAGE_SIZE;
>   	}
> @@ -608,9 +610,21 @@ static bool __init kfence_init_pool(void)
>   	 */
>   	kmemleak_free(__kfence_pool);
>   
> -	return true;
> +	return 0;
> +}
> +
> +static bool __init kfence_init_pool_early(void)
> +{
> +	unsigned long addr;
> +
> +	if (!__kfence_pool)
> +		return false;
> +
> +	addr = kfence_init_pool();
> +
> +	if (!addr)
> +		return true;
>   
> -err:
>   	/*
>   	 * Only release unprotected pages, and do not try to go back and change
>   	 * page attributes due to risk of failing to do so as well. If changing
> @@ -623,6 +637,26 @@ static bool __init kfence_init_pool(void)
>   	return false;
>   }
>   
> +static bool kfence_init_pool_late(void)
> +{
> +	unsigned long addr, free_size;
> +
> +	addr = kfence_init_pool();
> +
> +	if (!addr)
> +		return true;
> +
> +	/* Same as above. */
> +	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
> +#ifdef CONFIG_CONTIG_ALLOC
> +	free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE);
> +#else
> +	free_pages_exact((void *)addr, free_size);
> +#endif
> +	__kfence_pool = NULL;
> +	return false;
> +}
> +
>   /* === DebugFS Interface ==================================================== */
>   
>   static int stats_show(struct seq_file *seq, void *v)
> @@ -771,31 +805,66 @@ void __init kfence_alloc_pool(void)
>   		pr_err("failed to allocate pool\n");
>   }
>   
> +static void kfence_init_enable(void)
> +{
> +	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
> +		static_branch_enable(&kfence_allocation_key);
> +	WRITE_ONCE(kfence_enabled, true);
> +	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
> +	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
> +		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
> +		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
> +}
> +
>   void __init kfence_init(void)
>   {
> +	stack_hash_seed = (u32)random_get_entropy();
> +
>   	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
>   	if (!kfence_sample_interval)
>   		return;
>   
> -	stack_hash_seed = (u32)random_get_entropy();
> -	if (!kfence_init_pool()) {
> +	if (!kfence_init_pool_early()) {
>   		pr_err("%s failed\n", __func__);
>   		return;
>   	}
>   
> -	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
> -		static_branch_enable(&kfence_allocation_key);
> -	WRITE_ONCE(kfence_enabled, true);
> -	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
> -	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
> -		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
> -		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
> +	kfence_init_enable();
> +}
> +
> +static int kfence_init_late(void)
> +{
> +	const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
> +#ifdef CONFIG_CONTIG_ALLOC
> +	struct page *pages;
> +
> +	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
> +	if (!pages)
> +		return -ENOMEM;
> +	__kfence_pool = page_to_virt(pages);
> +#else
> +	if (nr_pages > MAX_ORDER_NR_PAGES) {
> +		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
> +		return -EINVAL;
> +	}
> +	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
> +	if (!__kfence_pool)
> +		return -ENOMEM;
> +#endif
> +
> +	if (!kfence_init_pool_late()) {
> +		pr_err("%s failed\n", __func__);
> +		return -EBUSY;
> +	}
> +
> +	kfence_init_enable();
> +	return 0;
>   }
>   
>   static int kfence_enable_late(void)
>   {
>   	if (!__kfence_pool)
> -		return -EINVAL;
> +		return kfence_init_late();
>   
>   	WRITE_ONCE(kfence_enabled, true);
>   	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2022-03-10  1:44 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-07  7:45 [PATCH v3 0/2] provide the flexibility to enable KFENCE Tianchen Ding
2022-03-07  7:45 ` [PATCH v3 1/2] kfence: Allow re-enabling KFENCE after system startup Tianchen Ding
2022-03-07 14:08   ` Marco Elver
2022-03-07  7:45 ` [PATCH v3 2/2] kfence: Alloc kfence_pool " Tianchen Ding
2022-03-07 14:08   ` Marco Elver
2022-03-10  1:43   ` liupeng (DM)

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).