All of lore.kernel.org
 help / color / mirror / Atom feed
* [patch] kcsan: Make it RT aware
@ 2021-08-21  6:50 Mike Galbraith
  2021-08-22 16:35 ` Mike Galbraith
  0 siblings, 1 reply; 6+ messages in thread
From: Mike Galbraith @ 2021-08-21  6:50 UTC (permalink / raw)
  To: linux-rt-users; +Cc: Sebastian Andrzej Siewior, Thomas Gleixner


Converting report_filterlist_lock to raw_spinlock_t lets RT report
problem free, but makes allocations in insert_report_filterlist()
problematic.  Solve that via unlock, allocate, relock and restart.

Signed-off-by: Mike Galbraith <efault@gmx.de>
---
 kernel/kcsan/debugfs.c |   62 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 43 insertions(+), 19 deletions(-)

--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -53,7 +53,7 @@ static struct {
 	.sorted		= false,
 	.whitelist	= false,	/* default is blacklist */
 };
-static DEFINE_SPINLOCK(report_filterlist_lock);
+static DEFINE_RAW_SPINLOCK(report_filterlist_lock);

 /*
  * The microbenchmark allows benchmarking KCSAN core runtime only. To run
@@ -110,7 +110,7 @@ bool kcsan_skip_report_debugfs(unsigned
 		return false;
 	func_addr -= offset; /* Get function start */

-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	if (report_filterlist.used == 0)
 		goto out;

@@ -127,7 +127,7 @@ bool kcsan_skip_report_debugfs(unsigned
 		ret = !ret;

 out:
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
 	return ret;
 }

@@ -135,9 +135,9 @@ static void set_report_filterlist_whitel
 {
 	unsigned long flags;

-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	report_filterlist.whitelist = whitelist;
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
 }

 /* Returns 0 on success, error-code otherwise. */
@@ -146,36 +146,60 @@ static ssize_t insert_report_filterlist(
 	unsigned long flags;
 	unsigned long addr = kallsyms_lookup_name(func);
 	ssize_t ret = 0;
+	int is_rt = IS_ENABLED(CONFIG_PREEMPT_RT);

 	if (!addr) {
 		pr_err("could not find function: '%s'\n", func);
 		return -ENOENT;
 	}

-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	if (is_rt && !preemptible())
+		return -ENOMEM;
+
+repeat:
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);

 	if (report_filterlist.addrs == NULL) {
-		/* initial allocation */
-		report_filterlist.addrs =
-			kmalloc_array(report_filterlist.size,
+		unsigned long *array;
+		if (is_rt)
+			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+		array = kmalloc_array(report_filterlist.size,
 				      sizeof(unsigned long), GFP_ATOMIC);
-		if (report_filterlist.addrs == NULL) {
+		if (is_rt)
+			raw_spin_lock_irqsave(&report_filterlist_lock, flags);
+		if (!array) {
 			ret = -ENOMEM;
 			goto out;
 		}
+		if (is_rt && report_filterlist.addrs != NULL)  {
+			/* Someone beat us to it, move along to size check. */
+			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+			kfree(array);
+			goto repeat;
+		}
+		report_filterlist.addrs = array;
 	} else if (report_filterlist.used == report_filterlist.size) {
 		/* resize filterlist */
-		size_t new_size = report_filterlist.size * 2;
-		unsigned long *new_addrs =
-			krealloc(report_filterlist.addrs,
-				 new_size * sizeof(unsigned long), GFP_ATOMIC);
-
+		size_t old_size = report_filterlist.size;
+		size_t new_size = old_size * 2;
+		unsigned long *new_addrs;
+		if (is_rt)
+			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+		new_addrs = krealloc(report_filterlist.addrs,
+				     new_size * sizeof(unsigned long), GFP_ATOMIC);
+		if (is_rt)
+			raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 		if (new_addrs == NULL) {
 			/* leave filterlist itself untouched */
 			ret = -ENOMEM;
 			goto out;
 		}
-
+		if (is_rt && report_filterlist.size != old_size) {
+			/* Someone else resized while we were unlocked, recheck. */
+			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+			kfree(new_addrs);
+			goto repeat;
+		}
 		report_filterlist.size = new_size;
 		report_filterlist.addrs = new_addrs;
 	}
@@ -186,7 +210,7 @@ static ssize_t insert_report_filterlist(
 	report_filterlist.sorted = false;

 out:
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);

 	return ret;
 }
@@ -204,13 +228,13 @@ static int show_info(struct seq_file *fi
 	}

 	/* show filter functions, and filter type */
-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	seq_printf(file, "\n%s functions: %s\n",
 		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
 		   report_filterlist.used == 0 ? "none" : "");
 	for (i = 0; i < report_filterlist.used; ++i)
 		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);

 	return 0;
 }



^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch] kcsan: Make it RT aware
  2021-08-21  6:50 [patch] kcsan: Make it RT aware Mike Galbraith
@ 2021-08-22 16:35 ` Mike Galbraith
  2021-08-22 17:29   ` Vlastimil Babka
  0 siblings, 1 reply; 6+ messages in thread
From: Mike Galbraith @ 2021-08-22 16:35 UTC (permalink / raw)
  To: linux-rt-users; +Cc: Sebastian Andrzej Siewior, Thomas Gleixner

On second thought, burn both patches.  Making K[AC]SAN work with RT
ain't worth the warts.

	-Mike


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch] kcsan: Make it RT aware
  2021-08-22 16:35 ` Mike Galbraith
@ 2021-08-22 17:29   ` Vlastimil Babka
  2021-08-26 15:18     ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 6+ messages in thread
From: Vlastimil Babka @ 2021-08-22 17:29 UTC (permalink / raw)
  To: Mike Galbraith, linux-rt-users; +Cc: Sebastian Andrzej Siewior, Thomas Gleixner

On 8/22/21 6:35 PM, Mike Galbraith wrote:
> On second thought, burn both patches.  Making K[AC]SAN work with RT
> ain't worth the warts.

The stackdepot part of kasan patch will be useful at some point.
slub_debug should eventually switch to stackdepot - it almost did in
mainline already. I'll try to remember and dig that part up when needed.
Thanks.

> 	-Mike
> 


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch] kcsan: Make it RT aware
  2021-08-22 17:29   ` Vlastimil Babka
@ 2021-08-26 15:18     ` Sebastian Andrzej Siewior
  2021-08-26 15:42       ` Mike Galbraith
  0 siblings, 1 reply; 6+ messages in thread
From: Sebastian Andrzej Siewior @ 2021-08-26 15:18 UTC (permalink / raw)
  To: Vlastimil Babka; +Cc: Mike Galbraith, linux-rt-users, Thomas Gleixner

On 2021-08-22 19:29:00 [+0200], Vlastimil Babka wrote:
> On 8/22/21 6:35 PM, Mike Galbraith wrote:
> > On second thought, burn both patches.  Making K[AC]SAN work with RT
> > ain't worth the warts.
> 
> The stackdepot part of kasan patch will be useful at some point.
> slub_debug should eventually switch to stackdepot - it almost did in
> mainline already. I'll try to remember and dig that part up when needed.
> Thanks.

If you want them dropped, sure. 
Regarding KASAN, I wonder if the on_each_cpu() is really needed and why
the workqueue version of it can't be used. Sure, without it it is simpler…
The stackdepot doesn't look that bad. Maybe with pre-allocation like we
have for debug obj.

> > 	-Mike

Sebastian

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch] kcsan: Make it RT aware
  2021-08-26 15:18     ` Sebastian Andrzej Siewior
@ 2021-08-26 15:42       ` Mike Galbraith
  2021-08-26 15:48         ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 6+ messages in thread
From: Mike Galbraith @ 2021-08-26 15:42 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior, Vlastimil Babka
  Cc: linux-rt-users, Thomas Gleixner

On Thu, 2021-08-26 at 17:18 +0200, Sebastian Andrzej Siewior wrote:
> On 2021-08-22 19:29:00 [+0200], Vlastimil Babka wrote:
> > On 8/22/21 6:35 PM, Mike Galbraith wrote:
> > > On second thought, burn both patches.  Making K[AC]SAN work with RT
> > > ain't worth the warts.
> >
> > The stackdepot part of kasan patch will be useful at some point.
> > slub_debug should eventually switch to stackdepot - it almost did in
> > mainline already. I'll try to remember and dig that part up when
> > needed.
> > Thanks.
>
> If you want them dropped, sure.

If you think they're worth having, feel free.  The kcsan patch I found
especially icky, and changed it like so, avoiding icky config dependent
branches while giving myself an ever so tiny excuse for looping ;-)

kcsan: Make it RT aware

Converting report_filterlist_lock to raw_spinlock_t lets RT report
problem free, but makes allocations in insert_report_filterlist()
problematic.  Solve that by taking the lock after allocation.  That
means all configs must check that the situation didn't change while
acquiring if we're to avoid ugly config specific branches, but it
also removes the need for GFP_ATOMIC allocations.

Signed-off-by: Mike Galbraith <efault@gmx.de>
---
 kernel/kcsan/debugfs.c |   74 +++++++++++++++++++++++++++++--------------------
 1 file changed, 45 insertions(+), 29 deletions(-)

--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -53,7 +53,7 @@ static struct {
 	.sorted		= false,
 	.whitelist	= false,	/* default is blacklist */
 };
-static DEFINE_SPINLOCK(report_filterlist_lock);
+static DEFINE_RAW_SPINLOCK(report_filterlist_lock);

 /*
  * The microbenchmark allows benchmarking KCSAN core runtime only. To run
@@ -110,7 +110,7 @@ bool kcsan_skip_report_debugfs(unsigned
 		return false;
 	func_addr -= offset; /* Get function start */

-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	if (report_filterlist.used == 0)
 		goto out;

@@ -127,7 +127,7 @@ bool kcsan_skip_report_debugfs(unsigned
 		ret = !ret;

 out:
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
 	return ret;
 }

@@ -135,9 +135,9 @@ static void set_report_filterlist_whitel
 {
 	unsigned long flags;

-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	report_filterlist.whitelist = whitelist;
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
 }

 /* Returns 0 on success, error-code otherwise. */
@@ -145,39 +145,56 @@ static ssize_t insert_report_filterlist(
 {
 	unsigned long flags;
 	unsigned long addr = kallsyms_lookup_name(func);
-	ssize_t ret = 0;

 	if (!addr) {
 		pr_err("could not find function: '%s'\n", func);
 		return -ENOENT;
 	}

-	spin_lock_irqsave(&report_filterlist_lock, flags);
-
+repeat:
 	if (report_filterlist.addrs == NULL) {
 		/* initial allocation */
-		report_filterlist.addrs =
-			kmalloc_array(report_filterlist.size,
-				      sizeof(unsigned long), GFP_ATOMIC);
-		if (report_filterlist.addrs == NULL) {
-			ret = -ENOMEM;
-			goto out;
+		unsigned long *array = kmalloc_array(report_filterlist.size,
+				    		     sizeof(unsigned long),
+						     GFP_KERNEL);
+		if (array == NULL)
+			return -ENOMEM;
+		raw_spin_lock_irqsave(&report_filterlist_lock, flags);
+		if (report_filterlist.addrs != NULL)  {
+			/* Someone beat us to it, move along to resize check. */
+			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+			kfree(array);
+			goto repeat;
 		}
+		report_filterlist.addrs = array;
 	} else if (report_filterlist.used == report_filterlist.size) {
 		/* resize filterlist */
-		size_t new_size = report_filterlist.size * 2;
-		unsigned long *new_addrs =
-			krealloc(report_filterlist.addrs,
-				 new_size * sizeof(unsigned long), GFP_ATOMIC);
-
-		if (new_addrs == NULL) {
-			/* leave filterlist itself untouched */
-			ret = -ENOMEM;
-			goto out;
+		size_t old_size = report_filterlist.size;
+		size_t new_size = old_size * 2;
+		unsigned long *new_addrs = krealloc(report_filterlist.addrs,
+						    new_size * sizeof(unsigned long),
+						    GFP_KERNEL);
+		if (new_addrs == NULL)
+			return -ENOMEM;
+		raw_spin_lock_irqsave(&report_filterlist_lock, flags);
+		if (report_filterlist.size != old_size) {
+			/* Someone else resized, recheck space availability. */
+			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+			kfree(new_addrs);
+			goto repeat;
 		}
-
 		report_filterlist.size = new_size;
 		report_filterlist.addrs = new_addrs;
+	} else {
+		/*
+		 * Speculatively, all looked fine, but we now must both take the
+		 * lock and verify space availability before proceeding.
+		 */
+		raw_spin_lock_irqsave(&report_filterlist_lock, flags);
+		if (report_filterlist.used == report_filterlist.size) {
+			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
+			goto repeat;
+		}
 	}

 	/* Note: deduplicating should be done in userspace. */
@@ -185,10 +202,9 @@ static ssize_t insert_report_filterlist(
 		kallsyms_lookup_name(func);
 	report_filterlist.sorted = false;

-out:
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);

-	return ret;
+	return 0;
 }

 static int show_info(struct seq_file *file, void *v)
@@ -204,13 +220,13 @@ static int show_info(struct seq_file *fi
 	}

 	/* show filter functions, and filter type */
-	spin_lock_irqsave(&report_filterlist_lock, flags);
+	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
 	seq_printf(file, "\n%s functions: %s\n",
 		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
 		   report_filterlist.used == 0 ? "none" : "");
 	for (i = 0; i < report_filterlist.used; ++i)
 		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
-	spin_unlock_irqrestore(&report_filterlist_lock, flags);
+	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);

 	return 0;
 }





^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch] kcsan: Make it RT aware
  2021-08-26 15:42       ` Mike Galbraith
@ 2021-08-26 15:48         ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 6+ messages in thread
From: Sebastian Andrzej Siewior @ 2021-08-26 15:48 UTC (permalink / raw)
  To: Mike Galbraith; +Cc: Vlastimil Babka, linux-rt-users, Thomas Gleixner

On 2021-08-26 17:42:21 [+0200], Mike Galbraith wrote:
> On Thu, 2021-08-26 at 17:18 +0200, Sebastian Andrzej Siewior wrote:
> > On 2021-08-22 19:29:00 [+0200], Vlastimil Babka wrote:
> > > On 8/22/21 6:35 PM, Mike Galbraith wrote:
> > > > On second thought, burn both patches.  Making K[AC]SAN work with RT
> > > > ain't worth the warts.
> > >
> > > The stackdepot part of kasan patch will be useful at some point.
> > > slub_debug should eventually switch to stackdepot - it almost did in
> > > mainline already. I'll try to remember and dig that part up when
> > > needed.
> > > Thanks.
> >
> > If you want them dropped, sure.
> 
> If you think they're worth having, feel free.  The kcsan patch I found
> especially icky, and changed it like so, avoiding icky config dependent
> branches while giving myself an ever so tiny excuse for looping ;-)
> 
> kcsan: Make it RT aware
> 
> Converting report_filterlist_lock to raw_spinlock_t lets RT report
> problem free, but makes allocations in insert_report_filterlist()
> problematic.  Solve that by taking the lock after allocation.  That
> means all configs must check that the situation didn't change while
> acquiring if we're to avoid ugly config specific branches, but it
> also removes the need for GFP_ATOMIC allocations.
> 
> Signed-off-by: Mike Galbraith <efault@gmx.de>

I would have to look at this raw lock change now more closely…

Sebastian

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2021-08-26 15:48 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-21  6:50 [patch] kcsan: Make it RT aware Mike Galbraith
2021-08-22 16:35 ` Mike Galbraith
2021-08-22 17:29   ` Vlastimil Babka
2021-08-26 15:18     ` Sebastian Andrzej Siewior
2021-08-26 15:42       ` Mike Galbraith
2021-08-26 15:48         ` Sebastian Andrzej Siewior

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.