linux-mm.kvack.org archive mirror
* [PATCH v3] mm: mmap_lock: fix use-after-free race and css ref leak in tracepoints
@ 2020-12-07 21:33 Axel Rasmussen
From: Axel Rasmussen @ 2020-12-07 21:33 UTC (permalink / raw)
  To: Andrew Morton, Chinwen Chang, Daniel Jordan, David Rientjes,
	Davidlohr Bueso, Ingo Molnar, Jann Horn, Laurent Dufour,
	Michel Lespinasse, Stephen Rothwell, Steven Rostedt,
	Vlastimil Babka
  Cc: Yafang Shao, David S . Miller, dsahern, Greg Kroah-Hartman,
	Jakub Kicinski, liuhangbin, Tejun Heo, Shakeel Butt, Greg Thelen,
	linux-kernel, linux-mm, Axel Rasmussen

syzbot reported[1] a use-after-free introduced in 0f818c4bc1f3. The bug
is that an ongoing trace event might race with the tracepoint being
disabled (and therefore the _unreg() callback being called). Consider
this ordering:

T1: trace event fires, get_mm_memcg_path() is called
T1: get_memcg_path_buf() returns a buffer pointer
T2: trace_mmap_lock_unreg() is called, buffers are freed
T1: cgroup_path() is called with the now-freed buffer

The solution in this commit is to switch to mutex + RCU. With the RCU
API we can first stop new buffers from being handed out, then wait for
existing users to finish, and *then* free the buffers.
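
The core of this is the standard RCU retire pattern. A rough sketch of the
two sides (simplified; it ignores the per-cpu indirection, which is in the
diff below):

  /* Reader (trace event path): */
  rcu_read_lock();
  buf = rcu_dereference(memcg_path_buf);
  if (buf != NULL) {
          /* ... write the memcg path into buf ... */
  }
  rcu_read_unlock();

  /* Writer (_unreg() path), with reg_lock held: */
  old = rcu_dereference_protected(memcg_path_buf, lockdep_is_held(&reg_lock));
  rcu_assign_pointer(memcg_path_buf, NULL);
  synchronize_rcu();      /* wait for readers still using the old buffer */
  kfree(old);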

I have a simple reproducer program which spins up two pools of threads, each
doing the following in a tight loop (a sketch of such a program is included
below):

  Pool 1:
  mmap(NULL, 4096, PROT_READ | PROT_WRITE,
       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
  munmap()

  Pool 2:
  echo 1 > /sys/kernel/debug/tracing/events/mmap_lock/enable
  echo 0 > /sys/kernel/debug/tracing/events/mmap_lock/enable

This triggers the use-after-free very quickly. With this patch, I let it
run for an hour without any BUGs.
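
For reference, a minimal sketch of such a reproducer (not the exact program
I ran; thread counts, error handling, and the tracefs mount point may
differ) looks roughly like:

  #include <fcntl.h>
  #include <pthread.h>
  #include <sys/mman.h>
  #include <unistd.h>

  /* Pool 1: map and unmap an anonymous page in a tight loop. */
  static void *map_loop(void *arg)
  {
          for (;;) {
                  void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                  if (p != MAP_FAILED)
                          munmap(p, 4096);
          }
          return NULL;
  }

  /* Pool 2: toggle the mmap_lock trace events on and off. */
  static void *toggle_loop(void *arg)
  {
          int fd = open("/sys/kernel/debug/tracing/events/mmap_lock/enable",
                        O_WRONLY);

          for (;;) {
                  write(fd, "1", 1);
                  write(fd, "0", 1);
          }
          return NULL;
  }

  int main(void)
  {
          pthread_t threads[8];
          int i;

          for (i = 0; i < 4; i++)
                  pthread_create(&threads[i], NULL, map_loop, NULL);
          for (; i < 8; i++)
                  pthread_create(&threads[i], NULL, toggle_loop, NULL);
          pthread_join(threads[0], NULL); /* the loops never exit */
          return 0;
  }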

While fixing this, I also noticed and fixed a css ref leak. Previously
we called get_mem_cgroup_from_mm(), but we never called css_put() to
release that reference. get_mm_memcg_path() now does this properly.

[1]: https://syzkaller.appspot.com/bug?extid=19e6dd9943972fa1c58a

Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
---
 mm/mmap_lock.c | 123 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 83 insertions(+), 40 deletions(-)

diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 12af8f1b8a14..dcdde4f722a4 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -6,9 +6,10 @@
 #include <linux/cgroup.h>
 #include <linux/memcontrol.h>
 #include <linux/mmap_lock.h>
+#include <linux/mutex.h>
 #include <linux/percpu.h>
+#include <linux/rcupdate.h>
 #include <linux/smp.h>
-#include <linux/spinlock.h>
 #include <linux/trace_events.h>
 
 EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
@@ -23,8 +24,8 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
  * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
  * been made.
  */
-static DEFINE_SPINLOCK(reg_lock);
-static int reg_refcount;
+static DEFINE_MUTEX(reg_lock);
+static int reg_refcount; /* Protected by reg_lock. */
 
 /*
  * Size of the buffer for memcg path names. Ignoring stack trace support,
@@ -38,99 +39,141 @@ static int reg_refcount;
  */
 #define CONTEXT_COUNT 4
 
-DEFINE_PER_CPU(char *, memcg_path_buf);
-DEFINE_PER_CPU(int, memcg_path_buf_idx);
+static DEFINE_PER_CPU(char __rcu *, memcg_path_buf);
+static char **tmp_bufs;
+static DEFINE_PER_CPU(int, memcg_path_buf_idx);
+
+/* Called with reg_lock held. */
+static void free_memcg_path_bufs(void)
+{
+	int cpu;
+	char **old = tmp_bufs;
+
+	for_each_possible_cpu(cpu) {
+		*(old++) = rcu_dereference_protected(
+			per_cpu(memcg_path_buf, cpu),
+			lockdep_is_held(&reg_lock));
+		rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), NULL);
+	}
+
+	/* Wait for inflight memcg_path_buf users to finish. */
+	synchronize_rcu();
+
+	old = tmp_bufs;
+	for_each_possible_cpu(cpu) {
+		kfree(*(old++));
+	}
+
+	kfree(tmp_bufs);
+	tmp_bufs = NULL;
+}
 
 int trace_mmap_lock_reg(void)
 {
-	unsigned long flags;
 	int cpu;
+	char *new;
 
-	spin_lock_irqsave(&reg_lock, flags);
+	mutex_lock(&reg_lock);
 
+	/* If the refcount is going 0->1, proceed with allocating buffers. */
 	if (reg_refcount++)
 		goto out;
 
+	tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
+				 GFP_KERNEL);
+	if (tmp_bufs == NULL)
+		goto out_fail;
+
 	for_each_possible_cpu(cpu) {
-		per_cpu(memcg_path_buf, cpu) = NULL;
-	}
-	for_each_possible_cpu(cpu) {
-		per_cpu(memcg_path_buf, cpu) = kmalloc(
-			MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_NOWAIT);
-		if (per_cpu(memcg_path_buf, cpu) == NULL)
-			goto out_fail;
-		per_cpu(memcg_path_buf_idx, cpu) = 0;
+		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
+		if (new == NULL)
+			goto out_fail_free;
+		rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), new);
+		/* Don't need to wait for inflights, they'd have gotten NULL. */
 	}
 
 out:
-	spin_unlock_irqrestore(&reg_lock, flags);
+	mutex_unlock(&reg_lock);
 	return 0;
 
+out_fail_free:
+	free_memcg_path_bufs();
 out_fail:
-	for_each_possible_cpu(cpu) {
-		if (per_cpu(memcg_path_buf, cpu) != NULL)
-			kfree(per_cpu(memcg_path_buf, cpu));
-		else
-			break;
-	}
-
+	/* Since we failed, undo the earlier ref increment. */
 	--reg_refcount;
 
-	spin_unlock_irqrestore(&reg_lock, flags);
+	mutex_unlock(&reg_lock);
 	return -ENOMEM;
 }
 
 void trace_mmap_lock_unreg(void)
 {
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&reg_lock, flags);
+	mutex_lock(&reg_lock);
 
+	/* If the refcount is going 1->0, proceed with freeing buffers. */
 	if (--reg_refcount)
 		goto out;
 
-	for_each_possible_cpu(cpu) {
-		kfree(per_cpu(memcg_path_buf, cpu));
-	}
+	free_memcg_path_bufs();
 
 out:
-	spin_unlock_irqrestore(&reg_lock, flags);
+	mutex_unlock(&reg_lock);
 }
 
 static inline char *get_memcg_path_buf(void)
 {
+	char *buf;
 	int idx;
 
+	rcu_read_lock();
+	buf = rcu_dereference(*this_cpu_ptr(&memcg_path_buf));
+	if (buf == NULL) {
+		rcu_read_unlock();
+		return NULL;
+	}
 	idx = this_cpu_add_return(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE) -
 	      MEMCG_PATH_BUF_SIZE;
-	return &this_cpu_read(memcg_path_buf)[idx];
+	return &buf[idx];
 }
 
 static inline void put_memcg_path_buf(void)
 {
 	this_cpu_sub(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE);
+	rcu_read_unlock();
 }
 
 /*
  * Write the given mm_struct's memcg path to a percpu buffer, and return a
- * pointer to it. If the path cannot be determined, NULL is returned.
+ * pointer to it. If the path cannot be determined, or no buffer was available
+ * (because the trace event is being unregistered), NULL is returned.
  *
  * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
  * disabled by the caller before calling us, and re-enabled only after the
  * caller is done with the pointer.
+ *
+ * The caller must call put_memcg_path_buf() once the buffer is no longer
+ * needed. This must be done while preemption is still disabled.
  */
 static const char *get_mm_memcg_path(struct mm_struct *mm)
 {
+	char *buf = NULL;
 	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
 
-	if (memcg != NULL && likely(memcg->css.cgroup != NULL)) {
-		char *buf = get_memcg_path_buf();
+	if (memcg == NULL)
+		goto out;
+	if (unlikely(memcg->css.cgroup == NULL))
+		goto out_put;
 
-		cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);
-		return buf;
-	}
-	return NULL;
+	buf = get_memcg_path_buf();
+	if (buf == NULL)
+		goto out_put;
+
+	cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);
+
+out_put:
+	css_put(&memcg->css);
+out:
+	return buf;
 }
 
 #define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
-- 
2.29.2.576.ga3fc446d84-goog




* Re: [PATCH v3] mm: mmap_lock: fix use-after-free race and css ref leak in tracepoints
From: Andrew Morton @ 2020-12-08  1:27 UTC (permalink / raw)
  To: Axel Rasmussen
  Cc: Chinwen Chang, Daniel Jordan, David Rientjes, Davidlohr Bueso,
	Ingo Molnar, Jann Horn, Laurent Dufour, Michel Lespinasse,
	Stephen Rothwell, Steven Rostedt, Vlastimil Babka, Yafang Shao,
	David S . Miller, dsahern, Greg Kroah-Hartman, Jakub Kicinski,
	liuhangbin, Tejun Heo, Shakeel Butt, Greg Thelen, linux-kernel,
	linux-mm

On Mon,  7 Dec 2020 13:33:58 -0800 Axel Rasmussen <axelrasmussen@google.com> wrote:

> syzbot reported[1] a use-after-free introduced in 0f818c4bc1f3. The bug
> is that an ongoing trace event might race with the tracepoint being
> disabled (and therefore the _unreg() callback being called). Consider
> this ordering:
> 
> T1: trace event fires, get_mm_memcg_path() is called
> T1: get_memcg_path_buf() returns a buffer pointer
> T2: trace_mmap_lock_unreg() is called, buffers are freed
> T1: cgroup_path() is called with the now-freed buffer
> 
> The solution in this commit is to switch to mutex + RCU. With the RCU
> API we can first stop new buffers from being handed out, then wait for
> existing users to finish, and *then* free the buffers.
> 
> I have a simple reproducer program which spins up two pools of threads,
> doing the following in a tight loop:
> 
>   Pool 1:
>   mmap(NULL, 4096, PROT_READ | PROT_WRITE,
>        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
>   munmap()
> 
>   Pool 2:
>   echo 1 > /sys/kernel/debug/tracing/events/mmap_lock/enable
>   echo 0 > /sys/kernel/debug/tracing/events/mmap_lock/enable
> 
> This triggers the use-after-free very quickly. With this patch, I let it
> run for an hour without any BUGs.
> 
> While fixing this, I also noticed and fixed a css ref leak. Previously
> we called get_mem_cgroup_from_mm(), but we never called css_put() to
> release that reference. get_mm_memcg_path() now does this properly.
> 
> [1]: https://syzkaller.appspot.com/bug?extid=19e6dd9943972fa1c58a
> 

So... how does this fix differ from the previous version of this fix?

The difference is quite large:

--- a/mm/mmap_lock.c~mmap_lock-add-tracepoints-around-lock-acquisition-fix-fix
+++ a/mm/mmap_lock.c
@@ -3,13 +3,13 @@
 #include <trace/events/mmap_lock.h>
 
 #include <linux/mm.h>
-#include <linux/atomic.h>
 #include <linux/cgroup.h>
 #include <linux/memcontrol.h>
 #include <linux/mmap_lock.h>
+#include <linux/mutex.h>
 #include <linux/percpu.h>
+#include <linux/rcupdate.h>
 #include <linux/smp.h>
-#include <linux/spinlock.h>
 #include <linux/trace_events.h>
 
 EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
@@ -19,28 +19,13 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_relea
 #ifdef CONFIG_MEMCG
 
 /*
- * This is unfortunately complicated... _reg() and _unreg() may be called
- * in parallel, separately for each of our three event types. To save memory,
- * all of the event types share the same buffers. Furthermore, trace events
- * might happen in parallel with _unreg(); we need to ensure we don't free the
- * buffers before all inflights have finished. Because these events happen
- * "frequently", we also want to prevent new inflights from starting once the
- * _unreg() process begins. And, for performance reasons, we want to avoid any
- * locking in the trace event path.
- *
- * So:
- *
- * - Use a spinlock to serialize _reg() and _unreg() calls.
- * - Keep track of nested _reg() calls with a lock-protected counter.
- * - Define a flag indicating whether or not unregistration has begun (and
- *   therefore that there should be no new buffer uses going forward).
- * - Keep track of inflight buffer users with a reference count.
+ * Our various events all share the same buffer (because we don't want or need
+ * to allocate a set of buffers *per event type*), so we need to protect against
+ * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
+ * been made.
  */
-static DEFINE_SPINLOCK(reg_lock);
-static int reg_types_rc; /* Protected by reg_lock. */
-static bool unreg_started; /* Doesn't need synchronization. */
-/* atomic_t instead of refcount_t, as we want ordered inc without locks. */
-static atomic_t inflight_rc = ATOMIC_INIT(0);
+static DEFINE_MUTEX(reg_lock);
+static int reg_refcount; /* Protected by reg_lock. */
 
 /*
  * Size of the buffer for memcg path names. Ignoring stack trace support,
@@ -54,119 +39,107 @@ static atomic_t inflight_rc = ATOMIC_INI
  */
 #define CONTEXT_COUNT 4
 
-DEFINE_PER_CPU(char *, memcg_path_buf);
-DEFINE_PER_CPU(int, memcg_path_buf_idx);
+static DEFINE_PER_CPU(char __rcu *, memcg_path_buf);
+static char **tmp_bufs;
+static DEFINE_PER_CPU(int, memcg_path_buf_idx);
+
+/* Called with reg_lock held. */
+static void free_memcg_path_bufs(void)
+{
+	int cpu;
+	char **old = tmp_bufs;
+
+	for_each_possible_cpu(cpu) {
+		*(old++) = rcu_dereference_protected(
+			per_cpu(memcg_path_buf, cpu),
+			lockdep_is_held(&reg_lock));
+		rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), NULL);
+	}
+
+	/* Wait for inflight memcg_path_buf users to finish. */
+	synchronize_rcu();
+
+	old = tmp_bufs;
+	for_each_possible_cpu(cpu) {
+		kfree(*(old++));
+	}
+
+	kfree(tmp_bufs);
+	tmp_bufs = NULL;
+}
 
 int trace_mmap_lock_reg(void)
 {
-	unsigned long flags;
 	int cpu;
+	char *new;
 
-	/*
-	 * Serialize _reg() and _unreg(). Without this, e.g. _unreg() might
-	 * start cleaning up while _reg() is only partially completed.
-	 */
-	spin_lock_irqsave(&reg_lock, flags);
+	mutex_lock(&reg_lock);
 
 	/* If the refcount is going 0->1, proceed with allocating buffers. */
-	if (reg_types_rc++)
+	if (reg_refcount++)
 		goto out;
 
+	tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
+				 GFP_KERNEL);
+	if (tmp_bufs == NULL)
+		goto out_fail;
+
 	for_each_possible_cpu(cpu) {
-		per_cpu(memcg_path_buf, cpu) = NULL;
-	}
-	for_each_possible_cpu(cpu) {
-		per_cpu(memcg_path_buf, cpu) = kmalloc(
-			MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_NOWAIT);
-		if (per_cpu(memcg_path_buf, cpu) == NULL)
-			goto out_fail;
-		per_cpu(memcg_path_buf_idx, cpu) = 0;
+		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
+		if (new == NULL)
+			goto out_fail_free;
+		rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), new);
+		/* Don't need to wait for inflights, they'd have gotten NULL. */
 	}
 
-	/* Reset unreg_started flag, allowing new trace events. */
-	WRITE_ONCE(unreg_started, false);
-	/* Add the registration +1 to the inflight refcount. */
-	atomic_inc(&inflight_rc);
-
 out:
-	spin_unlock_irqrestore(&reg_lock, flags);
+	mutex_unlock(&reg_lock);
 	return 0;
 
+out_fail_free:
+	free_memcg_path_bufs();
 out_fail:
-	for_each_possible_cpu(cpu) {
-		if (per_cpu(memcg_path_buf, cpu) != NULL)
-			kfree(per_cpu(memcg_path_buf, cpu));
-		else
-			break;
-	}
+	/* Since we failed, undo the earlier ref increment. */
+	--reg_refcount;
 
-	/* Since we failed, undo the earlier increment. */
-	--reg_types_rc;
-
-	spin_unlock_irqrestore(&reg_lock, flags);
+	mutex_unlock(&reg_lock);
 	return -ENOMEM;
 }
 
 void trace_mmap_lock_unreg(void)
 {
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&reg_lock, flags);
+	mutex_lock(&reg_lock);
 
 	/* If the refcount is going 1->0, proceed with freeing buffers. */
-	if (--reg_types_rc)
+	if (--reg_refcount)
 		goto out;
 
-	/* This was the last registration; start preventing new events... */
-	WRITE_ONCE(unreg_started, true);
-	/* Remove the registration +1 from the inflight refcount. */
-	atomic_dec(&inflight_rc);
-	/*
-	 * Wait for inflight refcount to be zero (all inflights stopped). Since
-	 * we have a spinlock we can't sleep, so just spin. Because trace events
-	 * are "fast", and because we stop new inflights from starting at this
-	 * point with unreg_started, this should be a short spin.
-	 */
-	while (atomic_read(&inflight_rc))
-		barrier();
-
-	for_each_possible_cpu(cpu) {
-		kfree(per_cpu(memcg_path_buf, cpu));
-	}
+	free_memcg_path_bufs();
 
 out:
-	spin_unlock_irqrestore(&reg_lock, flags);
+	mutex_unlock(&reg_lock);
 }
 
 static inline char *get_memcg_path_buf(void)
 {
+	char *buf;
 	int idx;
 
-	/*
-	 * If unregistration is happening, stop. Yes, this check is racy;
-	 * that's fine. It just means _unreg() might spin waiting for an extra
-	 * event or two. Use-after-free is actually prevented by the refcount.
-	 */
-	if (READ_ONCE(unreg_started))
+	rcu_read_lock();
+	buf = rcu_dereference(*this_cpu_ptr(&memcg_path_buf));
+	if (buf == NULL) {
+		rcu_read_unlock();
 		return NULL;
-	/*
-	 * Take a reference, unless the registration +1 has been released
-	 * and there aren't already existing inflights (refcount is zero).
-	 */
-	if (!atomic_inc_not_zero(&inflight_rc))
-		return NULL;
-
+	}
 	idx = this_cpu_add_return(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE) -
 	      MEMCG_PATH_BUF_SIZE;
-	return &this_cpu_read(memcg_path_buf)[idx];
+	return &buf[idx];
 }
 
 static inline void put_memcg_path_buf(void)
 {
 	this_cpu_sub(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE);
-	/* We're done with this buffer; drop the reference. */
-	atomic_dec(&inflight_rc);
+	rcu_read_unlock();
 }
 
 /*
_




* Re: [PATCH v3] mm: mmap_lock: fix use-after-free race and css ref leak in tracepoints
From: Axel Rasmussen @ 2020-12-08  1:40 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Chinwen Chang, Daniel Jordan, David Rientjes, Davidlohr Bueso,
	Ingo Molnar, Jann Horn, Laurent Dufour, Michel Lespinasse,
	Stephen Rothwell, Steven Rostedt, Vlastimil Babka, Yafang Shao,
	David S . Miller, dsahern, Greg Kroah-Hartman, Jakub Kicinski,
	liuhangbin, Tejun Heo, Shakeel Butt, Greg Thelen, LKML, Linux MM


On Mon, Dec 7, 2020 at 5:27 PM Andrew Morton <akpm@linux-foundation.org>
wrote:

> So... how does this fix differ from the previous version of this fix?
>

Apologies, I'll include a cover letter with this information next time. The
diff in your mail is the combined v1 -> v3 delta, i.e. it includes both of
the sets of changes described below.

Changes from v2 -> v3:

Split up the free loop, so now we do it in three steps: 1) loop through
setting the buffers to NULL, 2) synchronize_rcu() *once*, 3) loop through
freeing each of the buffers. This requires allocating some memory to hold
the not-yet-freed buffer pointers, but it means much less waiting, since
calling synchronize_rcu() once per CPU in a loop is expensive. Again, per
Steven's suggestion.
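
Roughly, the teardown went from something like this (a sketch of the v2
approach, not the literal v2 code):

  for_each_possible_cpu(cpu) {
          old = rcu_dereference_protected(per_cpu(memcg_path_buf, cpu),
                                          lockdep_is_held(&reg_lock));
          rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), NULL);
          synchronize_rcu();      /* one grace period *per CPU* */
          kfree(old);
  }

to NULLing all of the per-cpu pointers first, then waiting for a single
grace period, and only then freeing them, as free_memcg_path_bufs() does in
the patch.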

Changes from v1 -> v2:

Rewrote the fix to use mutex + RCU instead of doing some hand-rolled
reference count thing, as per Steven's suggestion.

