From: Dmitry Vyukov <dvyukov@google.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	LKML <linux-kernel@vger.kernel.org>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	syzkaller <syzkaller@googlegroups.com>
Subject: Re: perf: use-after-free in perf_release
Date: Mon, 6 Mar 2017 13:27:41 +0100
Message-ID: <CACT4Y+YNwcjebv9yK3S8xGcLqbrqq+Cb3+zbJWMnoinpt82dtg@mail.gmail.com>
In-Reply-To: <20170306122327.GJ6500@twins.programming.kicks-ass.net>

[-- Attachment #1: Type: text/plain, Size: 1110 bytes --]

On Mon, Mar 6, 2017 at 1:23 PM, Peter Zijlstra <peterz@infradead.org> wrote:
> On Mon, Mar 06, 2017 at 01:17:42PM +0100, Dmitry Vyukov wrote:
>> On Mon, Mar 6, 2017 at 1:13 PM, Peter Zijlstra <peterz@infradead.org> wrote:
>> > On Mon, Mar 06, 2017 at 10:57:07AM +0100, Dmitry Vyukov wrote:
>> >> Hello,
>> >>
>> >> I've got the following use-after-free report while running the
>> >> syzkaller fuzzer on 86292b33d4b79ee03e2f43ea0381ef85f077c760. Note
>> >> that the task is freed right in copy_process() due to some error,
>> >> but it is still referenced by another thread in the perf subsystem.
>> >
>> > Weird... you don't happen to have a reproduction case available?
>>
>>
>> Unfortunately no. I've looked at both logs that I have, and there are
>> no memory allocation failures preceding the crash (though maybe
>> somebody used __GFP_NOWARN?). But if you inject an error into
>> copy_process() somewhere after perf_event_init_task(), it should
>> reproduce the bug under KASAN, I think.
>
> I'll try. Thanks!


I think you will also need the attached patch; it seems the bug was
found thanks to it. I'm going to send it out soon.
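
Concretely, the suggested injection could look roughly like the sketch
below. This is a hypothetical debugging hack for copy_process() in
kernel/fork.c, not a real patch: the exact injection point, the use of
prandom_u32(), and the jump label are assumptions based on the
surrounding error paths.

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	/*
	 * BEGIN injection hack -- not for merging. Randomly failing
	 * here forces the error path that frees the half-constructed
	 * task while perf may still reference it, which should make
	 * KASAN produce the use-after-free report.
	 */
	if (prandom_u32() % 100 == 0) {
		retval = -ENOMEM;	/* pretend an allocation failed */
		goto bad_fork_cleanup_perf; /* label name is an assumption */
	}
	/* END injection hack */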

[-- Attachment #2: atomic.patch --]
[-- Type: text/x-patch, Size: 7088 bytes --]

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 14635c5ea025..64f0a7fb9b2f 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/kasan-checks.h>
 #include <linux/types.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
@@ -47,6 +48,7 @@ static __always_inline void atomic_set(atomic_t *v, int i)
  */
 static __always_inline void atomic_add(int i, atomic_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm volatile(LOCK_PREFIX "addl %1,%0"
 		     : "+m" (v->counter)
 		     : "ir" (i));
@@ -61,6 +63,7 @@ static __always_inline void atomic_add(int i, atomic_t *v)
  */
 static __always_inline void atomic_sub(int i, atomic_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm volatile(LOCK_PREFIX "subl %1,%0"
 		     : "+m" (v->counter)
 		     : "ir" (i));
@@ -77,6 +80,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
  */
 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
 
@@ -88,6 +92,7 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
  */
 static __always_inline void atomic_inc(atomic_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm volatile(LOCK_PREFIX "incl %0"
 		     : "+m" (v->counter));
 }
@@ -100,6 +105,7 @@ static __always_inline void atomic_inc(atomic_t *v)
  */
 static __always_inline void atomic_dec(atomic_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm volatile(LOCK_PREFIX "decl %0"
 		     : "+m" (v->counter));
 }
@@ -114,6 +120,7 @@ static __always_inline void atomic_dec(atomic_t *v)
  */
 static __always_inline bool atomic_dec_and_test(atomic_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }
 
@@ -127,6 +134,7 @@ static __always_inline bool atomic_dec_and_test(atomic_t *v)
  */
 static __always_inline bool atomic_inc_and_test(atomic_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
 
@@ -141,6 +149,7 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v)
  */
 static __always_inline bool atomic_add_negative(int i, atomic_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }
 
@@ -194,6 +203,7 @@ static inline int atomic_xchg(atomic_t *v, int new)
 #define ATOMIC_OP(op)							\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
+	kasan_check_write(v, sizeof(*v));				\
 	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
 			: "+m" (v->counter)				\
 			: "ir" (i)					\
@@ -258,6 +268,7 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 static __always_inline short int atomic_inc_short(short int *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
 	return *v;
 }
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 89ed2f6ae2f7..a75cb76c7b9b 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -42,6 +42,7 @@ static inline void atomic64_set(atomic64_t *v, long i)
  */
 static __always_inline void atomic64_add(long i, atomic64_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
 		     : "er" (i), "m" (v->counter));
@@ -56,6 +57,7 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
  */
 static inline void atomic64_sub(long i, atomic64_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm volatile(LOCK_PREFIX "subq %1,%0"
 		     : "=m" (v->counter)
 		     : "er" (i), "m" (v->counter));
@@ -72,6 +74,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  */
 static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
 
@@ -83,6 +86,7 @@ static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
  */
 static __always_inline void atomic64_inc(atomic64_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm volatile(LOCK_PREFIX "incq %0"
 		     : "=m" (v->counter)
 		     : "m" (v->counter));
@@ -96,6 +100,7 @@ static __always_inline void atomic64_inc(atomic64_t *v)
  */
 static __always_inline void atomic64_dec(atomic64_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	asm volatile(LOCK_PREFIX "decq %0"
 		     : "=m" (v->counter)
 		     : "m" (v->counter));
@@ -111,6 +116,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
  */
 static inline bool atomic64_dec_and_test(atomic64_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
 
@@ -124,6 +130,7 @@ static inline bool atomic64_dec_and_test(atomic64_t *v)
  */
 static inline bool atomic64_inc_and_test(atomic64_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
 
@@ -138,6 +145,7 @@ static inline bool atomic64_inc_and_test(atomic64_t *v)
  */
 static inline bool atomic64_add_negative(long i, atomic64_t *v)
 {
+	kasan_check_write(v, sizeof(*v));
 	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
 
@@ -233,6 +241,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define ATOMIC64_OP(op)							\
 static inline void atomic64_##op(long i, atomic64_t *v)			\
 {									\
+	kasan_check_write(v, sizeof(*v));				\
 	asm volatile(LOCK_PREFIX #op"q %1,%0"				\
 			: "+m" (v->counter)				\
 			: "er" (i)					\
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 97848cdfcb1a..1632918cf9b9 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
 #define ASM_X86_CMPXCHG_H
 
 #include <linux/compiler.h>
+#include <linux/kasan-checks.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
@@ -41,6 +42,7 @@ extern void __add_wrong_size(void)
 #define __xchg_op(ptr, arg, op, lock)					\
 	({								\
 	        __typeof__ (*(ptr)) __ret = (arg);			\
+	        kasan_check_write((void*)(ptr), sizeof(*(ptr)));	\
 		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
 			asm volatile (lock #op "b %b0, %1\n"		\
@@ -86,6 +88,7 @@ extern void __add_wrong_size(void)
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) __old = (old);				\
 	__typeof__(*(ptr)) __new = (new);				\
+	kasan_check_write((void*)(ptr), sizeof(*(ptr)));		\
 	switch (size) {							\
 	case __X86_CASE_B:						\
 	{								\
@@ -171,6 +174,7 @@ extern void __add_wrong_size(void)
 	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
 	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
 	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
+	kasan_check_write((void*)(p1), 2 * sizeof(*(p1)));		\
 	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
 		     : "=a" (__ret), "+d" (__old2),			\
 		       "+m" (*(p1)), "+m" (*(p2))			\


Thread overview: 46+ messages
2017-03-06  9:57 Dmitry Vyukov
2017-03-06 12:13 ` Peter Zijlstra
2017-03-06 12:17   ` Dmitry Vyukov
2017-03-06 12:23     ` Peter Zijlstra
2017-03-06 12:27       ` Dmitry Vyukov [this message]
2017-03-06 12:47         ` Peter Zijlstra
2017-03-06 13:14 ` Peter Zijlstra
2017-03-06 13:34   ` Dmitry Vyukov
2017-03-07  9:08     ` Peter Zijlstra
2017-03-07  9:26       ` Dmitry Vyukov
2017-03-07  9:37         ` Peter Zijlstra
2017-03-07  9:43           ` Dmitry Vyukov
2017-03-07 10:00             ` Peter Zijlstra
2017-03-07 13:16   ` Peter Zijlstra
2017-03-07 13:27     ` Peter Zijlstra
2017-03-07 14:04   ` Oleg Nesterov
2017-03-07 14:17     ` Dmitry Vyukov
2017-03-07 16:51       ` Oleg Nesterov
2017-03-07 17:29         ` Peter Zijlstra
2017-03-14 12:55         ` Peter Zijlstra
2017-03-14 13:24           ` Oleg Nesterov
2017-03-14 13:47             ` Peter Zijlstra
2017-03-14 14:03           ` Oleg Nesterov
2017-03-14 14:07             ` Peter Zijlstra
2017-03-14 14:30               ` Oleg Nesterov
2017-03-14 15:02                 ` Peter Zijlstra
2017-03-14 15:07                   ` Peter Zijlstra
2017-03-14 15:37                     ` Oleg Nesterov
2017-03-14 15:46                       ` Peter Zijlstra
2017-03-14 15:19                   ` Oleg Nesterov
2017-03-14 15:26                     ` Peter Zijlstra
2017-03-14 15:59                       ` Peter Zijlstra
2017-03-15 16:43                         ` Oleg Nesterov
2017-03-16 12:05                           ` Peter Zijlstra
2017-03-16 13:57                           ` Peter Zijlstra
2017-03-16 16:41                             ` Oleg Nesterov
2017-03-16 12:47 [PATCH 0/4] perf patches Peter Zijlstra
2017-03-16 12:47 ` [PATCH 1/4] perf: Fix use-after-free in perf_release() Peter Zijlstra
2017-03-16 15:19   ` [tip:perf/urgent] perf/core: " tip-bot for Peter Zijlstra
2017-03-16 12:47 ` [PATCH 2/4] perf: Fix event inheritance on fork() Peter Zijlstra
2017-03-16 15:19   ` [tip:perf/urgent] perf/core: " tip-bot for Peter Zijlstra
2017-03-16 12:47 ` [PATCH 3/4] perf: Simplify perf_event_free_task() Peter Zijlstra
2017-03-16 15:20   ` [tip:perf/urgent] perf/core: " tip-bot for Peter Zijlstra
2017-03-16 12:47 ` [PATCH 4/4] perf: Better explain the inherit magic Peter Zijlstra
2017-03-16 15:21   ` [tip:perf/urgent] perf/core: " tip-bot for Peter Zijlstra
2017-03-16 13:20 ` [PATCH 0/4] perf patches Ingo Molnar
