io-uring.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/5] cleanup for req_free/find_next
@ 2020-06-29 10:12 Pavel Begunkov
  2020-06-29 10:12 ` [PATCH 1/5] io_uring: deduplicate freeing linked timeouts Pavel Begunkov
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Pavel Begunkov @ 2020-06-29 10:12 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Cleans a bit up completion path.

Regarding [5/5], it's so unlikely to happen that I don't even try
to port it for 5.8; there would be merge conflicts. However, if anyone
thinks otherwise or this ever shows itself -- let me know.

Pavel Begunkov (5):
  io_uring: deduplicate freeing linked timeouts
  io_uring: replace find_next() out param with ret
  io_uring: kill REQ_F_TIMEOUT
  io_uring: kill REQ_F_TIMEOUT_NOSEQ
  io_uring: fix use after free

 fs/io_uring.c | 140 ++++++++++++++++++++++++--------------------------
 1 file changed, 66 insertions(+), 74 deletions(-)

-- 
2.24.0


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 1/5] io_uring: deduplicate freeing linked timeouts
  2020-06-29 10:12 [PATCH 0/5] cleanup for req_free/find_next Pavel Begunkov
@ 2020-06-29 10:12 ` Pavel Begunkov
  2020-06-29 10:13 ` [PATCH 2/5] io_uring: replace find_next() out param with ret Pavel Begunkov
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Pavel Begunkov @ 2020-06-29 10:12 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Linked timeout cancellation code is repeated in io_req_link_next()
and io_fail_links(), and they differ in details even though they
shouldn't. Based on the fact that there is at most one armed linked
timeout in a link, and that it immediately follows the head, extract
a function that will check for it and defuse it.

Justification:
- DRY and cleaner
- better inlining for io_req_link_next() (just 1 call site now)
- isolates linked_timeouts from common path
- reduces time under spinlock for failed links
- actually less code

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 88 +++++++++++++++++++++++----------------------------
 1 file changed, 40 insertions(+), 48 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6710097564de..4cd6d24276c3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1552,37 +1552,49 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
 	return false;
 }
 
-static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+static void io_kill_linked_timeout(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_kiocb *link;
 	bool wake_ev = false;
+	unsigned long flags = 0; /* false positive warning */
+
+	if (!(req->flags & REQ_F_COMP_LOCKED))
+		spin_lock_irqsave(&ctx->completion_lock, flags);
+
+	if (list_empty(&req->link_list))
+		goto out;
+	link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
+	if (link->opcode != IORING_OP_LINK_TIMEOUT)
+		goto out;
+
+	list_del_init(&link->link_list);
+	wake_ev = io_link_cancel_timeout(link);
+	req->flags &= ~REQ_F_LINK_TIMEOUT;
+out:
+	if (!(req->flags & REQ_F_COMP_LOCKED))
+		spin_unlock_irqrestore(&ctx->completion_lock, flags);
+	if (wake_ev)
+		io_cqring_ev_posted(ctx);
+}
+
+static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+{
+	struct io_kiocb *nxt;
 
 	/*
 	 * The list should never be empty when we are called here. But could
 	 * potentially happen if the chain is messed up, check to be on the
 	 * safe side.
 	 */
-	while (!list_empty(&req->link_list)) {
-		struct io_kiocb *nxt = list_first_entry(&req->link_list,
-						struct io_kiocb, link_list);
-
-		if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
-			     (nxt->flags & REQ_F_TIMEOUT))) {
-			list_del_init(&nxt->link_list);
-			wake_ev |= io_link_cancel_timeout(nxt);
-			req->flags &= ~REQ_F_LINK_TIMEOUT;
-			continue;
-		}
-
-		list_del_init(&req->link_list);
-		if (!list_empty(&nxt->link_list))
-			nxt->flags |= REQ_F_LINK_HEAD;
-		*nxtptr = nxt;
-		break;
-	}
+	if (unlikely(list_empty(&req->link_list)))
+		return;
 
-	if (wake_ev)
-		io_cqring_ev_posted(ctx);
+	nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
+	list_del_init(&req->link_list);
+	if (!list_empty(&nxt->link_list))
+		nxt->flags |= REQ_F_LINK_HEAD;
+	*nxtptr = nxt;
 }
 
 /*
@@ -1591,9 +1603,6 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 static void io_fail_links(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ctx->completion_lock, flags);
 
 	while (!list_empty(&req->link_list)) {
 		struct io_kiocb *link = list_first_entry(&req->link_list,
@@ -1602,18 +1611,12 @@ static void io_fail_links(struct io_kiocb *req)
 		list_del_init(&link->link_list);
 		trace_io_uring_fail_link(req, link);
 
-		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
-		    link->opcode == IORING_OP_LINK_TIMEOUT) {
-			io_link_cancel_timeout(link);
-		} else {
-			io_cqring_fill_event(link, -ECANCELED);
-			__io_double_put_req(link);
-		}
+		io_cqring_fill_event(link, -ECANCELED);
+		__io_double_put_req(link);
 		req->flags &= ~REQ_F_LINK_TIMEOUT;
 	}
 
 	io_commit_cqring(ctx);
-	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	io_cqring_ev_posted(ctx);
 }
 
@@ -1623,30 +1626,19 @@ static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
 		return;
 	req->flags &= ~REQ_F_LINK_HEAD;
 
+	if (req->flags & REQ_F_LINK_TIMEOUT)
+		io_kill_linked_timeout(req);
+
 	/*
 	 * If LINK is set, we have dependent requests in this chain. If we
 	 * didn't fail this request, queue the first one up, moving any other
 	 * dependencies to the next request. In case of failure, fail the rest
 	 * of the chain.
 	 */
-	if (req->flags & REQ_F_FAIL_LINK) {
+	if (req->flags & REQ_F_FAIL_LINK)
 		io_fail_links(req);
-	} else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
-			REQ_F_LINK_TIMEOUT) {
-		struct io_ring_ctx *ctx = req->ctx;
-		unsigned long flags;
-
-		/*
-		 * If this is a timeout link, we could be racing with the
-		 * timeout timer. Grab the completion lock for this case to
-		 * protect against that.
-		 */
-		spin_lock_irqsave(&ctx->completion_lock, flags);
-		io_req_link_next(req, nxt);
-		spin_unlock_irqrestore(&ctx->completion_lock, flags);
-	} else {
+	else
 		io_req_link_next(req, nxt);
-	}
 }
 
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
-- 
2.24.0


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 2/5] io_uring: replace find_next() out param with ret
  2020-06-29 10:12 [PATCH 0/5] cleanup for req_free/find_next Pavel Begunkov
  2020-06-29 10:12 ` [PATCH 1/5] io_uring: deduplicate freeing linked timeouts Pavel Begunkov
@ 2020-06-29 10:13 ` Pavel Begunkov
  2020-06-29 10:13 ` [PATCH 3/5] io_uring: kill REQ_F_TIMEOUT Pavel Begunkov
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Pavel Begunkov @ 2020-06-29 10:13 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Generally, it's better to return a value directly than having out
parameter. It's cleaner and saves from some kinds of ugly bugs.
May also be faster.

Return next request from io_req_find_next() and friends directly
instead of passing out parameter.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 4cd6d24276c3..52e5c8730dd5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1578,7 +1578,7 @@ static void io_kill_linked_timeout(struct io_kiocb *req)
 		io_cqring_ev_posted(ctx);
 }
 
-static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt;
 
@@ -1588,13 +1588,13 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 	 * safe side.
 	 */
 	if (unlikely(list_empty(&req->link_list)))
-		return;
+		return NULL;
 
 	nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
 	list_del_init(&req->link_list);
 	if (!list_empty(&nxt->link_list))
 		nxt->flags |= REQ_F_LINK_HEAD;
-	*nxtptr = nxt;
+	return nxt;
 }
 
 /*
@@ -1620,10 +1620,10 @@ static void io_fail_links(struct io_kiocb *req)
 	io_cqring_ev_posted(ctx);
 }
 
-static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
+static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 {
 	if (likely(!(req->flags & REQ_F_LINK_HEAD)))
-		return;
+		return NULL;
 	req->flags &= ~REQ_F_LINK_HEAD;
 
 	if (req->flags & REQ_F_LINK_TIMEOUT)
@@ -1635,10 +1635,10 @@ static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
 	 * dependencies to the next request. In case of failure, fail the rest
 	 * of the chain.
 	 */
-	if (req->flags & REQ_F_FAIL_LINK)
-		io_fail_links(req);
-	else
-		io_req_link_next(req, nxt);
+	if (likely(!(req->flags & REQ_F_FAIL_LINK)))
+		return io_req_link_next(req);
+	io_fail_links(req);
+	return NULL;
 }
 
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
@@ -1701,9 +1701,8 @@ static void io_req_task_queue(struct io_kiocb *req)
 
 static void io_queue_next(struct io_kiocb *req)
 {
-	struct io_kiocb *nxt = NULL;
+	struct io_kiocb *nxt = io_req_find_next(req);
 
-	io_req_find_next(req, &nxt);
 	if (nxt)
 		io_req_task_queue(nxt);
 }
@@ -1753,13 +1752,15 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
  * Drop reference to request, return next in chain (if there is one) if this
  * was the last reference to this request.
  */
-__attribute__((nonnull))
-static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
 {
+	struct io_kiocb *nxt = NULL;
+
 	if (refcount_dec_and_test(&req->refs)) {
-		io_req_find_next(req, nxtptr);
+		nxt = io_req_find_next(req);
 		__io_free_req(req);
 	}
+	return nxt;
 }
 
 static void io_put_req(struct io_kiocb *req)
@@ -1780,7 +1781,7 @@ static struct io_wq_work *io_steal_work(struct io_kiocb *req)
 	if (refcount_read(&req->refs) != 1)
 		return NULL;
 
-	io_req_find_next(req, &nxt);
+	nxt = io_req_find_next(req);
 	if (!nxt)
 		return NULL;
 
@@ -4465,7 +4466,7 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 	hash_del(&req->hash_node);
 	io_poll_complete(req, req->result, 0);
 	req->flags |= REQ_F_COMP_LOCKED;
-	io_put_req_find_next(req, nxt);
+	*nxt = io_put_req_find_next(req);
 	spin_unlock_irq(&ctx->completion_lock);
 
 	io_cqring_ev_posted(ctx);
@@ -5915,9 +5916,8 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	}
 
 err:
-	nxt = NULL;
 	/* drop submission reference */
-	io_put_req_find_next(req, &nxt);
+	nxt = io_put_req_find_next(req);
 
 	if (linked_timeout) {
 		if (!ret)
-- 
2.24.0


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 3/5] io_uring: kill REQ_F_TIMEOUT
  2020-06-29 10:12 [PATCH 0/5] cleanup for req_free/find_next Pavel Begunkov
  2020-06-29 10:12 ` [PATCH 1/5] io_uring: deduplicate freeing linked timeouts Pavel Begunkov
  2020-06-29 10:13 ` [PATCH 2/5] io_uring: replace find_next() out param with ret Pavel Begunkov
@ 2020-06-29 10:13 ` Pavel Begunkov
  2020-06-29 10:13 ` [PATCH 4/5] io_uring: kill REQ_F_TIMEOUT_NOSEQ Pavel Begunkov
  2020-06-29 10:13 ` [PATCH 5/5] io_uring: fix use after free Pavel Begunkov
  4 siblings, 0 replies; 12+ messages in thread
From: Pavel Begunkov @ 2020-06-29 10:13 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Now REQ_F_TIMEOUT is set but never used, kill it

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 52e5c8730dd5..159882906a24 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -531,7 +531,6 @@ enum {
 	REQ_F_CUR_POS_BIT,
 	REQ_F_NOWAIT_BIT,
 	REQ_F_LINK_TIMEOUT_BIT,
-	REQ_F_TIMEOUT_BIT,
 	REQ_F_ISREG_BIT,
 	REQ_F_TIMEOUT_NOSEQ_BIT,
 	REQ_F_COMP_LOCKED_BIT,
@@ -574,8 +573,6 @@ enum {
 	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
 	/* has linked timeout */
 	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
-	/* timeout request */
-	REQ_F_TIMEOUT		= BIT(REQ_F_TIMEOUT_BIT),
 	/* regular file */
 	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
 	/* no timeout sequence */
@@ -5040,7 +5037,6 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	data = &req->io->timeout;
 	data->req = req;
-	req->flags |= REQ_F_TIMEOUT;
 
 	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
 		return -EFAULT;
-- 
2.24.0


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 4/5] io_uring: kill REQ_F_TIMEOUT_NOSEQ
  2020-06-29 10:12 [PATCH 0/5] cleanup for req_free/find_next Pavel Begunkov
                   ` (2 preceding siblings ...)
  2020-06-29 10:13 ` [PATCH 3/5] io_uring: kill REQ_F_TIMEOUT Pavel Begunkov
@ 2020-06-29 10:13 ` Pavel Begunkov
  2020-06-29 10:13 ` [PATCH 5/5] io_uring: fix use after free Pavel Begunkov
  4 siblings, 0 replies; 12+ messages in thread
From: Pavel Begunkov @ 2020-06-29 10:13 UTC (permalink / raw)
  To: Jens Axboe, io-uring

There are too many useless flags; kill REQ_F_TIMEOUT_NOSEQ, which can be
easily inferred from req.timeout itself.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 159882906a24..40996f25822e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -532,7 +532,6 @@ enum {
 	REQ_F_NOWAIT_BIT,
 	REQ_F_LINK_TIMEOUT_BIT,
 	REQ_F_ISREG_BIT,
-	REQ_F_TIMEOUT_NOSEQ_BIT,
 	REQ_F_COMP_LOCKED_BIT,
 	REQ_F_NEED_CLEANUP_BIT,
 	REQ_F_OVERFLOW_BIT,
@@ -575,8 +574,6 @@ enum {
 	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
 	/* regular file */
 	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
-	/* no timeout sequence */
-	REQ_F_TIMEOUT_NOSEQ	= BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
 	/* completion under lock */
 	REQ_F_COMP_LOCKED	= BIT(REQ_F_COMP_LOCKED_BIT),
 	/* needs cleanup */
@@ -1010,6 +1007,11 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
 	complete(&ctx->ref_comp);
 }
 
+static inline bool io_is_timeout_noseq(struct io_kiocb *req)
+{
+	return !req->timeout.off;
+}
+
 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 {
 	struct io_ring_ctx *ctx;
@@ -1222,7 +1224,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
 							struct io_kiocb, list);
 
-		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+		if (io_is_timeout_noseq(req))
 			break;
 		if (req->timeout.target_seq != ctx->cached_cq_tail
 					- atomic_read(&ctx->cq_timeouts))
@@ -5064,8 +5066,7 @@ static int io_timeout(struct io_kiocb *req)
 	 * timeout event to be satisfied. If it isn't set, then this is
 	 * a pure timeout request, sequence isn't used.
 	 */
-	if (!off) {
-		req->flags |= REQ_F_TIMEOUT_NOSEQ;
+	if (io_is_timeout_noseq(req)) {
 		entry = ctx->timeout_list.prev;
 		goto add;
 	}
@@ -5080,7 +5081,7 @@ static int io_timeout(struct io_kiocb *req)
 	list_for_each_prev(entry, &ctx->timeout_list) {
 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
 
-		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+		if (io_is_timeout_noseq(nxt))
 			continue;
 		/* nxt.seq is behind @tail, otherwise would've been completed */
 		if (off >= nxt->timeout.target_seq - tail)
-- 
2.24.0


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 5/5] io_uring: fix use after free
  2020-06-29 10:12 [PATCH 0/5] cleanup for req_free/find_next Pavel Begunkov
                   ` (3 preceding siblings ...)
  2020-06-29 10:13 ` [PATCH 4/5] io_uring: kill REQ_F_TIMEOUT_NOSEQ Pavel Begunkov
@ 2020-06-29 10:13 ` Pavel Begunkov
  2020-07-03  2:39   ` Jann Horn
  4 siblings, 1 reply; 12+ messages in thread
From: Pavel Begunkov @ 2020-06-29 10:13 UTC (permalink / raw)
  To: Jens Axboe, io-uring

After __io_free_req() puts a ctx ref, it should be assumed that the ctx
may already be gone. However, it can still be accessed when putting back
the fallback req. Free the req first and then put the ctx ref.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 40996f25822e..013f31e35e78 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1526,12 +1526,15 @@ static void io_dismantle_req(struct io_kiocb *req)
 
 static void __io_free_req(struct io_kiocb *req)
 {
+	struct io_ring_ctx *ctx;
+
 	io_dismantle_req(req);
-	percpu_ref_put(&req->ctx->refs);
+	ctx = req->ctx;
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
 	else
-		clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
+		clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
+	percpu_ref_put(&ctx->refs);
 }
 
 static bool io_link_cancel_timeout(struct io_kiocb *req)
-- 
2.24.0


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] io_uring: fix use after free
  2020-06-29 10:13 ` [PATCH 5/5] io_uring: fix use after free Pavel Begunkov
@ 2020-07-03  2:39   ` Jann Horn
  2020-07-03 19:48     ` Pavel Begunkov
  0 siblings, 1 reply; 12+ messages in thread
From: Jann Horn @ 2020-07-03  2:39 UTC (permalink / raw)
  To: Pavel Begunkov; +Cc: Jens Axboe, io-uring

On Mon, Jun 29, 2020 at 10:44 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
> After __io_free_req() put a ctx ref, it should assumed that the ctx may
> already be gone. However, it can be accessed to put back fallback req.
> Free req first and then put a req.

Please stick "Fixes" tags on bug fixes to make it easy to see when the
fixed bug was introduced (especially for ones that fix severe issues
like UAFs). From a cursory glance, it kinda seems like this one
_might_ have been introduced in 2b85edfc0c90ef, which would mean that
it landed in 5.6? But I can't really tell for sure without investing
more time; you probably know that better.

And if this actually does affect existing releases, please also stick
a "Cc: stable@vger.kernel.org" tag on it so that the fix can be
shipped to users of those releases.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] io_uring: fix use after free
  2020-07-03  2:39   ` Jann Horn
@ 2020-07-03 19:48     ` Pavel Begunkov
  2020-07-03 21:32       ` Jann Horn
  0 siblings, 1 reply; 12+ messages in thread
From: Pavel Begunkov @ 2020-07-03 19:48 UTC (permalink / raw)
  To: Jann Horn; +Cc: Jens Axboe, io-uring

On 03/07/2020 05:39, Jann Horn wrote:
> On Mon, Jun 29, 2020 at 10:44 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
>> After __io_free_req() put a ctx ref, it should assumed that the ctx may
>> already be gone. However, it can be accessed to put back fallback req.
>> Free req first and then put a req.
> 
> Please stick "Fixes" tags on bug fixes to make it easy to see when the
> fixed bug was introduced (especially for ones that fix severe issues
> like UAFs). From a cursory glance, it kinda seems like this one
> _might_ have been introduced in 2b85edfc0c90ef, which would mean that
> it landed in 5.6? But I can't really tell for sure without investing
> more time; you probably know that better.

It was there from the beginning,
0ddf92e848ab7 ("io_uring: provide fallback request for OOM situations")

> 
> And if this actually does affect existing releases, please also stick
> a "Cc: stable@vger.kernel.org" tag on it so that the fix can be
> shipped to users of those releases.

As mentioned in the cover letter, it's pretty unlikely to ever happen.
No one seems to have seen it since its introduction in November 2019.
And as the patch can't be backported automatically, not sure it's worth
the effort. Am I misjudging here?

-- 
Pavel Begunkov

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] io_uring: fix use after free
  2020-07-03 19:48     ` Pavel Begunkov
@ 2020-07-03 21:32       ` Jann Horn
  2020-07-04  6:49         ` Pavel Begunkov
  0 siblings, 1 reply; 12+ messages in thread
From: Jann Horn @ 2020-07-03 21:32 UTC (permalink / raw)
  To: Pavel Begunkov; +Cc: Jens Axboe, io-uring

On Fri, Jul 3, 2020 at 9:50 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
> On 03/07/2020 05:39, Jann Horn wrote:
> > On Mon, Jun 29, 2020 at 10:44 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
> >> After __io_free_req() put a ctx ref, it should assumed that the ctx may
> >> already be gone. However, it can be accessed to put back fallback req.
> >> Free req first and then put a req.
> >
> > Please stick "Fixes" tags on bug fixes to make it easy to see when the
> > fixed bug was introduced (especially for ones that fix severe issues
> > like UAFs). From a cursory glance, it kinda seems like this one
> > _might_ have been introduced in 2b85edfc0c90ef, which would mean that
> > it landed in 5.6? But I can't really tell for sure without investing
> > more time; you probably know that better.
>
> It was there from the beginning,
> 0ddf92e848ab7 ("io_uring: provide fallback request for OOM situations")
>
> >
> > And if this actually does affect existing releases, please also stick
> > a "Cc: stable@vger.kernel.org" tag on it so that the fix can be
> > shipped to users of those releases.
>
> As mentioned in the cover letter, it's pretty unlikely to ever happen.
> No one seems to have seen it since its introduction in November 2019.
> And as the patch can't be backported automatically, not sure it's worth
> the effort. Am I misjudging here?

Use-after-free bugs are often security bugs; in particular when, as in
this case, data is written through the freed pointer. That means that
even if this is extremely unlikely to occur in practice under normal
circumstances, you should assume that someone may invest a significant
amount of time into engineering some way to make this bug happen. If
you can show that the bug is _impossible_ to hit, that's fine, I
guess. But if it's merely "it's a really tight race and unlikely to
happen", I think we should be fixing it on the stable branches.
For example, on kernels with PREEMPT=y (typically you get that config
either with "lowlatency" kernels or on Android, I think), attackers
can play games like giving their own tasks "idle" scheduling priority
and intentionally preempting them right in the middle of a race
window, which makes it possible to delay execution for intervals on
the order of seconds if the attacker can manage to make the scheduler
IPI hit in the right place.

I guess one way to hit this bug on mainline would be to go through the
io_async_task_func() canceled==true case, right? That will drop
references on a request at the very end, without holding any locks or
so that might keep the context alive.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] io_uring: fix use after free
  2020-07-03 21:32       ` Jann Horn
@ 2020-07-04  6:49         ` Pavel Begunkov
  2020-07-07 12:46           ` Pavel Begunkov
  0 siblings, 1 reply; 12+ messages in thread
From: Pavel Begunkov @ 2020-07-04  6:49 UTC (permalink / raw)
  To: Jann Horn; +Cc: Jens Axboe, io-uring

On 04/07/2020 00:32, Jann Horn wrote:
> On Fri, Jul 3, 2020 at 9:50 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
>> On 03/07/2020 05:39, Jann Horn wrote:
>>> On Mon, Jun 29, 2020 at 10:44 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
>>>> After __io_free_req() put a ctx ref, it should assumed that the ctx may
>>>> already be gone. However, it can be accessed to put back fallback req.
>>>> Free req first and then put a req.
>>>
>>> Please stick "Fixes" tags on bug fixes to make it easy to see when the
>>> fixed bug was introduced (especially for ones that fix severe issues
>>> like UAFs). From a cursory glance, it kinda seems like this one
>>> _might_ have been introduced in 2b85edfc0c90ef, which would mean that
>>> it landed in 5.6? But I can't really tell for sure without investing
>>> more time; you probably know that better.
>>
>> It was there from the beginning,
>> 0ddf92e848ab7 ("io_uring: provide fallback request for OOM situations")
>>
>>>
>>> And if this actually does affect existing releases, please also stick
>>> a "Cc: stable@vger.kernel.org" tag on it so that the fix can be
>>> shipped to users of those releases.
>>
>> As mentioned in the cover letter, it's pretty unlikely to ever happen.
>> No one seems to have seen it since its introduction in November 2019.
>> And as the patch can't be backported automatically, not sure it's worth
>> the effort. Am I misjudging here?
> 
> Use-after-free bugs are often security bugs; in particular when, as in
> this case, data is written through the freed pointer. That means that
> even if this is extremely unlikely to occur in practice under normal
> circumstances, you should assume that someone may invest a significant
> amount of time into engineering some way to make this bug happen. If

Good point, agree.

> you can show that the bug is _impossible_ to hit, that's fine, I
> guess. But if it's merely "it's a really tight race and unlikely to
> happen", I think we should be fixing it on the stable branches.
> For example, on kernels with PREEMPT=y (typically you get that config
> either with "lowlatency" kernels or on Android, I think), attackers
> can play games like giving their own tasks "idle" scheduling priority
> and intentionally preempting them right in the middle of a race
> window, which makes it possible to delay execution for intervals on
> the order of seconds if the attacker can manage to make the scheduler
> IPI hit in the right place.
> 
> I guess one way to hit this bug on mainline would be to go through the
> io_async_task_func() canceled==true case, right? That will drop
> references on a request at the very end, without holding any locks or
> so that might keep the context alive.

Yes, for an example, but even simpler to submit async reads/writes
(i.e. with kiocb->ki_complete set) that would trigger io_complete_rw()
from irq. Or do something with timeout or poll requests.

-- 
Pavel Begunkov

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] io_uring: fix use after free
  2020-07-04  6:49         ` Pavel Begunkov
@ 2020-07-07 12:46           ` Pavel Begunkov
  2020-07-07 13:56             ` Jens Axboe
  0 siblings, 1 reply; 12+ messages in thread
From: Pavel Begunkov @ 2020-07-07 12:46 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Jann Horn, io-uring

On 04/07/2020 09:49, Pavel Begunkov wrote:
> On 04/07/2020 00:32, Jann Horn wrote:
>> On Fri, Jul 3, 2020 at 9:50 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
>>> On 03/07/2020 05:39, Jann Horn wrote:
>>>> On Mon, Jun 29, 2020 at 10:44 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
>>>>> After __io_free_req() put a ctx ref, it should assumed that the ctx may
>>>>> already be gone. However, it can be accessed to put back fallback req.
>>>>> Free req first and then put a req.
>>>>
>>>> Please stick "Fixes" tags on bug fixes to make it easy to see when the
>>>> fixed bug was introduced (especially for ones that fix severe issues
>>>> like UAFs). From a cursory glance, it kinda seems like this one
>>>> _might_ have been introduced in 2b85edfc0c90ef, which would mean that
>>>> it landed in 5.6? But I can't really tell for sure without investing
>>>> more time; you probably know that better.
>>>
>>> It was there from the beginning,
>>> 0ddf92e848ab7 ("io_uring: provide fallback request for OOM situations")
>>>
>>>>
>>>> And if this actually does affect existing releases, please also stick
>>>> a "Cc: stable@vger.kernel.org" tag on it so that the fix can be
>>>> shipped to users of those releases.
>>>
>>> As mentioned in the cover letter, it's pretty unlikely to ever happen.
>>> No one seems to have seen it since its introduction in November 2019.
>>> And as the patch can't be backported automatically, not sure it's worth
>>> the effort. Am I misjudging here?
>>
>> Use-after-free bugs are often security bugs; in particular when, as in
>> this case, data is written through the freed pointer. That means that
>> even if this is extremely unlikely to occur in practice under normal
>> circumstances, you should assume that someone may invest a significant
>> amount of time into engineering some way to make this bug happen. If

Jens, how would you prefer to handle this for 5.8? I can send a patch, but
1. it's fixed in for-5.9
2. it would be a merge conflict regardless of 1.

-- 
Pavel Begunkov

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] io_uring: fix use after free
  2020-07-07 12:46           ` Pavel Begunkov
@ 2020-07-07 13:56             ` Jens Axboe
  0 siblings, 0 replies; 12+ messages in thread
From: Jens Axboe @ 2020-07-07 13:56 UTC (permalink / raw)
  To: Pavel Begunkov; +Cc: Jann Horn, io-uring

On 7/7/20 6:46 AM, Pavel Begunkov wrote:
> On 04/07/2020 09:49, Pavel Begunkov wrote:
>> On 04/07/2020 00:32, Jann Horn wrote:
>>> On Fri, Jul 3, 2020 at 9:50 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
>>>> On 03/07/2020 05:39, Jann Horn wrote:
>>>>> On Mon, Jun 29, 2020 at 10:44 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
>>>>>> After __io_free_req() put a ctx ref, it should assumed that the ctx may
>>>>>> already be gone. However, it can be accessed to put back fallback req.
>>>>>> Free req first and then put a req.
>>>>>
>>>>> Please stick "Fixes" tags on bug fixes to make it easy to see when the
>>>>> fixed bug was introduced (especially for ones that fix severe issues
>>>>> like UAFs). From a cursory glance, it kinda seems like this one
>>>>> _might_ have been introduced in 2b85edfc0c90ef, which would mean that
>>>>> it landed in 5.6? But I can't really tell for sure without investing
>>>>> more time; you probably know that better.
>>>>
>>>> It was there from the beginning,
>>>> 0ddf92e848ab7 ("io_uring: provide fallback request for OOM situations")
>>>>
>>>>>
>>>>> And if this actually does affect existing releases, please also stick
>>>>> a "Cc: stable@vger.kernel.org" tag on it so that the fix can be
>>>>> shipped to users of those releases.
>>>>
>>>> As mentioned in the cover letter, it's pretty unlikely to ever happen.
>>>> No one seems to have seen it since its introduction in November 2019.
>>>> And as the patch can't be backported automatically, not sure it's worth
>>>> the effort. Am I misjudging here?
>>>
>>> Use-after-free bugs are often security bugs; in particular when, as in
>>> this case, data is written through the freed pointer. That means that
>>> even if this is extremely unlikely to occur in practice under normal
>>> circumstances, you should assume that someone may invest a significant
>>> amount of time into engineering some way to make this bug happen. If
> 
> Jens, how would you prefer to handle this for 5.8? I can send a patch, but
> 1. it's fixed in for-5.9
> 2. it would be a merge conflict regardless of 1.

Given the type of bug and conditions required to trigger it, I think we
should just send it in for stable once it lands in 5.9. We need GFP_KERNEL
allocations to fail, the fallback req the last to be released, the ctx
freed (and reused) in an extremely short window.

If it was more severe or easier to trigger, then we could deal with the
pain of the conflict. But I don't think it's worth it for this one.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2020-07-07 13:56 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-29 10:12 [PATCH 0/5] cleanup for req_free/find_next Pavel Begunkov
2020-06-29 10:12 ` [PATCH 1/5] io_uring: deduplicate freeing linked timeouts Pavel Begunkov
2020-06-29 10:13 ` [PATCH 2/5] io_uring: replace find_next() out param with ret Pavel Begunkov
2020-06-29 10:13 ` [PATCH 3/5] io_uring: kill REQ_F_TIMEOUT Pavel Begunkov
2020-06-29 10:13 ` [PATCH 4/5] io_uring: kill REQ_F_TIMEOUT_NOSEQ Pavel Begunkov
2020-06-29 10:13 ` [PATCH 5/5] io_uring: fix use after free Pavel Begunkov
2020-07-03  2:39   ` Jann Horn
2020-07-03 19:48     ` Pavel Begunkov
2020-07-03 21:32       ` Jann Horn
2020-07-04  6:49         ` Pavel Begunkov
2020-07-07 12:46           ` Pavel Begunkov
2020-07-07 13:56             ` Jens Axboe

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).