linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/4] forbid using open/close/statx with {SQ,IO}POLL
@ 2020-06-02 12:34 Pavel Begunkov
  2020-06-02 12:34 ` [PATCH 1/4] io_uring: fix " Pavel Begunkov
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-06-02 12:34 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

The first patch adds {SQPOLL,IOPOLL} checks for open, close, etc.
The other three are just cleanups on top.

notes:
- it cures the symptoms, but it would be great to find out what is behind the null-deref in [1/4]

- need to check whether epoll and sfr are affected by {SQ,IO}POLL

- it still allows statx with SQPOLL, because statx has a bunch of rules
for ignoring dirfd, e.g. when an absolute path is specified.
Let's handle it separately.
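
For example, per statx(2) semantics the dirfd is not looked at when the
path is absolute. An untested userspace sketch of that case (the fd value
and the path below are made up):

  #include <sys/stat.h>		/* statx(), STATX_BASIC_STATS */

  struct statx stx;
  int dirfd = -1;	/* arbitrary: ignored because the path is absolute */

  statx(dirfd, "/etc/hostname", 0, STATX_BASIC_STATS, &stx);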

Pavel Begunkov (4):
  io_uring: fix open/close/statx with {SQ,IO}POLL
  io_uring: do build_open_how() only once
  io_uring: deduplicate io_openat{,2}_prep()
  io_uring: move send/recv IOPOLL check into prep

 fs/io_uring.c | 78 +++++++++++++++++++++------------------------------
 1 file changed, 32 insertions(+), 46 deletions(-)

-- 
2.24.0


* [PATCH 1/4] io_uring: fix open/close/statx with {SQ,IO}POLL
  2020-06-02 12:34 [PATCH 0/4] forbid using open/close/statx with {SQ,IO}POLL Pavel Begunkov
@ 2020-06-02 12:34 ` Pavel Begunkov
  2020-06-02 14:12   ` Pavel Begunkov
  2020-06-02 12:34 ` [PATCH 2/4] io_uring: do build_open_how() only once Pavel Begunkov
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 6+ messages in thread
From: Pavel Begunkov @ 2020-06-02 12:34 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

Trying to use them with IORING_SETUP_IOPOLL triggers the following oops:

RIP: 0010:io_iopoll_getevents+0x111/0x5a0
Call Trace:
 ? _raw_spin_unlock_irqrestore+0x24/0x40
 ? do_send_sig_info+0x64/0x90
 io_iopoll_reap_events.part.0+0x5e/0xa0
 io_ring_ctx_wait_and_kill+0x132/0x1c0
 io_uring_release+0x20/0x30
 __fput+0xcd/0x230
 ____fput+0xe/0x10
 task_work_run+0x67/0xa0
 do_exit+0x353/0xb10
 ? handle_mm_fault+0xd4/0x200
 ? syscall_trace_enter+0x18c/0x2c0
 do_group_exit+0x43/0xa0
 __x64_sys_exit_group+0x18/0x20
 do_syscall_64+0x60/0x1e0
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

Also, the SQPOLL thread can't know which file table to use with
open/close. Disallow all these cases.
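
For reference, a minimal reproducer sketch using liburing (untested; the
path is arbitrary and error handling is omitted). On an unpatched kernel
tearing the ring down may hit the oops above; with this patch the openat
request is instead rejected with -EINVAL:

#include <liburing.h>
#include <fcntl.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;

	/* set up a polled ring, then submit a request that can't be polled */
	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_openat(sqe, AT_FDCWD, "/tmp/file", O_RDONLY, 0);
	io_uring_submit(&ring);

	/* unpatched: ring teardown may oops as in the trace above */
	io_uring_queue_exit(&ring);
	return 0;
}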

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 732ec73ec3c0..7208f91e9e77 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2990,6 +2990,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	const char __user *fname;
 	int ret;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
@@ -3023,6 +3025,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	size_t len;
 	int ret;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
@@ -3373,6 +3377,8 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
@@ -3417,6 +3423,8 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	 */
 	req->work.flags |= IO_WQ_WORK_NO_CANCEL;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
 	    sqe->rw_flags || sqe->buf_index)
 		return -EINVAL;
-- 
2.24.0


* [PATCH 2/4] io_uring: do build_open_how() only once
  2020-06-02 12:34 [PATCH 0/4] forbid using open/close/statx with {SQ,IO}POLL Pavel Begunkov
  2020-06-02 12:34 ` [PATCH 1/4] io_uring: fix " Pavel Begunkov
@ 2020-06-02 12:34 ` Pavel Begunkov
  2020-06-02 12:34 ` [PATCH 3/4] io_uring: deduplicate io_openat{,2}_prep() Pavel Begunkov
  2020-06-02 12:34 ` [PATCH 4/4] io_uring: move send/recv IOPOLL check into prep Pavel Begunkov
  3 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-06-02 12:34 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

build_open_how() just adjusts open_flags/mode. Do it once during prep
instead of storing the raw values and redoing it at issue time.
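
For reference, build_open_how() (fs/open.c) boils down to roughly the
following, so doing it at prep time is cheap (simplified sketch, not the
verbatim helper):

	struct open_how how = {
		.flags	= flags & VALID_OPEN_FLAGS,
		.mode	= mode & S_IALLUGO,
	};

	/* O_PATH beats everything else */
	if (how.flags & O_PATH)
		how.flags &= O_PATH_FLAGS;
	/* modes only make sense with create-like flags */
	if (!WILL_CREATE(how.flags))
		how.mode = 0;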

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7208f91e9e77..cdfffc23e10a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2988,6 +2988,7 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	const char __user *fname;
+	u64 flags, mode;
 	int ret;
 
 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
@@ -2999,13 +3000,14 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->flags & REQ_F_NEED_CLEANUP)
 		return 0;
 
-	req->open.dfd = READ_ONCE(sqe->fd);
-	req->open.how.mode = READ_ONCE(sqe->len);
-	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
-	req->open.how.flags = READ_ONCE(sqe->open_flags);
+	mode = READ_ONCE(sqe->len);
+	flags = READ_ONCE(sqe->open_flags);
 	if (force_o_largefile())
-		req->open.how.flags |= O_LARGEFILE;
+		flags |= O_LARGEFILE;
+	req->open.how = build_open_how(flags, mode);
 
+	req->open.dfd = READ_ONCE(sqe->fd);
+	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	req->open.filename = getname(fname);
 	if (IS_ERR(req->open.filename)) {
 		ret = PTR_ERR(req->open.filename);
@@ -3099,7 +3101,6 @@ static int io_openat2(struct io_kiocb *req, bool force_nonblock)
 
 static int io_openat(struct io_kiocb *req, bool force_nonblock)
 {
-	req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
 	return io_openat2(req, force_nonblock);
 }
 
-- 
2.24.0


* [PATCH 3/4] io_uring: deduplicate io_openat{,2}_prep()
  2020-06-02 12:34 [PATCH 0/4] forbid using open/close/statx with {SQ,IO}POLL Pavel Begunkov
  2020-06-02 12:34 ` [PATCH 1/4] io_uring: fix " Pavel Begunkov
  2020-06-02 12:34 ` [PATCH 2/4] io_uring: do build_open_how() only once Pavel Begunkov
@ 2020-06-02 12:34 ` Pavel Begunkov
  2020-06-02 12:34 ` [PATCH 4/4] io_uring: move send/recv IOPOLL check into prep Pavel Begunkov
  3 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-06-02 12:34 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

io_openat_prep() and io_openat2_prep() are identical except for how
struct open_how is built. Deduplicate it with a helper.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 55 ++++++++++++++++++---------------------------------
 1 file changed, 19 insertions(+), 36 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index cdfffc23e10a..9fe90a66a31e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2985,26 +2985,21 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
 	return 0;
 }
 
-static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	const char __user *fname;
-	u64 flags, mode;
 	int ret;
 
 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index)
+	if (unlikely(sqe->ioprio || sqe->buf_index))
 		return -EINVAL;
-	if (req->flags & REQ_F_FIXED_FILE)
+	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
-	if (req->flags & REQ_F_NEED_CLEANUP)
-		return 0;
 
-	mode = READ_ONCE(sqe->len);
-	flags = READ_ONCE(sqe->open_flags);
-	if (force_o_largefile())
-		flags |= O_LARGEFILE;
-	req->open.how = build_open_how(flags, mode);
+	/* open.how should be already initialised */
+	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
+		req->open.how.flags |= O_LARGEFILE;
 
 	req->open.dfd = READ_ONCE(sqe->fd);
 	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -3014,33 +3009,33 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		req->open.filename = NULL;
 		return ret;
 	}
-
 	req->open.nofile = rlimit(RLIMIT_NOFILE);
 	req->flags |= REQ_F_NEED_CLEANUP;
 	return 0;
 }
 
+static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	u64 flags, mode;
+
+	if (req->flags & REQ_F_NEED_CLEANUP)
+		return 0;
+	mode = READ_ONCE(sqe->len);
+	flags = READ_ONCE(sqe->open_flags);
+	req->open.how = build_open_how(flags, mode);
+	return __io_openat_prep(req, sqe);
+}
+
 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct open_how __user *how;
-	const char __user *fname;
 	size_t len;
 	int ret;
 
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->buf_index)
-		return -EINVAL;
-	if (req->flags & REQ_F_FIXED_FILE)
-		return -EBADF;
 	if (req->flags & REQ_F_NEED_CLEANUP)
 		return 0;
-
-	req->open.dfd = READ_ONCE(sqe->fd);
-	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 	len = READ_ONCE(sqe->len);
-
 	if (len < OPEN_HOW_SIZE_VER0)
 		return -EINVAL;
 
@@ -3049,19 +3044,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (ret)
 		return ret;
 
-	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
-		req->open.how.flags |= O_LARGEFILE;
-
-	req->open.filename = getname(fname);
-	if (IS_ERR(req->open.filename)) {
-		ret = PTR_ERR(req->open.filename);
-		req->open.filename = NULL;
-		return ret;
-	}
-
-	req->open.nofile = rlimit(RLIMIT_NOFILE);
-	req->flags |= REQ_F_NEED_CLEANUP;
-	return 0;
+	return __io_openat_prep(req, sqe);
 }
 
 static int io_openat2(struct io_kiocb *req, bool force_nonblock)
-- 
2.24.0


* [PATCH 4/4] io_uring: move send/recv IOPOLL check into prep
  2020-06-02 12:34 [PATCH 0/4] forbid using open/close/statx with {SQ,IO}POLL Pavel Begunkov
                   ` (2 preceding siblings ...)
  2020-06-02 12:34 ` [PATCH 3/4] io_uring: deduplicate io_openat{,2}_prep() Pavel Begunkov
@ 2020-06-02 12:34 ` Pavel Begunkov
  3 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-06-02 12:34 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

Fail send/recv requests early, at prep time, in case of
IORING_SETUP_IOPOLL, so the check is done only once. This also removes
duplication.
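
From userspace the behaviour is unchanged, the error is just detected
earlier; e.g. reusing the IOPOLL ring setup from the reproducer sketch
in 1/4 (sockfd, buf and buflen are hypothetical):

	sqe = io_uring_get_sqe(&ring);		/* ring created with IORING_SETUP_IOPOLL */
	io_uring_prep_send(sqe, sockfd, buf, buflen, 0);
	io_uring_submit(&ring);
	/* the request completes with cqe->res == -EINVAL */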

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9fe90a66a31e..d7e9090a6be7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3541,6 +3541,9 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_async_ctx *io = req->io;
 	int ret;
 
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
+
 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	sr->len = READ_ONCE(sqe->len);
@@ -3570,9 +3573,6 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
 	struct socket *sock;
 	int ret;
 
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-
 	sock = sock_from_file(req->file, &ret);
 	if (sock) {
 		struct io_async_ctx io;
@@ -3626,9 +3626,6 @@ static int io_send(struct io_kiocb *req, bool force_nonblock)
 	struct socket *sock;
 	int ret;
 
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-
 	sock = sock_from_file(req->file, &ret);
 	if (sock) {
 		struct io_sr_msg *sr = &req->sr_msg;
@@ -3781,6 +3778,9 @@ static int io_recvmsg_prep(struct io_kiocb *req,
 	struct io_async_ctx *io = req->io;
 	int ret;
 
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
+
 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	sr->len = READ_ONCE(sqe->len);
@@ -3809,9 +3809,6 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
 	struct socket *sock;
 	int ret, cflags = 0;
 
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-
 	sock = sock_from_file(req->file, &ret);
 	if (sock) {
 		struct io_buffer *kbuf;
@@ -3873,9 +3870,6 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock)
 	struct socket *sock;
 	int ret, cflags = 0;
 
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-
 	sock = sock_from_file(req->file, &ret);
 	if (sock) {
 		struct io_sr_msg *sr = &req->sr_msg;
-- 
2.24.0


* Re: [PATCH 1/4] io_uring: fix open/close/statx with {SQ,IO}POLL
  2020-06-02 12:34 ` [PATCH 1/4] io_uring: fix " Pavel Begunkov
@ 2020-06-02 14:12   ` Pavel Begunkov
  0 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-06-02 14:12 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

On 02/06/2020 15:34, Pavel Begunkov wrote:
> Trying to use them with IORING_SETUP_IOPOLL triggers the following oops:
> 
> RIP: 0010:io_iopoll_getevents+0x111/0x5a0
> Call Trace:
>  ? _raw_spin_unlock_irqrestore+0x24/0x40
>  ? do_send_sig_info+0x64/0x90
>  io_iopoll_reap_events.part.0+0x5e/0xa0
>  io_ring_ctx_wait_and_kill+0x132/0x1c0
>  io_uring_release+0x20/0x30
>  __fput+0xcd/0x230
>  ____fput+0xe/0x10
>  task_work_run+0x67/0xa0
>  do_exit+0x353/0xb10
>  ? handle_mm_fault+0xd4/0x200
>  ? syscall_trace_enter+0x18c/0x2c0
>  do_group_exit+0x43/0xa0
>  __x64_sys_exit_group+0x18/0x20
>  do_syscall_64+0x60/0x1e0
>  entry_SYSCALL_64_after_hwframe+0x44/0xa9

io_do_iopoll()
{
	...
	/* note ki_filp here */
	ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
}

Hmm, I'll double check later that only read*/write* can be done
with IOPOLL, and send a follow-up patch if necessary.

> 
> Also, the SQPOLL thread can't know which file table to use with
> open/close. Disallow all these cases.
> 
> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
> ---
>  fs/io_uring.c | 8 ++++++++
>  1 file changed, 8 insertions(+)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 732ec73ec3c0..7208f91e9e77 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2990,6 +2990,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  	const char __user *fname;
>  	int ret;
>  
> +	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
> +		return -EINVAL;
>  	if (sqe->ioprio || sqe->buf_index)
>  		return -EINVAL;
>  	if (req->flags & REQ_F_FIXED_FILE)
> @@ -3023,6 +3025,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  	size_t len;
>  	int ret;
>  
> +	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
> +		return -EINVAL;
>  	if (sqe->ioprio || sqe->buf_index)
>  		return -EINVAL;
>  	if (req->flags & REQ_F_FIXED_FILE)
> @@ -3373,6 +3377,8 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
>  
>  static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  {
> +	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
> +		return -EINVAL;
>  	if (sqe->ioprio || sqe->buf_index)
>  		return -EINVAL;
>  	if (req->flags & REQ_F_FIXED_FILE)
> @@ -3417,6 +3423,8 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  	 */
>  	req->work.flags |= IO_WQ_WORK_NO_CANCEL;
>  
> +	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
> +		return -EINVAL;
>  	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
>  	    sqe->rw_flags || sqe->buf_index)
>  		return -EINVAL;
> 

-- 
Pavel Begunkov
