On 06/02/2020 19:51, Pavel Begunkov wrote:
> After defer, a request will be prepared, which includes allocating an
> iovec if needed, and then submitted through io_wq_submit_work() rather
> than a custom handler (e.g. io_rw_async()/io_sendrecv_async()).
> However, it'll leak the iovec, as it's in io-wq and the code goes as
> follows:
>
> io_read() {
> 	if (!io_wq_current_is_worker())
> 		kfree(iovec);
> }
>
> Put all deallocation logic in io_{read,write,send,recv}(), which will
> leave the memory allocated if going async with -EAGAIN.
>

Interestingly, this will fail badly if it returns -EAGAIN from io-wq
context. Apparently, I need to do a v2.
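A rough userspace model of that path (a sketch only: model_io_read(),
io_req and async_ctx are made-up names, the control flow is heavily
simplified, and it assumes io-wq's behaviour of re-running a work item
while its handler returns -EAGAIN; the comments mark where the real
code would touch freed memory):

#include <stdio.h>
#include <stdlib.h>

#define EAGAIN 11

struct async_ctx {
	void *iov;		/* stands in for req->io->rw.iov / msg.iov */
};

struct io_req {
	struct async_ctx *io;
};

/*
 * Models io_read()/io_sendmsg() with this patch applied: the iovec is
 * freed on every completion path, even when it is the very allocation
 * that was saved into req->io for the async retry.
 */
static int model_io_read(struct io_req *req, int ret2)
{
	void *iovec;

	if (!req->io) {
		/* nonblocking submission pass: would block, go async */
		iovec = malloc(64);
		req->io = calloc(1, sizeof(*req->io));
		req->io->iov = iovec;	/* handed off, left allocated: fine */
		return -EAGAIN;
	}

	if (ret2 == -EAGAIN) {
		/* "-EAGAIN from io-wq context" */
		iovec = req->io->iov;	/* import reuses the saved iovec */
		free(iovec);		/* out_free: unconditional kfree() */
		return -EAGAIN;		/* io-wq will re-run the work item */
	}

	/* a retry here would import req->io->iov again: use-after-free */
	return 0;
}

int main(void)
{
	struct io_req req = { NULL };
	int pass = 0, ret;

	/* models io-wq retrying the handler while it returns -EAGAIN */
	do {
		ret = model_io_read(&req, pass++ == 1 ? -EAGAIN : 0);
	} while (ret == -EAGAIN);

	printf("done after %d passes; req.io->iov is dangling\n", pass);
	return 0;
}

The first pass legitimately hands the iovec off to req->io and leaves
it allocated. The problem is the io-wq pass: the imported iovec and the
saved one are the same allocation, so the unconditional kfree() leaves
req->io pointing at freed memory for the retry.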
> It also fixes a leak after failed io_alloc_async_ctx() in
> io_{recv,send}_msg().
>
> Signed-off-by: Pavel Begunkov
> ---
>  fs/io_uring.c | 47 ++++++++++++-----------------------------------
>  1 file changed, 12 insertions(+), 35 deletions(-)
>
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index bff7a03e873f..ce3dbd2b1b5c 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2143,17 +2143,6 @@ static int io_alloc_async_ctx(struct io_kiocb *req)
>  	return req->io == NULL;
>  }
>  
> -static void io_rw_async(struct io_wq_work **workptr)
> -{
> -	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
> -	struct iovec *iov = NULL;
> -
> -	if (req->io->rw.iov != req->io->rw.fast_iov)
> -		iov = req->io->rw.iov;
> -	io_wq_submit_work(workptr);
> -	kfree(iov);
> -}
> -
>  static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
>  			     struct iovec *iovec, struct iovec *fast_iov,
>  			     struct iov_iter *iter)
> @@ -2166,7 +2155,6 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
>  
>  		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
>  	}
> -	req->work.func = io_rw_async;
>  	return 0;
>  }
>  
> @@ -2253,8 +2241,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
>  		}
>  	}
>  out_free:
> -	if (!io_wq_current_is_worker())
> -		kfree(iovec);
> +	kfree(iovec);
>  	return ret;
>  }
>  
> @@ -2359,8 +2346,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
>  		}
>  	}
>  out_free:
> -	if (!io_wq_current_is_worker())
> -		kfree(iovec);
> +	kfree(iovec);
>  	return ret;
>  }
>  
> @@ -2955,19 +2941,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
>  	return 0;
>  }
>  
> -#if defined(CONFIG_NET)
> -static void io_sendrecv_async(struct io_wq_work **workptr)
> -{
> -	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
> -	struct iovec *iov = NULL;
> -
> -	if (req->io->rw.iov != req->io->rw.fast_iov)
> -		iov = req->io->msg.iov;
> -	io_wq_submit_work(workptr);
> -	kfree(iov);
> -}
> -#endif
> -
>  static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  {
>  #if defined(CONFIG_NET)
> @@ -3036,17 +3009,19 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
>  		if (force_nonblock && ret == -EAGAIN) {
>  			if (req->io)
>  				return -EAGAIN;
> -			if (io_alloc_async_ctx(req))
> +			if (io_alloc_async_ctx(req)) {
> +				if (kmsg && kmsg->iov != kmsg->fast_iov)
> +					kfree(kmsg->iov);
>  				return -ENOMEM;
> +			}
>  			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
> -			req->work.func = io_sendrecv_async;
>  			return -EAGAIN;
>  		}
>  		if (ret == -ERESTARTSYS)
>  			ret = -EINTR;
>  	}
>  
> -	if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
> +	if (kmsg && kmsg->iov != kmsg->fast_iov)
>  		kfree(kmsg->iov);
>  	io_cqring_add_event(req, ret);
>  	if (ret < 0)
> @@ -3180,17 +3155,19 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
>  		if (force_nonblock && ret == -EAGAIN) {
>  			if (req->io)
>  				return -EAGAIN;
> -			if (io_alloc_async_ctx(req))
> +			if (io_alloc_async_ctx(req)) {
> +				if (kmsg && kmsg->iov != kmsg->fast_iov)
> +					kfree(kmsg->iov);
>  				return -ENOMEM;
> +			}
>  			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
> -			req->work.func = io_sendrecv_async;
>  			return -EAGAIN;
>  		}
>  		if (ret == -ERESTARTSYS)
>  			ret = -EINTR;
>  	}
>  
> -	if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
> +	if (kmsg && kmsg->iov != kmsg->fast_iov)
>  		kfree(kmsg->iov);
>  	io_cqring_add_event(req, ret);
>  	if (ret < 0)

-- 
Pavel Begunkov