From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <linux-fsdevel-owner@vger.kernel.org>
Received: from zeniv.linux.org.uk ([195.92.253.2]:58710 "EHLO
	ZenIV.linux.org.uk" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1754651AbeE1R5I (ORCPT
	<rfc822;linux-fsdevel@vger.kernel.org>);
	Mon, 28 May 2018 13:57:08 -0400
From: Al Viro <viro@ZenIV.linux.org.uk>
To: linux-fsdevel@vger.kernel.org
Cc: Christoph Hellwig
Subject: [PATCH v2 1/6] aio: take list removal to (some) callers of aio_complete()
Date: Mon, 28 May 2018 18:57:02 +0100
Message-Id: <20180528175707.10926-1-viro@ZenIV.linux.org.uk>
In-Reply-To: <20180528175430.GC30522@ZenIV.linux.org.uk>
References: <20180528175430.GC30522@ZenIV.linux.org.uk>
Sender: linux-fsdevel-owner@vger.kernel.org
List-ID: <linux-fsdevel.vger.kernel.org>

From: Al Viro <viro@zeniv.linux.org.uk>

We really want iocb out of io_cancel(2) reach before we start tearing
it down.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 fs/aio.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index e0b2f183fa1c..f95b167801c2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1073,14 +1073,6 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	unsigned tail, pos, head;
 	unsigned long flags;
 
-	if (!list_empty_careful(&iocb->ki_list)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&ctx->ctx_lock, flags);
-		list_del(&iocb->ki_list);
-		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-	}
-
 	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
@@ -1402,6 +1394,15 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
 	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
 
+	if (!list_empty_careful(&iocb->ki_list)) {
+		struct kioctx *ctx = iocb->ki_ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->ctx_lock, flags);
+		list_del(&iocb->ki_list);
+		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+	}
+
 	if (kiocb->ki_flags & IOCB_WRITE) {
 		struct inode *inode = file_inode(kiocb->ki_filp);
 
@@ -1594,20 +1595,26 @@ static inline bool __aio_poll_remove(struct poll_iocb *req)
 	return true;
 }
 
-static inline void __aio_poll_complete(struct poll_iocb *req, __poll_t mask)
+static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
 {
-	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
-	struct file *file = req->file;
-
+	fput(iocb->poll.file);
 	aio_complete(iocb, mangle_poll(mask), 0);
-	fput(file);
 }
 
 static void aio_poll_work(struct work_struct *work)
 {
-	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
+
+	if (!list_empty_careful(&iocb->ki_list)) {
+		struct kioctx *ctx = iocb->ki_ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->ctx_lock, flags);
+		list_del(&iocb->ki_list);
+		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+	}
 
-	__aio_poll_complete(req, req->events);
+	__aio_poll_complete(iocb, iocb->poll.events);
 }
 
 static int aio_poll_cancel(struct kiocb *iocb)
@@ -1658,7 +1665,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		list_del_init(&iocb->ki_list);
 		spin_unlock(&iocb->ki_ctx->ctx_lock);
 
-		__aio_poll_complete(req, mask);
+		__aio_poll_complete(iocb, mask);
 	} else {
 		req->events = mask;
 		INIT_WORK(&req->work, aio_poll_work);
@@ -1710,7 +1717,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
 	spin_unlock_irq(&ctx->ctx_lock);
 done:
 	if (mask)
-		__aio_poll_complete(req, mask);
+		__aio_poll_complete(aiocb, mask);
 	return -EIOCBQUEUED;
 out_fail:
 	fput(req->file);
-- 
2.11.0
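
For context, a minimal sketch of the completion-side pattern this patch
moves into the individual callers, using hypothetical *_demo stand-ins
for the real struct aio_kiocb and struct kioctx in fs/aio.c (which carry
many more fields). The point is that the request is unlinked from the
ctx_lock-protected cancellation list before any teardown begins, so a
concurrent io_cancel(2) walking that list can no longer reach the iocb:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* hypothetical, simplified stand-ins for the fs/aio.c types */
	struct kioctx_demo {
		spinlock_t		ctx_lock;
		struct list_head	active_reqs;	/* walked by io_cancel(2) */
	};

	struct aio_kiocb_demo {
		struct list_head	ki_list;	/* link in active_reqs */
		struct kioctx_demo	*ki_ctx;
	};

	static void demo_complete(struct aio_kiocb_demo *iocb)
	{
		/* unlink first: after this, io_cancel(2) cannot find us */
		if (!list_empty_careful(&iocb->ki_list)) {
			struct kioctx_demo *ctx = iocb->ki_ctx;
			unsigned long flags;

			spin_lock_irqsave(&ctx->ctx_lock, flags);
			list_del(&iocb->ki_list);
			spin_unlock_irqrestore(&ctx->ctx_lock, flags);
		}

		/* ...only now is it safe to tear the request down... */
	}

The list_empty_careful() check outside the lock is just an optimization
to skip the lock for requests that were never on (or already left) the
list; list_del() itself still happens only under ctx_lock.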