From: Pavel Begunkov <asml.silence@gmail.com>
To: Jens Axboe <axboe@kernel.dk>, io-uring@vger.kernel.org
Subject: [PATCH v2 09/12] io_uring: keep table of pointers to ubufs
Date: Sun, 25 Apr 2021 14:32:23 +0100
Message-ID: <b96efa4c5febadeccf41d0e849ac099f4c83b0d3.1619356238.git.asml.silence@gmail.com>
In-Reply-To: <cover.1619356238.git.asml.silence@gmail.com>

Instead of keeping a table of ubufs, convert it into a table of pointers
to ubufs, so we can atomically read one pointer and be sure that the
contents of the ubuf won't change.
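
For illustration only (not part of the patch), the layout change and the
reader side look roughly like this; the names mirror the diff below:

	/* before: an array of structs whose fields can be rewritten in
	 * place while a reader is walking them */
	struct io_mapped_ubuf	*user_bufs;

	/* after: an array of pointers; an entry is only ever replaced as
	 * a whole, never updated in place */
	struct io_mapped_ubuf	**user_bufs;

	/* reader side: a single pointer load gives a stable view */
	imu = ctx->user_bufs[index];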

Because imu->bvec was already dynamically allocated, merge imu and the
bvec array into a single structure so they can be allocated together.
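
As a sketch (again not part of the patch, assuming the kernel's
struct_size() helper from <linux/overflow.h> and kvmalloc() from
<linux/mm.h>), the combined allocation looks like:

	struct io_mapped_ubuf {
		u64		ubuf;
		u64		ubuf_end;
		unsigned int	nr_bvecs;
		unsigned long	acct_pages;
		struct bio_vec	bvec[];		/* flexible array member */
	};

	/* one allocation covers the header and nr_pages bio_vecs */
	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		return -ENOMEM;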

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 37 ++++++++++++++++++++++-----------------
 1 file changed, 20 insertions(+), 17 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5882303cc84a..ea725c0cbf79 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -195,9 +195,9 @@ enum io_uring_cmd_flags {
 struct io_mapped_ubuf {
 	u64		ubuf;
 	u64		ubuf_end;
-	struct		bio_vec *bvec;
 	unsigned int	nr_bvecs;
 	unsigned long	acct_pages;
+	struct bio_vec	bvec[];
 };
 
 struct io_ring_ctx;
@@ -405,7 +405,7 @@ struct io_ring_ctx {
 
 	/* if used, fixed mapped user buffers */
 	unsigned		nr_user_bufs;
-	struct io_mapped_ubuf	*user_bufs;
+	struct io_mapped_ubuf	**user_bufs;
 
 	struct user_struct	*user;
 
@@ -2760,7 +2760,7 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
 	if (unlikely(buf_index >= ctx->nr_user_bufs))
 		return -EFAULT;
 	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
-	imu = &ctx->user_bufs[index];
+	imu = ctx->user_bufs[index];
 	buf_addr = req->rw.addr;
 
 	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
@@ -8076,16 +8076,17 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
 	return off;
 }
 
-static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
+static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
 {
+	struct io_mapped_ubuf *imu = *slot;
 	unsigned int i;
 
 	for (i = 0; i < imu->nr_bvecs; i++)
 		unpin_user_page(imu->bvec[i].bv_page);
 	if (imu->acct_pages)
 		io_unaccount_mem(ctx, imu->acct_pages);
-	kvfree(imu->bvec);
-	imu->nr_bvecs = 0;
+	kvfree(imu);
+	*slot = NULL;
 }
 
 static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
@@ -8152,7 +8153,7 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
 
 	/* check previously registered pages */
 	for (i = 0; i < ctx->nr_user_bufs; i++) {
-		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
+		struct io_mapped_ubuf *imu = ctx->user_bufs[i];
 
 		for (j = 0; j < imu->nr_bvecs; j++) {
 			if (!PageCompound(imu->bvec[j].bv_page))
@@ -8197,9 +8198,10 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
 }
 
 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
-				  struct io_mapped_ubuf *imu,
+				  struct io_mapped_ubuf **pimu,
 				  struct page **last_hpage)
 {
+	struct io_mapped_ubuf *imu = NULL;
 	struct vm_area_struct **vmas = NULL;
 	struct page **pages = NULL;
 	unsigned long off, start, end, ubuf;
@@ -8211,6 +8213,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	start = ubuf >> PAGE_SHIFT;
 	nr_pages = end - start;
 
+	*pimu = NULL;
 	ret = -ENOMEM;
 
 	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
@@ -8222,8 +8225,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	if (!vmas)
 		goto done;
 
-	imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
-				   GFP_KERNEL);
-	if (!imu->bvec)
+	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
+	if (!imu)
 		goto done;
 
@@ -8253,14 +8255,12 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 		 */
 		if (pret > 0)
 			unpin_user_pages(pages, pret);
-		kvfree(imu->bvec);
 		goto done;
 	}
 
 	ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
 	if (ret) {
 		unpin_user_pages(pages, pret);
-		kvfree(imu->bvec);
 		goto done;
 	}
 
@@ -8280,8 +8280,11 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	imu->ubuf = ubuf;
 	imu->ubuf_end = ubuf + iov->iov_len;
 	imu->nr_bvecs = nr_pages;
+	*pimu = imu;
 	ret = 0;
 done:
+	if (ret)
+		kvfree(imu);
 	kvfree(pages);
 	kvfree(vmas);
 	return ret;
@@ -8331,15 +8334,15 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 		return ret;
 
 	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
-		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
-
 		ret = io_copy_iov(ctx, &iov, arg, i);
 		if (ret)
 			break;
 		ret = io_buffer_validate(&iov);
 		if (ret)
 			break;
-		ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
+
+		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
+					     &last_hpage);
 		if (ret)
 			break;
 	}
@@ -9248,7 +9251,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 	}
 	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
 	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
-		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
+		struct io_mapped_ubuf *buf = ctx->user_bufs[i];
 		unsigned int len = buf->ubuf_end - buf->ubuf;
 
 		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
-- 
2.31.1


Thread overview: 14+ messages
2021-04-25 13:32 [RFC v2 00/12] dynamic buffers + rsrc tagging Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 01/12] io_uring: move __io_sqe_files_unregister Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 02/12] io_uring: return back rsrc data free helper Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 03/12] io_uring: decouple CQE filling from requests Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 04/12] io_uring: preparation for rsrc tagging Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 05/12] io_uring: add generic path for rsrc update Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 06/12] io_uring: enumerate dynamic resources Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 07/12] io_uring: add IORING_REGISTER_RSRC Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 08/12] io_uring: add generic rsrc update with tags Pavel Begunkov
2021-04-25 13:32 ` Pavel Begunkov [this message]
2021-04-25 13:32 ` [PATCH v2 10/12] io_uring: prepare fixed rw for dynanic buffers Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 11/12] io_uring: implement fixed buffers registration similar to fixed files Pavel Begunkov
2021-04-25 13:32 ` [PATCH v2 12/12] io_uring: add full-fledged dynamic buffers support Pavel Begunkov
2021-04-25 16:15 ` [RFC v2 00/12] dynamic buffers + rsrc tagging Jens Axboe
