* [PATCH 1/3] io_uring: free memory immediately when io_mem_alloc failed
@ 2019-05-23  1:59 Jackie Liu
  2019-05-23  1:59 ` [PATCH 2/3] io_uring: no need to reap event repeatedly Jackie Liu
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Jackie Liu @ 2019-05-23  1:59 UTC (permalink / raw)
  To: axboe; +Cc: linux-block, Jackie Liu

In extreme cases, memory may not be available, so we should
clean up the already-allocated memory.

Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
---
 fs/io_uring.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 310f8d1..4430429 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2951,6 +2951,7 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 	struct io_sq_ring *sq_ring;
 	struct io_cq_ring *cq_ring;
 	size_t size;
+	int ret;
 
 	sq_ring = io_mem_alloc(struct_size(sq_ring, array, p->sq_entries));
 	if (!sq_ring)
@@ -2963,16 +2964,22 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 	ctx->sq_entries = sq_ring->ring_entries;
 
 	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
-	if (size == SIZE_MAX)
-		return -EOVERFLOW;
+	if (size == SIZE_MAX) {
+		ret = -EOVERFLOW;
+		goto err;
+	}
 
 	ctx->sq_sqes = io_mem_alloc(size);
-	if (!ctx->sq_sqes)
-		return -ENOMEM;
+	if (!ctx->sq_sqes) {
+		ret = -ENOMEM;
+		goto err;
+	}
 
 	cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
-	if (!cq_ring)
-		return -ENOMEM;
+	if (!cq_ring) {
+		ret = -ENOMEM;
+		goto err1;
+	}
 
 	ctx->cq_ring = cq_ring;
 	cq_ring->ring_mask = p->cq_entries - 1;
@@ -2980,6 +2987,12 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 	ctx->cq_mask = cq_ring->ring_mask;
 	ctx->cq_entries = cq_ring->ring_entries;
 	return 0;
+
+err1:
+	io_mem_free(ctx->sq_sqes);
+err:
+	io_mem_free(ctx->sq_ring);
+	return ret;
 }
 
 /*
-- 
2.7.4
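
Taken together, the error path of io_allocate_scq_urings() after this
patch has the shape sketched below. This is a condensed reconstruction
from the hunks above, not the verbatim function; the ring mask/entries
setup is elided.

	static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
					  struct io_uring_params *p)
	{
		struct io_sq_ring *sq_ring;
		struct io_cq_ring *cq_ring;
		size_t size;
		int ret;

		sq_ring = io_mem_alloc(struct_size(sq_ring, array, p->sq_entries));
		if (!sq_ring)
			return -ENOMEM;		/* nothing allocated yet */
		ctx->sq_ring = sq_ring;
		/* ... sq ring setup elided ... */

		size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
		if (size == SIZE_MAX) {
			ret = -EOVERFLOW;
			goto err;		/* sq_ring is allocated */
		}

		ctx->sq_sqes = io_mem_alloc(size);
		if (!ctx->sq_sqes) {
			ret = -ENOMEM;
			goto err;		/* sq_ring is allocated */
		}

		cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
		if (!cq_ring) {
			ret = -ENOMEM;
			goto err1;		/* sq_ring and sq_sqes are allocated */
		}
		ctx->cq_ring = cq_ring;
		/* ... cq ring setup elided ... */
		return 0;

	err1:
		io_mem_free(ctx->sq_sqes);	/* unwind in reverse allocation order */
	err:
		io_mem_free(ctx->sq_ring);
		return ret;
	}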


* [PATCH 2/3] io_uring: no need to reap event repeatedly
  2019-05-23  1:59 [PATCH 1/3] io_uring: free memory immediately when io_mem_alloc failed Jackie Liu
@ 2019-05-23  1:59 ` Jackie Liu
  2019-05-23  1:59 ` [PATCH 3/3] io_uring: adjust the code logic when an error occurs Jackie Liu
  2019-05-23  3:15 ` [PATCH 1/3] io_uring: free memory immediately when io_mem_alloc failed Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Jackie Liu @ 2019-05-23  1:59 UTC (permalink / raw)
  To: axboe; +Cc: linux-block, Jackie Liu

io_ring_ctx_wait_and_kill() already uses io_iopoll_reap_events()
to reap events; there is no need to do it again in
io_ring_ctx_free().

Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
---
 fs/io_uring.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 4430429..3bbd202 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2771,7 +2771,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	if (ctx->sqo_mm)
 		mmdrop(ctx->sqo_mm);
 
-	io_iopoll_reap_events(ctx);
 	io_sqe_buffer_unregister(ctx);
 	io_sqe_files_unregister(ctx);
 	io_eventfd_unregister(ctx);
-- 
2.7.4
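
The ordering this relies on is easier to see with the two functions
side by side. A condensed sketch of the pre-series teardown path (the
full io_ring_ctx_wait_and_kill() is visible in the patch 3 hunks
below), not the verbatim code:

	static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
	{
		/* ... percpu refs killed ... */
		io_poll_remove_all(ctx);
		io_iopoll_reap_events(ctx);	/* events reaped here ... */
		wait_for_completion(&ctx->ctx_done);
		io_ring_ctx_free(ctx);
	}

	static void io_ring_ctx_free(struct io_ring_ctx *ctx)
	{
		io_finish_async(ctx);
		/* ... mmdrop elided; the io_iopoll_reap_events() call
		 * removed above sat here, after wait_and_kill had
		 * already reaped ... */
		io_sqe_buffer_unregister(ctx);
		/* ... remaining teardown elided ... */
	}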


* [PATCH 3/3] io_uring: adjust the code logic when an error occurs
  2019-05-23  1:59 [PATCH 1/3] io_uring: free memory immediately when io_mem_alloc failed Jackie Liu
  2019-05-23  1:59 ` [PATCH 2/3] io_uring: no need to reap event repeatedly Jackie Liu
@ 2019-05-23  1:59 ` Jackie Liu
  2019-05-23  3:15 ` [PATCH 1/3] io_uring: free memory immediately when io_mem_alloc failed Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Jackie Liu @ 2019-05-23  1:59 UTC (permalink / raw)
  To: axboe; +Cc: linux-block, Jackie Liu

We can reclaim memory and resources immediately when an error
occurs, without having to go through the longer teardown logic.

When creating a uring fails, no data has been generated yet, so
there is no need to reap events or poll for data; it is more
efficient to recycle the memory directly.

In addition, this makes the code easier to understand.

Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
---
 fs/io_uring.c | 96 +++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 63 insertions(+), 33 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3bbd202..035d729 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -361,6 +361,7 @@ struct io_submit_state {
 };
 
 static void io_sq_wq_submit_work(struct work_struct *work);
+static void io_free_scq_urings(struct io_ring_ctx *ctx);
 
 static struct kmem_cache *req_cachep;
 
@@ -417,6 +418,12 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return ctx;
 }
 
+static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+{
+	percpu_ref_exit(&ctx->refs);
+	kfree(ctx);
+}
+
 static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
 				     struct io_kiocb *req)
 {
@@ -2254,16 +2261,20 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
 	}
 }
 
-static void io_finish_async(struct io_ring_ctx *ctx)
+static void io_sq_wq_destroy(struct io_ring_ctx *ctx)
 {
-	io_sq_thread_stop(ctx);
-
 	if (ctx->sqo_wq) {
 		destroy_workqueue(ctx->sqo_wq);
 		ctx->sqo_wq = NULL;
 	}
 }
 
+static void io_finish_async(struct io_ring_ctx *ctx)
+{
+	io_sq_thread_stop(ctx);
+	io_sq_wq_destroy(ctx);
+}
+
 #if defined(CONFIG_UNIX)
 static void io_destruct_skb(struct sk_buff *skb)
 {
@@ -2483,6 +2494,18 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 	return ret;
 }
 
+static void io_sq_offload_end(struct io_ring_ctx *ctx)
+{
+	io_sq_thread_stop(ctx);
+
+	if (ctx->sqo_mm) {
+		mmdrop(ctx->sqo_mm);
+		ctx->sqo_mm = NULL;
+	}
+
+	io_sq_wq_destroy(ctx);
+}
+
 static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
 {
 	atomic_long_sub(nr_pages, &user->locked_vm);
@@ -2765,33 +2788,6 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
 	return -ENXIO;
 }
 
-static void io_ring_ctx_free(struct io_ring_ctx *ctx)
-{
-	io_finish_async(ctx);
-	if (ctx->sqo_mm)
-		mmdrop(ctx->sqo_mm);
-
-	io_sqe_buffer_unregister(ctx);
-	io_sqe_files_unregister(ctx);
-	io_eventfd_unregister(ctx);
-
-#if defined(CONFIG_UNIX)
-	if (ctx->ring_sock)
-		sock_release(ctx->ring_sock);
-#endif
-
-	io_mem_free(ctx->sq_ring);
-	io_mem_free(ctx->sq_sqes);
-	io_mem_free(ctx->cq_ring);
-
-	percpu_ref_exit(&ctx->refs);
-	if (ctx->account_mem)
-		io_unaccount_mem(ctx->user,
-				ring_pages(ctx->sq_entries, ctx->cq_entries));
-	free_uid(ctx->user);
-	kfree(ctx);
-}
-
 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 {
 	struct io_ring_ctx *ctx = file->private_data;
@@ -2825,9 +2821,27 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	percpu_ref_kill(&ctx->refs);
 	mutex_unlock(&ctx->uring_lock);
 
+	/* poll all events and reap */
 	io_poll_remove_all(ctx);
 	io_iopoll_reap_events(ctx);
 	wait_for_completion(&ctx->ctx_done);
+
+	io_sq_offload_end(ctx);
+
+	/* unregister fixed buffer and files */
+	io_sqe_buffer_unregister(ctx);
+	io_sqe_files_unregister(ctx);
+	io_eventfd_unregister(ctx);
+
+#if defined(CONFIG_UNIX)
+	if (ctx->ring_sock)
+		sock_release(ctx->ring_sock);
+#endif
+	io_free_scq_urings(ctx);
+	if (ctx->account_mem)
+		io_unaccount_mem(ctx->user,
+				 ring_pages(ctx->sq_entries, ctx->cq_entries));
+	free_uid(ctx->user);
 	io_ring_ctx_free(ctx);
 }
 
@@ -2994,6 +3008,13 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 	return ret;
 }
 
+static void io_free_scq_urings(struct io_ring_ctx *ctx)
+{
+	io_mem_free(ctx->sq_ring);
+	io_mem_free(ctx->sq_sqes);
+	io_mem_free(ctx->cq_ring);
+}
+
 /*
  * Allocate an anonymous fd, this is what constitutes the application
  * visible backing of an io_uring instance. The application mmaps this
@@ -3083,11 +3104,11 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 
 	ret = io_allocate_scq_urings(ctx, p);
 	if (ret)
-		goto err;
+		goto err_scq;
 
 	ret = io_sq_offload_start(ctx, p);
 	if (ret)
-		goto err;
+		goto err_off;
 
 	ret = io_uring_get_fd(ctx);
 	if (ret < 0)
@@ -3110,8 +3131,17 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 	p->cq_off.overflow = offsetof(struct io_cq_ring, overflow);
 	p->cq_off.cqes = offsetof(struct io_cq_ring, cqes);
 	return ret;
+
 err:
-	io_ring_ctx_wait_and_kill(ctx);
+	io_sq_offload_end(ctx);
+err_off:
+	io_free_scq_urings(ctx);
+err_scq:
+	free_uid(user);
+	if (account_mem)
+		io_unaccount_mem(user, ring_pages(p->sq_entries,
+							p->cq_entries));
+	io_ring_ctx_free(ctx);
 	return ret;
 }
 
-- 
2.7.4
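
The net effect is that each error label in io_uring_create() now
unwinds exactly what has been set up so far, instead of funnelling
every failure through the full io_ring_ctx_wait_and_kill() teardown.
Condensed from the hunk above (cleanup steps only, success-path setup
elided):

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err_scq;		/* only ctx and accounting to undo */

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err_off;		/* rings allocated, offload not started */

	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;		/* offload running as well */
	/* ... fill in p->sq_off/p->cq_off on success ... */
	return ret;

err:
	io_sq_offload_end(ctx);		/* stop sq thread/workqueue, drop mm */
err_off:
	io_free_scq_urings(ctx);	/* free sq_ring, sq_sqes, cq_ring */
err_scq:
	free_uid(user);
	if (account_mem)
		io_unaccount_mem(user, ring_pages(p->sq_entries,
						  p->cq_entries));
	io_ring_ctx_free(ctx);		/* percpu_ref_exit() + kfree() */
	return ret;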


* Re: [PATCH 1/3] io_uring: free memory immediately when io_mem_alloc failed
  2019-05-23  1:59 [PATCH 1/3] io_uring: free memory immediately when io_mem_alloc failed Jackie Liu
  2019-05-23  1:59 ` [PATCH 2/3] io_uring: no need to reap event repeatedly Jackie Liu
  2019-05-23  1:59 ` [PATCH 3/3] io_uring: adjust the code logic when an error occurs Jackie Liu
@ 2019-05-23  3:15 ` Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Jens Axboe @ 2019-05-23  3:15 UTC (permalink / raw)
  To: Jackie Liu; +Cc: linux-block

On 5/22/19 7:59 PM, Jackie Liu wrote:
> In extreme cases, memory may not be available, so we should
> clean up the already-allocated memory.

The caller frees these on error.

-- 
Jens Axboe
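
For reference, the caller-side cleanup being pointed at is visible in
the pre-series code that patch 3 deletes: every failure in
io_uring_create() goes through io_ring_ctx_wait_and_kill(), whose
final io_ring_ctx_free() already frees the rings. Condensed from those
removed lines:

	/* pre-series io_uring_create() error path */
	err:
		io_ring_ctx_wait_and_kill(ctx);	/* ends in io_ring_ctx_free() */
		return ret;

	/* inside the pre-series io_ring_ctx_free() */
	io_mem_free(ctx->sq_ring);
	io_mem_free(ctx->sq_sqes);
	io_mem_free(ctx->cq_ring);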
