From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@nvidia.com>
Cc: Leon Romanovsky <leonro@mellanox.com>, linux-rdma@vger.kernel.org
Subject: [PATCH rdma-next 14/14] RDMA/ucma: Remove closing and the close_wq
Date: Tue, 18 Aug 2020 15:05:26 +0300
Message-ID: <20200818120526.702120-15-leon@kernel.org>
In-Reply-To: <20200818120526.702120-1-leon@kernel.org>

From: Jason Gunthorpe <jgg@nvidia.com>

Use cancel_work_sync() to ensure that the work is not running, and simply
assign NULL to ctx->cm_id to indicate whether the work ran. Delete the
close_wq since flush_workqueue() is no longer needed.
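
For illustration, a minimal standalone kernel-style sketch of the pattern
this patch moves to. The demo_* names are hypothetical, not the ucma code,
and the real driver's additional refcount/completion handshake is omitted:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct work_struct close_work;
	void *cm_id;		/* stand-in for the struct rdma_cm_id pointer */
};

static void demo_destroy_id(void *cm_id)
{
	/* stand-in for rdma_destroy_id() */
}

static void demo_close_work_fn(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx,
					    close_work);

	demo_destroy_id(ctx->cm_id);
	/* Tell the destroy path that the work already did the teardown. */
	ctx->cm_id = NULL;
}

static struct demo_ctx *demo_alloc(void *cm_id)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->cm_id = cm_id;
	INIT_WORK(&ctx->close_work, demo_close_work_fn);
	return ctx;
}

/* Event side: queue on the shared unbound workqueue; no private
 * ordered workqueue is needed. */
static void demo_on_device_removal(struct demo_ctx *ctx)
{
	queue_work(system_unbound_wq, &ctx->close_work);
}

/* Destroy side: after cancel_work_sync() the work has either finished
 * or will never run, so a remaining non-NULL cm_id means the teardown
 * still has to happen here. */
static void demo_destroy(struct demo_ctx *ctx)
{
	cancel_work_sync(&ctx->close_work);
	if (ctx->cm_id)
		demo_close_work_fn(&ctx->close_work);
	kfree(ctx);
}

The NULL store replaces the old "closing" flag: cancel_work_sync() already
orders the destroy path against the work, so no extra lock is needed to
read it.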

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/core/ucma.c | 49 +++++++++++-----------------------
 1 file changed, 15 insertions(+), 34 deletions(-)

diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 40539c9625d6..46d37e470e98 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -80,7 +80,6 @@ struct ucma_file {
 	struct list_head	ctx_list;
 	struct list_head	event_list;
 	wait_queue_head_t	poll_wait;
-	struct workqueue_struct	*close_wq;
 };
 
 struct ucma_context {
@@ -96,10 +95,6 @@ struct ucma_context {
 	u64			uid;
 
 	struct list_head	list;
-	/* mark that device is in process of destroying the internal HW
-	 * resources, protected by the ctx_table lock
-	 */
-	int			closing;
 	/* sync between removal event and id destroy, protected by file mut */
 	int			destroying;
 	struct work_struct	close_work;
@@ -148,12 +143,9 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
 
 	xa_lock(&ctx_table);
 	ctx = _ucma_find_context(id, file);
-	if (!IS_ERR(ctx)) {
-		if (ctx->closing)
-			ctx = ERR_PTR(-EIO);
-		else if (!refcount_inc_not_zero(&ctx->ref))
+	if (!IS_ERR(ctx))
+		if (!refcount_inc_not_zero(&ctx->ref))
 			ctx = ERR_PTR(-ENXIO);
-	}
 	xa_unlock(&ctx_table);
 	return ctx;
 }
@@ -193,6 +185,14 @@ static void ucma_close_id(struct work_struct *work)
 	wait_for_completion(&ctx->comp);
 	/* No new events will be generated after destroying the id. */
 	rdma_destroy_id(ctx->cm_id);
+
+	/*
+	 * At this point ctx->ref is zero so the only place the ctx can be is in
+	 * a uevent or in __destroy_id(). Since the former doesn't touch
+	 * ctx->cm_id and the latter sync cancels this, there are no races with
+	 * this store.
+	 */
+	ctx->cm_id = NULL;
 }
 
 static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
@@ -356,12 +356,8 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 		wake_up_interruptible(&ctx->file->poll_wait);
 	}
 
-	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying) {
-		xa_lock(&ctx_table);
-		ctx->closing = 1;
-		xa_unlock(&ctx_table);
-		queue_work(ctx->file->close_wq, &ctx->close_work);
-	}
+	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
+		queue_work(system_unbound_wq, &ctx->close_work);
 	return 0;
 }
 
@@ -577,17 +573,10 @@ static int __destroy_id(struct ucma_context *ctx)
 		ucma_put_ctx(ctx);
 	}
 
-	flush_workqueue(ctx->file->close_wq);
+	cancel_work_sync(&ctx->close_work);
 	/* At this point it's guaranteed that there is no inflight closing task */
-	xa_lock(&ctx_table);
-	if (!ctx->closing) {
-		xa_unlock(&ctx_table);
-		ucma_put_ctx(ctx);
-		wait_for_completion(&ctx->comp);
-		rdma_destroy_id(ctx->cm_id);
-	} else {
-		xa_unlock(&ctx_table);
-	}
+	if (ctx->cm_id)
+		ucma_close_id(&ctx->close_work);
 	return ucma_free_ctx(ctx);
 }
 
@@ -1788,13 +1777,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
 	if (!file)
 		return -ENOMEM;
 
-	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
-						 WQ_MEM_RECLAIM);
-	if (!file->close_wq) {
-		kfree(file);
-		return -ENOMEM;
-	}
-
 	INIT_LIST_HEAD(&file->event_list);
 	INIT_LIST_HEAD(&file->ctx_list);
 	init_waitqueue_head(&file->poll_wait);
@@ -1825,7 +1807,6 @@ static int ucma_close(struct inode *inode, struct file *filp)
 		xa_erase(&ctx_table, ctx->id);
 		__destroy_id(ctx);
 	}
-	destroy_workqueue(file->close_wq);
 	kfree(file);
 	return 0;
 }
-- 
2.26.2


