From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@nvidia.com>
Cc: Leon Romanovsky <leonro@mellanox.com>, linux-rdma@vger.kernel.org
Subject: [PATCH rdma-next 04/14] RDMA/ucma: Fix error cases around ucma_alloc_ctx()
Date: Tue, 18 Aug 2020 15:05:16 +0300
Message-ID: <20200818120526.702120-5-leon@kernel.org>
In-Reply-To: <20200818120526.702120-1-leon@kernel.org>

From: Jason Gunthorpe <jgg@nvidia.com>

The store to ctx->cm_id was based on the idea that _ucma_find_context()
would not return the ctx until it was fully set up.

Without locking, nothing guarantees that ordering, so a lookup can
observe a ctx whose cm_id has not been assigned yet.
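
For reference, the old flow published the ctx before cm_id was
assigned, roughly (simplified from the removed lines below, not the
literal driver code):

	/* old, racy ordering */
	xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL);
	/* ... ctx is already findable by _ucma_find_context() here ... */
	ctx->cm_id = cm_id;	/* too late: an unlocked reader may race */

and _ucma_find_context() tried to compensate with an unlocked
!ctx->cm_id check.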

Split things so that the xarray entry is first allocated as NULL to
reserve the ID, and only once everything is finalized are the cm_id
assigned and the real ctx pointer stored.
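
The reserve-then-publish idiom looks roughly like this (a minimal
sketch only; my_table, struct my_obj and the helper names are
hypothetical, not the driver's actual symbols):

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(my_table);

	struct my_obj {
		u32 id;
		/* ... members that must be set before publishing ... */
	};

	/*
	 * Reserve an ID while keeping the object invisible: in an
	 * allocating XArray a NULL entry keeps the ID allocated, but
	 * xa_load() returns NULL for it, so no lookup can observe a
	 * half-initialized object.
	 */
	static int my_obj_reserve(struct my_obj *obj)
	{
		return xa_alloc(&my_table, &obj->id, NULL, xa_limit_32b,
				GFP_KERNEL);
	}

	/* Publish only after every member is fully set up. */
	static void my_obj_publish(struct my_obj *obj)
	{
		xa_store(&my_table, obj->id, obj, GFP_KERNEL);
	}

On an error path the reservation is simply dropped with xa_erase() and
the object freed; nothing else could have seen it.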

Along the way this shows that the error unwind in ucma_get_event() is
wrong when a new ctx is created; fix it up.
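
Concretely, because the ctx is never published before
ucma_finish_ctx(), the unwind only has to drop the reserved ID and free
the memory (a sketch of the corrected path, matching the hunk below):

	err_ctx:
		if (ctx) {
			xa_erase(&ctx_table, ctx->id);	/* release reserved ID */
			kfree(ctx);	/* never visible, safe to free */
		}
	err_unlock:
		mutex_unlock(&file->mut);
		return ret;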

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/core/ucma.c | 68 +++++++++++++++++++++-------------
 1 file changed, 42 insertions(+), 26 deletions(-)

diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 878cbb94065f..7416a5a6aa69 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -130,6 +130,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
 static DEFINE_XARRAY_ALLOC(multicast_table);
 
 static const struct file_operations ucma_fops;
+static int __destroy_id(struct ucma_context *ctx);
 
 static inline struct ucma_context *_ucma_find_context(int id,
 						      struct ucma_file *file)
@@ -139,7 +140,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
 	ctx = xa_load(&ctx_table, id);
 	if (!ctx)
 		ctx = ERR_PTR(-ENOENT);
-	else if (ctx->file != file || !ctx->cm_id)
+	else if (ctx->file != file)
 		ctx = ERR_PTR(-EINVAL);
 	return ctx;
 }
@@ -217,18 +218,23 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 	refcount_set(&ctx->ref, 1);
 	init_completion(&ctx->comp);
 	INIT_LIST_HEAD(&ctx->mc_list);
+	/* So list_del() will work if we don't do ucma_finish_ctx() */
+	INIT_LIST_HEAD(&ctx->list);
 	ctx->file = file;
 	mutex_init(&ctx->mutex);
 
-	if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
-		goto error;
-
-	list_add_tail(&ctx->list, &file->ctx_list);
+	if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) {
+		kfree(ctx);
+		return NULL;
+	}
 	return ctx;
+}
 
-error:
-	kfree(ctx);
-	return NULL;
+static void ucma_finish_ctx(struct ucma_context *ctx)
+{
+	lockdep_assert_held(&ctx->file->mut);
+	list_add_tail(&ctx->list, &ctx->file->ctx_list);
+	xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL);
 }
 
 static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
@@ -399,7 +405,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
 			      int in_len, int out_len)
 {
-	struct ucma_context *ctx;
+	struct ucma_context *ctx = NULL;
 	struct rdma_ucm_get_event cmd;
 	struct ucma_event *uevent;
 	int ret = 0;
@@ -429,33 +435,46 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
 		mutex_lock(&file->mut);
 	}
 
-	uevent = list_entry(file->event_list.next, struct ucma_event, list);
+	uevent = list_first_entry(&file->event_list, struct ucma_event, list);
 
 	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		ctx = ucma_alloc_ctx(file);
 		if (!ctx) {
 			ret = -ENOMEM;
-			goto done;
+			goto err_unlock;
 		}
-		uevent->ctx->backlog++;
-		ctx->cm_id = uevent->cm_id;
-		ctx->cm_id->context = ctx;
 		uevent->resp.id = ctx->id;
+		ctx->cm_id = uevent->cm_id;
 	}
 
 	if (copy_to_user(u64_to_user_ptr(cmd.response),
 			 &uevent->resp,
 			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
 		ret = -EFAULT;
-		goto done;
+		goto err_ctx;
+	}
+
+	if (ctx) {
+		uevent->ctx->backlog++;
+		uevent->cm_id->context = ctx;
+		ucma_finish_ctx(ctx);
 	}
 
 	list_del(&uevent->list);
 	uevent->ctx->events_reported++;
 	if (uevent->mc)
 		uevent->mc->events_reported++;
+	mutex_unlock(&file->mut);
+
 	kfree(uevent);
-done:
+	return 0;
+
+err_ctx:
+	if (ctx) {
+		xa_erase(&ctx_table, ctx->id);
+		kfree(ctx);
+	}
+err_unlock:
 	mutex_unlock(&file->mut);
 	return ret;
 }
@@ -498,9 +517,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 	if (ret)
 		return ret;
 
-	mutex_lock(&file->mut);
 	ctx = ucma_alloc_ctx(file);
-	mutex_unlock(&file->mut);
 	if (!ctx)
 		return -ENOMEM;
 
@@ -511,24 +528,23 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 		ret = PTR_ERR(cm_id);
 		goto err1;
 	}
+	ctx->cm_id = cm_id;
 
 	resp.id = ctx->id;
 	if (copy_to_user(u64_to_user_ptr(cmd.response),
 			 &resp, sizeof(resp))) {
-		ret = -EFAULT;
-		goto err2;
+		xa_erase(&ctx_table, ctx->id);
+		__destroy_id(ctx);
+		return -EFAULT;
 	}
 
-	ctx->cm_id = cm_id;
+	mutex_lock(&file->mut);
+	ucma_finish_ctx(ctx);
+	mutex_unlock(&file->mut);
 	return 0;
 
-err2:
-	rdma_destroy_id(cm_id);
 err1:
 	xa_erase(&ctx_table, ctx->id);
-	mutex_lock(&file->mut);
-	list_del(&ctx->list);
-	mutex_unlock(&file->mut);
 	kfree(ctx);
 	return ret;
 }
-- 
2.26.2


Thread overview: 18+ messages
2020-08-18 12:05 [PATCH rdma-next 00/14] Cleanup locking and events in ucma Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 01/14] RDMA/ucma: Fix refcount 0 incr in ucma_get_ctx() Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 02/14] RDMA/ucma: Remove unnecessary locking of file->ctx_list in close Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 03/14] RDMA/ucma: Consolidate the two destroy flows Leon Romanovsky
2020-08-18 12:05 ` Leon Romanovsky [this message]
2020-08-18 12:05 ` [PATCH rdma-next 05/14] RDMA/ucma: Remove mc_list and rely on xarray Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 06/14] RDMA/cma: Add missing locking to rdma_accept() Leon Romanovsky
2021-02-09 14:46   ` Chuck Lever
2021-02-09 15:40     ` Jason Gunthorpe
2020-08-18 12:05 ` [PATCH rdma-next 07/14] RDMA/ucma: Do not use file->mut to lock destroying Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 08/14] RDMA/ucma: Fix the locking of ctx->file Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 09/14] RDMA/ucma: Fix locking for ctx->events_reported Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 10/14] RDMA/ucma: Add missing locking around rdma_leave_multicast() Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 11/14] RDMA/ucma: Change backlog into an atomic Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 12/14] RDMA/ucma: Narrow file->mut in ucma_event_handler() Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 13/14] RDMA/ucma: Rework how new connections are passed through event delivery Leon Romanovsky
2020-08-18 12:05 ` [PATCH rdma-next 14/14] RDMA/ucma: Remove closing and the close_wq Leon Romanovsky
2020-08-27 11:39 ` [PATCH rdma-next 00/14] Cleanup locking and events in ucma Jason Gunthorpe
