Linux-NVME Archive on lore.kernel.org
* [PATCH] RDMA: Add rdma_connect_locked()
From: Jason Gunthorpe @ 2020-10-26 14:25 UTC
  To: Danil Kipnis, Doug Ledford, Christoph Hellwig, Jack Wang,
	Keith Busch, linux-nvme, linux-rdma, Max Gurtovoy, netdev,
	rds-devel, Sagi Grimberg, Santosh Shilimkar
  Cc: Guoqing Jiang, Leon Romanovsky

There are two flows for handling RDMA_CM_EVENT_ROUTE_RESOLVED: either the
handler triggers a completion and another thread does rdma_connect(), or
the handler directly calls rdma_connect().

In all cases rdma_connect() needs to hold the handler_mutex, but when
handlers are invoked this is already held by the core code. This causes
ULPs using the second method to deadlock.

Provide a rdma_connect_locked() and have all ULPs call it from their
handlers.

Reported-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Fixes: 2a7cec538169 ("RDMA/cma: Fix locking for the RDMA_CM_CONNECT state")
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/infiniband/core/cma.c            | 39 +++++++++++++++++++++---
 drivers/infiniband/ulp/iser/iser_verbs.c |  2 +-
 drivers/infiniband/ulp/rtrs/rtrs-clt.c   |  4 +--
 drivers/nvme/host/rdma.c                 | 10 +++---
 include/rdma/rdma_cm.h                   | 13 +-------
 net/rds/ib_cm.c                          |  5 +--
 6 files changed, 47 insertions(+), 26 deletions(-)

Seems people are not testing these four ULPs against rdma-next. Here is a
quick fix for the issue:

https://lore.kernel.org/r/3b1f7767-98e2-93e0-b718-16d1c5346140@cloud.ionos.com

Jason
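
To make the deadlock concrete, here is a minimal sketch of the second
flow (a hypothetical ULP handler written only for illustration, not
code from any of the drivers touched below):

/*
 * Hypothetical ULP event handler, for illustration only.  The CM core
 * invokes this callback with id_priv->handler_mutex already held; a
 * handler that defers the connect to another thread (the first flow)
 * can keep using plain rdma_connect() from that thread.
 */
static int ulp_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	struct rdma_conn_param param = {};

	if (ev->event != RDMA_CM_EVENT_ROUTE_RESOLVED)
		return 0;

	/*
	 * rdma_connect() would re-take handler_mutex here and
	 * self-deadlock.  rdma_connect_locked() expects the mutex to be
	 * held by the caller, which is always the case inside a handler.
	 * A non-zero return tells the CM core to destroy the id.
	 */
	return rdma_connect_locked(id, &param);
}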

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 7c2ab1f2fbea37..2eaaa1292fb847 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -405,10 +405,10 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	/*
 	 * The FSM uses a funny double locking where state is protected by both
 	 * the handler_mutex and the spinlock. State is not allowed to change
-	 * away from a handler_mutex protected value without also holding
+	 * to/from a handler_mutex protected value without also holding
 	 * handler_mutex.
 	 */
-	if (comp == RDMA_CM_CONNECT)
+	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
 		lockdep_assert_held(&id_priv->handler_mutex);
 
 	spin_lock_irqsave(&id_priv->lock, flags);
@@ -4038,13 +4038,20 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	return ret;
 }
 
-int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+/**
+ * rdma_connect_locked - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Same as rdma_connect() but can only be called from the
+ * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
+ */
+int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 {
 	struct rdma_id_private *id_priv =
 		container_of(id, struct rdma_id_private, id);
 	int ret;
 
-	mutex_lock(&id_priv->handler_mutex);
 	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) {
 		ret = -EINVAL;
 		goto err_unlock;
@@ -4071,6 +4078,30 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 err_state:
 	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
 err_unlock:
+	return ret;
+}
+EXPORT_SYMBOL(rdma_connect_locked);
+
+/**
+ * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Users must have resolved a route for the rdma_cm_id to connect with by having
+ * called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP information
+ * for unconnected rdma_cm_id's.  The actual operation is based on the
+ * rdma_cm_id's port space.
+ */
+int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+	int ret;
+
+	mutex_lock(&id_priv->handler_mutex);
+	ret = rdma_connect_locked(id, conn_param);
 	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2f3ebc0a75d924..2bd18b00689341 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -620,7 +620,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
 	conn_param.private_data	= (void *)&req_hdr;
 	conn_param.private_data_len = sizeof(struct iser_cm_hdr);
 
-	ret = rdma_connect(cma_id, &conn_param);
+	ret = rdma_connect_locked(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
 		goto failure;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 776e89231c52f7..f298adc02acba2 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1674,9 +1674,9 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
 	uuid_copy(&msg.sess_uuid, &sess->s.uuid);
 	uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
 
-	err = rdma_connect(con->c.cm_id, &param);
+	err = rdma_connect_locked(con->c.cm_id, &param);
 	if (err)
-		rtrs_err(clt, "rdma_connect(): %d\n", err);
+		rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
 
 	return err;
 }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index aad829a2b50d0f..f488dc5f4c2c61 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1730,11 +1730,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 	req->result = cqe->result;
 
 	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
-		if (unlikely(!req->mr ||
-			     wc->ex.invalidate_rkey != req->mr->rkey)) {
+		if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
 			dev_err(queue->ctrl->ctrl.device,
 				"Bogus remote invalidation for rkey %#x\n",
-				req->mr ? req->mr->rkey : 0);
+				req->mr->rkey);
 			nvme_rdma_error_recovery(queue->ctrl);
 		}
 	} else if (req->mr) {
@@ -1890,10 +1889,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
 	}
 
-	ret = rdma_connect(queue->cm_id, &param);
+	ret = rdma_connect_locked(queue->cm_id, &param);
 	if (ret) {
 		dev_err(ctrl->ctrl.device,
-			"rdma_connect failed (%d).\n", ret);
+			"rdma_connect_locked failed (%d).\n", ret);
 		goto out_destroy_queue_ib;
 	}
 
@@ -1927,6 +1926,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		complete(&queue->cm_done);
 		return 0;
 	case RDMA_CM_EVENT_REJECTED:
+		nvme_rdma_destroy_queue_ib(queue);
 		cm_error = nvme_rdma_conn_rejected(queue, ev);
 		break;
 	case RDMA_CM_EVENT_ROUTE_ERROR:
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index c672ae1da26bb5..937d55611cd073 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -227,19 +227,8 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		       int *qp_attr_mask);
 
-/**
- * rdma_connect - Initiate an active connection request.
- * @id: Connection identifier to connect.
- * @conn_param: Connection information used for connected QPs.
- *
- * Users must have resolved a route for the rdma_cm_id to connect with
- * by having called rdma_resolve_route before calling this routine.
- *
- * This call will either connect to a remote QP or obtain remote QP
- * information for unconnected rdma_cm_id's.  The actual operation is
- * based on the rdma_cm_id's port space.
- */
 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
 int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
 		     struct rdma_ucm_ece *ece);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 06603dd1c8aa38..b36b60668b1da9 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -956,9 +956,10 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
 	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
 				  conn->c_proposed_version,
 				  UINT_MAX, UINT_MAX, isv6);
-	ret = rdma_connect(cm_id, &conn_param);
+	ret = rdma_connect_locked(cm_id, &conn_param);
 	if (ret)
-		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
+		rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n",
+				  ret);
 
 out:
 	/* Beware - returning non-zero tells the rdma_cm to destroy
-- 
2.28.0


* Re: [PATCH] RDMA: Add rdma_connect_locked()
From: santosh.shilimkar @ 2020-10-26 16:01 UTC
  To: Jason Gunthorpe, Danil Kipnis, Doug Ledford, Christoph Hellwig,
	Jack Wang, Keith Busch, linux-nvme, linux-rdma, Max Gurtovoy,
	netdev, rds-devel, Sagi Grimberg
  Cc: Guoqing Jiang, Leon Romanovsky



On 10/26/20 7:25 AM, Jason Gunthorpe wrote:
> There are two flows for handling RDMA_CM_EVENT_ROUTE_RESOLVED: either the
> handler triggers a completion and another thread does rdma_connect(), or
> the handler directly calls rdma_connect().
> 
> In all cases rdma_connect() needs to hold the handler_mutex, but when
> handlers are invoked this is already held by the core code. This causes
> ULPs using the second method to deadlock.
> 
> Provide a rdma_connect_locked() and have all ULPs call it from their
> handlers.
> 
> Reported-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
> Fixes: 2a7cec538169 ("RDMA/cma: Fix locking for the RDMA_CM_CONNECT state")
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---

[....]

> diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
> index 06603dd1c8aa38..b36b60668b1da9 100644
> --- a/net/rds/ib_cm.c
> +++ b/net/rds/ib_cm.c
> @@ -956,9 +956,10 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
>   	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
>   				  conn->c_proposed_version,
>   				  UINT_MAX, UINT_MAX, isv6);
> -	ret = rdma_connect(cm_id, &conn_param);
> +	ret = rdma_connect_locked(cm_id, &conn_param);
>   	if (ret)
> -		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
> +		rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n",
> +				  ret);
>   
>   out:
>   	/* Beware - returning non-zero tells the rdma_cm to destroy
> 
For the RDS part,
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>

* Re: [PATCH] RDMA: Add rdma_connect_locked()
From: Chao Leng @ 2020-10-27  2:01 UTC
  To: Jason Gunthorpe, Danil Kipnis, Doug Ledford, Christoph Hellwig,
	Jack Wang, Keith Busch, linux-nvme, linux-rdma, Max Gurtovoy,
	netdev, rds-devel, Sagi Grimberg, Santosh Shilimkar
  Cc: Guoqing Jiang, Leon Romanovsky



On 2020/10/26 22:25, Jason Gunthorpe wrote:
> There are two flows for handling RDMA_CM_EVENT_ROUTE_RESOLVED: either the
> handler triggers a completion and another thread does rdma_connect(), or
> the handler directly calls rdma_connect().
> 
> In all cases rdma_connect() needs to hold the handler_mutex, but when
> handlers are invoked this is already held by the core code. This causes
> ULPs using the second method to deadlock.
> 
> Provide a rdma_connect_locked() and have all ULPs call it from their
> handlers.
> 
> Reported-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
> Fixes: 2a7cec538169 ("RDMA/cma: Fix locking for the RDMA_CM_CONNECT state")
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>   drivers/infiniband/core/cma.c            | 39 +++++++++++++++++++++---
>   drivers/infiniband/ulp/iser/iser_verbs.c |  2 +-
>   drivers/infiniband/ulp/rtrs/rtrs-clt.c   |  4 +--
>   drivers/nvme/host/rdma.c                 | 10 +++---
>   include/rdma/rdma_cm.h                   | 13 +-------
>   net/rds/ib_cm.c                          |  5 +--
>   6 files changed, 47 insertions(+), 26 deletions(-)
> 
> Seems people are not testing these four ULPs against rdma-next. Here is a
> quick fix for the issue:
> 
> https://lore.kernel.org/r/3b1f7767-98e2-93e0-b718-16d1c5346140@cloud.ionos.com
> 
> Jason
> 
> diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
> index 7c2ab1f2fbea37..2eaaa1292fb847 100644
> --- a/drivers/infiniband/core/cma.c
> +++ b/drivers/infiniband/core/cma.c
> @@ -405,10 +405,10 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
>   	/*
>   	 * The FSM uses a funny double locking where state is protected by both
>   	 * the handler_mutex and the spinlock. State is not allowed to change
> -	 * away from a handler_mutex protected value without also holding
> +	 * to/from a handler_mutex protected value without also holding
>   	 * handler_mutex.
>   	 */
> -	if (comp == RDMA_CM_CONNECT)
> +	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
>   		lockdep_assert_held(&id_priv->handler_mutex);
>   
>   	spin_lock_irqsave(&id_priv->lock, flags);
> @@ -4038,13 +4038,20 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
>   	return ret;
>   }
>   
> -int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
> +/**
> + * rdma_connect_locked - Initiate an active connection request.
> + * @id: Connection identifier to connect.
> + * @conn_param: Connection information used for connected QPs.
> + *
> + * Same as rdma_connect() but can only be called from the
> + * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
> + */
> +int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>   {
>   	struct rdma_id_private *id_priv =
>   		container_of(id, struct rdma_id_private, id);
>   	int ret;
>   
> -	mutex_lock(&id_priv->handler_mutex);
>   	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) {
>   		ret = -EINVAL;
>   		goto err_unlock;
> @@ -4071,6 +4078,30 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>   err_state:
>   	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
>   err_unlock:
> +	return ret;
> +}
> +EXPORT_SYMBOL(rdma_connect_locked);
> +
> +/**
> + * rdma_connect - Initiate an active connection request.
> + * @id: Connection identifier to connect.
> + * @conn_param: Connection information used for connected QPs.
> + *
> + * Users must have resolved a route for the rdma_cm_id to connect with by having
> + * called rdma_resolve_route before calling this routine.
> + *
> + * This call will either connect to a remote QP or obtain remote QP information
> + * for unconnected rdma_cm_id's.  The actual operation is based on the
> + * rdma_cm_id's port space.
> + */
> +int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
> +{
> +	struct rdma_id_private *id_priv =
> +		container_of(id, struct rdma_id_private, id);
> +	int ret;
> +
> +	mutex_lock(&id_priv->handler_mutex);
> +	ret = rdma_connect_locked(id, conn_param);
>   	mutex_unlock(&id_priv->handler_mutex);
>   	return ret;
>   }
> diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
> index 2f3ebc0a75d924..2bd18b00689341 100644
> --- a/drivers/infiniband/ulp/iser/iser_verbs.c
> +++ b/drivers/infiniband/ulp/iser/iser_verbs.c
> @@ -620,7 +620,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
>   	conn_param.private_data	= (void *)&req_hdr;
>   	conn_param.private_data_len = sizeof(struct iser_cm_hdr);
>   
> -	ret = rdma_connect(cma_id, &conn_param);
> +	ret = rdma_connect_locked(cma_id, &conn_param);
>   	if (ret) {
>   		iser_err("failure connecting: %d\n", ret);
>   		goto failure;
> diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
> index 776e89231c52f7..f298adc02acba2 100644
> --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
> +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
> @@ -1674,9 +1674,9 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
>   	uuid_copy(&msg.sess_uuid, &sess->s.uuid);
>   	uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
>   
> -	err = rdma_connect(con->c.cm_id, &param);
> +	err = rdma_connect_locked(con->c.cm_id, &param);
>   	if (err)
> -		rtrs_err(clt, "rdma_connect(): %d\n", err);
> +		rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
>   
>   	return err;
>   }
> diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
> index aad829a2b50d0f..f488dc5f4c2c61 100644
> --- a/drivers/nvme/host/rdma.c
> +++ b/drivers/nvme/host/rdma.c
> @@ -1730,11 +1730,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
>   	req->result = cqe->result;
>   
>   	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
> -		if (unlikely(!req->mr ||
> -			     wc->ex.invalidate_rkey != req->mr->rkey)) {
> +		if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
>   			dev_err(queue->ctrl->ctrl.device,
>   				"Bogus remote invalidation for rkey %#x\n",
> -				req->mr ? req->mr->rkey : 0);
> +				req->mr->rkey);
Maybe the patch was generated against the wrong code version, causing an
unintended revert here.
>   			nvme_rdma_error_recovery(queue->ctrl);
>   		}
>   	} else if (req->mr) {
> @@ -1890,10 +1889,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
>   		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
>   	}
>   
> -	ret = rdma_connect(queue->cm_id, &param);
> +	ret = rdma_connect_locked(queue->cm_id, &param);
>   	if (ret) {
>   		dev_err(ctrl->ctrl.device,
> -			"rdma_connect failed (%d).\n", ret);
> +			"rdma_connect_locked failed (%d).\n", ret);
>   		goto out_destroy_queue_ib;
>   	}
>   
> @@ -1927,6 +1926,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
>   		complete(&queue->cm_done);
>   		return 0;
>   	case RDMA_CM_EVENT_REJECTED:
> +		nvme_rdma_destroy_queue_ib(queue);
Maybe the patch was generated against the wrong code version, causing an
unintended revert here.
>   		cm_error = nvme_rdma_conn_rejected(queue, ev);
>   		break;
>   	case RDMA_CM_EVENT_ROUTE_ERROR:
> diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
> index c672ae1da26bb5..937d55611cd073 100644
> --- a/include/rdma/rdma_cm.h
> +++ b/include/rdma/rdma_cm.h
> @@ -227,19 +227,8 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
>   int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
>   		       int *qp_attr_mask);
>   
> -/**
> - * rdma_connect - Initiate an active connection request.
> - * @id: Connection identifier to connect.
> - * @conn_param: Connection information used for connected QPs.
> - *
> - * Users must have resolved a route for the rdma_cm_id to connect with
> - * by having called rdma_resolve_route before calling this routine.
> - *
> - * This call will either connect to a remote QP or obtain remote QP
> - * information for unconnected rdma_cm_id's.  The actual operation is
> - * based on the rdma_cm_id's port space.
> - */
>   int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
> +int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
>   
>   int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
>   		     struct rdma_ucm_ece *ece);
> diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
> index 06603dd1c8aa38..b36b60668b1da9 100644
> --- a/net/rds/ib_cm.c
> +++ b/net/rds/ib_cm.c
> @@ -956,9 +956,10 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
>   	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
>   				  conn->c_proposed_version,
>   				  UINT_MAX, UINT_MAX, isv6);
> -	ret = rdma_connect(cm_id, &conn_param);
> +	ret = rdma_connect_locked(cm_id, &conn_param);
>   	if (ret)
> -		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
> +		rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n",
> +				  ret);
>   
>   out:
>   	/* Beware - returning non-zero tells the rdma_cm to destroy
> 

* Re: [PATCH] RDMA: Add rdma_connect_locked()
From: Jinpu Wang @ 2020-10-27  7:33 UTC
  To: Jason Gunthorpe
  Cc: Max Gurtovoy, rds-devel, Sagi Grimberg, linux-rdma, netdev,
	Santosh Shilimkar, Guoqing Jiang, linux-nvme, Doug Ledford,
	Danil Kipnis, Keith Busch, Leon Romanovsky, Christoph Hellwig

On Mon, Oct 26, 2020 at 3:25 PM Jason Gunthorpe <jgg@nvidia.com> wrote:
>
> There are two flows for handling RDMA_CM_EVENT_ROUTE_RESOLVED: either the
> handler triggers a completion and another thread does rdma_connect(), or
> the handler directly calls rdma_connect().
>
> In all cases rdma_connect() needs to hold the handler_mutex, but when
> handlers are invoked this is already held by the core code. This causes
> ULPs using the second method to deadlock.
>
> Provide a rdma_connect_locked() and have all ULPs call it from their
> handlers.
>
> Reported-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
> Fixes: 2a7cec538169 ("RDMA/cma: Fix locking for the RDMA_CM_CONNECT state")
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>  drivers/infiniband/core/cma.c            | 39 +++++++++++++++++++++---
>  drivers/infiniband/ulp/iser/iser_verbs.c |  2 +-
>  drivers/infiniband/ulp/rtrs/rtrs-clt.c   |  4 +--
>  drivers/nvme/host/rdma.c                 | 10 +++---
>  include/rdma/rdma_cm.h                   | 13 +-------
>  net/rds/ib_cm.c                          |  5 +--
>  6 files changed, 47 insertions(+), 26 deletions(-)
>
> Seems people are not testing these four ULPs against rdma-next. Here is a
> quick fix for the issue:
>
> https://lore.kernel.org/r/3b1f7767-98e2-93e0-b718-16d1c5346140@cloud.ionos.com
>
> Jason
>
> diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
> index 7c2ab1f2fbea37..2eaaa1292fb847 100644
> --- a/drivers/infiniband/core/cma.c
> +++ b/drivers/infiniband/core/cma.c
> @@ -405,10 +405,10 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
>         /*
>          * The FSM uses a funny double locking where state is protected by both
>          * the handler_mutex and the spinlock. State is not allowed to change
> -        * away from a handler_mutex protected value without also holding
> +        * to/from a handler_mutex protected value without also holding
>          * handler_mutex.
>          */
> -       if (comp == RDMA_CM_CONNECT)
> +       if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
>                 lockdep_assert_held(&id_priv->handler_mutex);
>
>         spin_lock_irqsave(&id_priv->lock, flags);
> @@ -4038,13 +4038,20 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
>         return ret;
>  }
>
> -int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
> +/**
> + * rdma_connect_locked - Initiate an active connection request.
> + * @id: Connection identifier to connect.
> + * @conn_param: Connection information used for connected QPs.
> + *
> + * Same as rdma_connect() but can only be called from the
> + * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
> + */
> +int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>  {
>         struct rdma_id_private *id_priv =
>                 container_of(id, struct rdma_id_private, id);
>         int ret;
>
> -       mutex_lock(&id_priv->handler_mutex);
>         if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) {
>                 ret = -EINVAL;
>                 goto err_unlock;
> @@ -4071,6 +4078,30 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>  err_state:
>         cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
>  err_unlock:
> +       return ret;
> +}
> +EXPORT_SYMBOL(rdma_connect_locked);
> +
> +/**
> + * rdma_connect - Initiate an active connection request.
> + * @id: Connection identifier to connect.
> + * @conn_param: Connection information used for connected QPs.
> + *
> + * Users must have resolved a route for the rdma_cm_id to connect with by having
> + * called rdma_resolve_route before calling this routine.
> + *
> + * This call will either connect to a remote QP or obtain remote QP information
> + * for unconnected rdma_cm_id's.  The actual operation is based on the
> + * rdma_cm_id's port space.
> + */
> +int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
> +{
> +       struct rdma_id_private *id_priv =
> +               container_of(id, struct rdma_id_private, id);
> +       int ret;
> +
> +       mutex_lock(&id_priv->handler_mutex);
> +       ret = rdma_connect_locked(id, conn_param);
>         mutex_unlock(&id_priv->handler_mutex);
>         return ret;
>  }
> diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
> index 2f3ebc0a75d924..2bd18b00689341 100644
> --- a/drivers/infiniband/ulp/iser/iser_verbs.c
> +++ b/drivers/infiniband/ulp/iser/iser_verbs.c
> @@ -620,7 +620,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
>         conn_param.private_data = (void *)&req_hdr;
>         conn_param.private_data_len = sizeof(struct iser_cm_hdr);
>
> -       ret = rdma_connect(cma_id, &conn_param);
> +       ret = rdma_connect_locked(cma_id, &conn_param);
>         if (ret) {
>                 iser_err("failure connecting: %d\n", ret);
>                 goto failure;
> diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
> index 776e89231c52f7..f298adc02acba2 100644
> --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
> +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
> @@ -1674,9 +1674,9 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
>         uuid_copy(&msg.sess_uuid, &sess->s.uuid);
>         uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
>
> -       err = rdma_connect(con->c.cm_id, &param);
> +       err = rdma_connect_locked(con->c.cm_id, &param);
>         if (err)
> -               rtrs_err(clt, "rdma_connect(): %d\n", err);
> +               rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
>
>         return err;
>  }
For rtrs, looks good to me!
Thanks for the quick fix.
Acked-by: Jack Wang <jinpu.wang@cloud.ionos.com>

* Re: [PATCH] RDMA: Add rdma_connect_locked()
From: Christoph Hellwig @ 2020-10-27  8:04 UTC
  To: Jason Gunthorpe
  Cc: Max Gurtovoy, rds-devel, Sagi Grimberg, linux-rdma, netdev,
	Santosh Shilimkar, Guoqing Jiang, linux-nvme, Doug Ledford,
	Danil Kipnis, Keith Busch, Jack Wang, Leon Romanovsky,
	Christoph Hellwig

> +int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)

This adds an overly long line.

> +int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);

Same here.
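
For instance (illustrating only the wrapping, not a tested hunk), the
declaration could be split as:

	int rdma_connect_locked(struct rdma_cm_id *id,
				struct rdma_conn_param *conn_param);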

Otherwise looks good except for the nvme merge error pointed out by
Chao Leng.

Reviewed-by: Christoph Hellwig <hch@lst.de>

* Re: [PATCH] RDMA: Add rdma_connect_locked()
From: Jason Gunthorpe @ 2020-10-27 12:00 UTC
  To: Chao Leng
  Cc: Max Gurtovoy, rds-devel, Sagi Grimberg, linux-rdma, netdev,
	Santosh Shilimkar, Guoqing Jiang, linux-nvme, Doug Ledford,
	Danil Kipnis, Keith Busch, Jack Wang, Leon Romanovsky,
	Christoph Hellwig

On Tue, Oct 27, 2020 at 10:01:00AM +0800, Chao Leng wrote:
> > diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
> > index aad829a2b50d0f..f488dc5f4c2c61 100644
> > +++ b/drivers/nvme/host/rdma.c
> > @@ -1730,11 +1730,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
> >   	req->result = cqe->result;
> >   	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
> > -		if (unlikely(!req->mr ||
> > -			     wc->ex.invalidate_rkey != req->mr->rkey)) {
> > +		if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
> >   			dev_err(queue->ctrl->ctrl.device,
> >   				"Bogus remote invalidation for rkey %#x\n",
> > -				req->mr ? req->mr->rkey : 0);
> > +				req->mr->rkey);
> Maybe the patch was generated against the wrong code version, causing an
> unintended revert here.

Oh wow, thanks for noticing that, I made a git fumble when doing this
:(

Jason

* Re: [PATCH] RDMA: Add rdma_connect_locked()
From: Guoqing Jiang @ 2020-10-27 12:05 UTC
  To: Jason Gunthorpe, Danil Kipnis, Doug Ledford, Christoph Hellwig,
	Jack Wang, Keith Busch, linux-nvme, linux-rdma, Max Gurtovoy,
	netdev, rds-devel, Sagi Grimberg, Santosh Shilimkar
  Cc: Leon Romanovsky



On 10/26/20 15:25, Jason Gunthorpe wrote:
> There are two flows for handling RDMA_CM_EVENT_ROUTE_RESOLVED: either the
> handler triggers a completion and another thread does rdma_connect(), or
> the handler directly calls rdma_connect().
> 
> In all cases rdma_connect() needs to hold the handler_mutex, but when
> handlers are invoked this is already held by the core code. This causes
> ULPs using the second method to deadlock.
> 
> Provide a rdma_connect_locked() and have all ULPs call it from their
> handlers.
> 
> Reported-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
> Fixes: 2a7cec538169 ("RDMA/cma: Fix locking for the RDMA_CM_CONNECT state")
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>   drivers/infiniband/core/cma.c            | 39 +++++++++++++++++++++---
>   drivers/infiniband/ulp/iser/iser_verbs.c |  2 +-
>   drivers/infiniband/ulp/rtrs/rtrs-clt.c   |  4 +--
>   drivers/nvme/host/rdma.c                 | 10 +++---
>   include/rdma/rdma_cm.h                   | 13 +-------
>   net/rds/ib_cm.c                          |  5 +--
>   6 files changed, 47 insertions(+), 26 deletions(-)
> 
> Seems people are not testing these four ULPs against rdma-next. Here is a
> quick fix for the issue:
> 
> https://lore.kernel.org/r/3b1f7767-98e2-93e0-b718-16d1c5346140@cloud.ionos.com

I no longer see the previous call trace with this patch.

Tested-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>


Thanks,
Guoqing
