linux-rdma.vger.kernel.org archive mirror
* [PATCH v1,for-rc] RDMA/iwcm: move iw_rem_ref() calls out of spinlock
@ 2019-10-07 10:26 Krishnamraju Eraparaju
  2019-10-18 15:19 ` Bernard Metzler
  2019-10-18 18:47 ` Doug Ledford
  0 siblings, 2 replies; 3+ messages in thread
From: Krishnamraju Eraparaju @ 2019-10-07 10:26 UTC (permalink / raw)
  To: jgg, bmt
  Cc: linux-rdma, bharat, nirranjan, sagi, larrystevenwise,
	Krishnamraju Eraparaju

kref release routines usually perform memory release operations, so
they should not be called with spinlocks held. One such case is the
SIW kref release routine siw_free_qp(), which can sleep via vfree()
while freeing queue memory.

Hence, move all iw_rem_ref() calls in IWCM out of spinlocks.

Fixes: 922a8e9fb2e0 ("RDMA: iWARP Connection Manager.")
Signed-off-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
---
v0 -> v1:
- changed component name in subject line: siw -> iwcm
- added "Fixes" line.
---
 drivers/infiniband/core/iwcm.c | 52 +++++++++++++++++++---------------
 1 file changed, 29 insertions(+), 23 deletions(-)

diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 72141c5b7c95..ade71823370f 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect);
 static void destroy_cm_id(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
+	struct ib_qp *qp;
 	unsigned long flags;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
 	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	qp = cm_id_priv->qp;
+	cm_id_priv->qp = NULL;
+
 	switch (cm_id_priv->state) {
 	case IW_CM_STATE_LISTEN:
 		cm_id_priv->state = IW_CM_STATE_DESTROYING;
@@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
 		cm_id_priv->state = IW_CM_STATE_DESTROYING;
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 		/* Abrupt close of the connection */
-		(void)iwcm_modify_qp_err(cm_id_priv->qp);
+		(void)iwcm_modify_qp_err(qp);
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
 		break;
 	case IW_CM_STATE_IDLE:
@@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
 		BUG();
 		break;
 	}
-	if (cm_id_priv->qp) {
-		cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-		cm_id_priv->qp = NULL;
-	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	if (qp)
+		cm_id_priv->id.device->ops.iw_rem_ref(qp);
 
 	if (cm_id->mapped) {
 		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
@@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
 		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
 		cm_id_priv->state = IW_CM_STATE_IDLE;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
-		if (cm_id_priv->qp) {
-			cm_id->device->ops.iw_rem_ref(qp);
-			cm_id_priv->qp = NULL;
-		}
+		qp = cm_id_priv->qp;
+		cm_id_priv->qp = NULL;
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		if (qp)
+			cm_id->device->ops.iw_rem_ref(qp);
 		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 		wake_up_all(&cm_id_priv->connect_wait);
 	}
@@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	struct iwcm_id_private *cm_id_priv;
 	int ret;
 	unsigned long flags;
-	struct ib_qp *qp;
+	struct ib_qp *qp = NULL;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 		return 0;	/* success */
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	if (cm_id_priv->qp) {
-		cm_id->device->ops.iw_rem_ref(qp);
-		cm_id_priv->qp = NULL;
-	}
+	qp = cm_id_priv->qp;
+	cm_id_priv->qp = NULL;
 	cm_id_priv->state = IW_CM_STATE_IDLE;
 err:
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	if (qp)
+		cm_id->device->ops.iw_rem_ref(qp);
 	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 	wake_up_all(&cm_id_priv->connect_wait);
 	return ret;
@@ -878,6 +880,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 			       struct iw_cm_event *iw_event)
 {
+	struct ib_qp *qp = NULL;
 	unsigned long flags;
 	int ret;
 
@@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
 	} else {
 		/* REJECTED or RESET */
-		cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+		qp = cm_id_priv->qp;
 		cm_id_priv->qp = NULL;
 		cm_id_priv->state = IW_CM_STATE_IDLE;
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	if (qp)
+		cm_id_priv->id.device->ops.iw_rem_ref(qp);
 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 
 	if (iw_event->private_data_len)
@@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
 				  struct iw_cm_event *iw_event)
 {
+	struct ib_qp *qp;
 	unsigned long flags;
-	int ret = 0;
+	int ret = 0, notify_event = 0;
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	qp = cm_id_priv->qp;
+	cm_id_priv->qp = NULL;
 
-	if (cm_id_priv->qp) {
-		cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-		cm_id_priv->qp = NULL;
-	}
 	switch (cm_id_priv->state) {
 	case IW_CM_STATE_ESTABLISHED:
 	case IW_CM_STATE_CLOSING:
 		cm_id_priv->state = IW_CM_STATE_IDLE;
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
-		spin_lock_irqsave(&cm_id_priv->lock, flags);
+		notify_event = 1;
 		break;
 	case IW_CM_STATE_DESTROYING:
 		break;
@@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
+	if (qp)
+		cm_id_priv->id.device->ops.iw_rem_ref(qp);
+	if (notify_event)
+		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 	return ret;
 }
 
-- 
2.23.0.rc0
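
For context, the locking rule behind this change can be illustrated with a
minimal, hypothetical sketch (struct foo_conn, struct foo_obj and the foo_*()
helpers below are made-up names, not part of the patch): detach the
kref-counted object while holding the spinlock, then drop the reference only
after the lock is released, because the release callback may sleep, e.g. via
vfree().

/*
 * Hypothetical sketch of the pattern applied throughout iwcm.c by this
 * patch; the foo_* names are invented for illustration only.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct foo_obj {
        struct kref ref;
        void *queue_mem;                /* vmalloc()'ed, like SIW queue memory */
};

struct foo_conn {
        spinlock_t lock;
        struct foo_obj *obj;            /* protected by 'lock' */
};

static void foo_release(struct kref *ref)
{
        struct foo_obj *obj = container_of(ref, struct foo_obj, ref);

        vfree(obj->queue_mem);          /* may sleep: illegal in atomic context */
        kfree(obj);
}

/*
 * Wrong: dropping the last reference under the spinlock may run
 * foo_release() -> vfree() while atomic.
 */
static void foo_detach_buggy(struct foo_conn *conn)
{
        unsigned long flags;

        spin_lock_irqsave(&conn->lock, flags);
        if (conn->obj) {
                kref_put(&conn->obj->ref, foo_release);
                conn->obj = NULL;
        }
        spin_unlock_irqrestore(&conn->lock, flags);
}

/*
 * Right: detach the object under the lock, drop the reference only after
 * unlocking, as the patch does for cm_id_priv->qp.
 */
static void foo_detach_fixed(struct foo_conn *conn)
{
        struct foo_obj *obj;
        unsigned long flags;

        spin_lock_irqsave(&conn->lock, flags);
        obj = conn->obj;
        conn->obj = NULL;
        spin_unlock_irqrestore(&conn->lock, flags);

        if (obj)
                kref_put(&obj->ref, foo_release);
}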



* Re: [PATCH v1,for-rc] RDMA/iwcm: move iw_rem_ref() calls out of spinlock
  2019-10-07 10:26 [PATCH v1,for-rc] RDMA/iwcm: move iw_rem_ref() calls out of spinlock Krishnamraju Eraparaju
@ 2019-10-18 15:19 ` Bernard Metzler
  2019-10-18 18:47 ` Doug Ledford
  1 sibling, 0 replies; 3+ messages in thread
From: Bernard Metzler @ 2019-10-18 15:19 UTC (permalink / raw)
  To: Krishnamraju Eraparaju
  Cc: jgg, linux-rdma, bharat, nirranjan, sagi, larrystevenwise

-----"Krishnamraju Eraparaju" <krishna2@chelsio.com> wrote: -----

>To: jgg@ziepe.ca, bmt@zurich.ibm.com
>From: "Krishnamraju Eraparaju" <krishna2@chelsio.com>
>Date: 10/07/2019 12:28PM
>Cc: linux-rdma@vger.kernel.org, bharat@chelsio.com,
>nirranjan@chelsio.com, sagi@grimberg.me, larrystevenwise@gmail.com,
>"Krishnamraju Eraparaju" <krishna2@chelsio.com>
>Subject: [EXTERNAL] [PATCH v1,for-rc] RDMA/iwcm: move iw_rem_ref()
>calls out of spinlock
>
>kref release routines usually perform memory release operations,
>hence, they should not be called with spinlocks held.
>one such case is: SIW kref release routine siw_free_qp(), which
>can sleep via vfree() while freeing queue memory.
>
>Hence, all iw_rem_ref() calls in IWCM are moved out of spinlocks.
>
>Fixes: 922a8e9fb2e0 ("RDMA: iWARP Connection Manager.")
>Signed-off-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
>[...]

That looks good to me.

Thanks,
Bernard.

Reviewed-by: Bernard Metzler <bmt@zurich.ibm.com>



* Re: [PATCH v1,for-rc] RDMA/iwcm: move iw_rem_ref() calls out of spinlock
  2019-10-07 10:26 [PATCH v1,for-rc] RDMA/iwcm: move iw_rem_ref() calls out of spinlock Krishnamraju Eraparaju
  2019-10-18 15:19 ` Bernard Metzler
@ 2019-10-18 18:47 ` Doug Ledford
  1 sibling, 0 replies; 3+ messages in thread
From: Doug Ledford @ 2019-10-18 18:47 UTC (permalink / raw)
  To: Krishnamraju Eraparaju, jgg, bmt
  Cc: linux-rdma, bharat, nirranjan, sagi, larrystevenwise

On Mon, 2019-10-07 at 15:56 +0530, Krishnamraju Eraparaju wrote:
> kref release routines usually perform memory release operations,
> hence, they should not be called with spinlocks held.
> one such case is: SIW kref release routine siw_free_qp(), which
> can sleep via vfree() while freeing queue memory.
> 
> Hence, all iw_rem_ref() calls in IWCM are moved out of spinlocks.
> 
> Fixes: 922a8e9fb2e0 ("RDMA: iWARP Connection Manager.")
> Signed-off-by: Krishnamraju Eraparaju <krishna2@chelsio.com>

Thanks, applied to for-rc.

-- 
Doug Ledford <dledford@redhat.com>
    GPG KeyID: B826A3330E572FDD
    Fingerprint = AE6B 1BDA 122B 23B4 265B  1274 B826 A333 0E57 2FDD


