* [PATCH rdma-next] RDMA/cm: Use RCU synchronization mechanism to protect cm_id_private xa_load()
@ 2019-12-19 13:47 Leon Romanovsky
From: Leon Romanovsky @ 2019-12-19 13:47 UTC
To: Doug Ledford, Jason Gunthorpe
Cc: Danit Goldberg, RDMA mailing list, Leon Romanovsky
From: Danit Goldberg <danitg@mellanox.com>
The RCU mechanism is optimized for read-mostly scenarios and is
therefore better suited to protecting cm_id_private lookups, reducing
contention on cm.lock.

Replace the spinlock (cm.lock) and kfree() with RCU protection and
kfree_rcu() in the places where the lock only guarded the cm_id_priv
xa_load().

In addition, delete the cm_get_id() function, calling cm_acquire_id()
directly with the correct locking, and replace the remaining
open-coded copy of cm_get_id() with a call to cm_acquire_id().
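
For readers unfamiliar with the idiom, this is the standard RCU +
refcount lookup pattern. A minimal, simplified sketch follows; the
names obj, obj_acquire and obj_release are illustrative only, not the
actual cm.c code:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct obj {
	refcount_t refcount;
	struct rcu_head rcu;
};

/* Reader side: lockless. RCU guarantees the memory is not reclaimed
 * while we inspect it, and refcount_inc_not_zero() refuses to take a
 * reference on an object whose last reference is already gone. */
static struct obj *obj_acquire(struct xarray *xa, unsigned long id)
{
	struct obj *obj;

	rcu_read_lock();
	obj = xa_load(xa, id);
	if (obj && !refcount_inc_not_zero(&obj->refcount))
		obj = NULL;
	rcu_read_unlock();
	return obj;
}

/* Writer side: erase the entry, then defer the free with kfree_rcu()
 * so concurrent readers drain before the memory is reclaimed. */
static void obj_release(struct xarray *xa, struct obj *obj,
			unsigned long id)
{
	if (refcount_dec_and_test(&obj->refcount)) {
		xa_erase(xa, id);
		kfree_rcu(obj, rcu);
	}
}

The refcount_inc_not_zero() is what makes the lockless xa_load() safe:
a reader racing with the final put sees the increment fail and treats
the lookup as a miss, while kfree_rcu() keeps the memory valid for the
duration of any outstanding rcu_read_lock() section.
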
Signed-off-by: Danit Goldberg <danitg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
drivers/infiniband/core/cm.c | 43 +++++++++++-------------------------
1 file changed, 13 insertions(+), 30 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index d42a3887057b..0ed69e213bf8 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -241,6 +241,7 @@ struct cm_id_private {
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
* Protected by the cm.lock spinlock. */
int listen_sharecount;
+ struct rcu_head rcu;
struct ib_mad_send_buf *msg;
struct cm_timewait_info *timewait_info;
@@ -593,28 +594,16 @@ static void cm_free_id(__be32 local_id)
xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
}
-static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
+static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
+ rcu_read_lock();
cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
- if (cm_id_priv) {
- if (cm_id_priv->id.remote_id == remote_id)
- refcount_inc(&cm_id_priv->refcount);
- else
- cm_id_priv = NULL;
- }
-
- return cm_id_priv;
-}
-
-static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
-{
- struct cm_id_private *cm_id_priv;
-
- spin_lock_irq(&cm.lock);
- cm_id_priv = cm_get_id(local_id, remote_id);
- spin_unlock_irq(&cm.lock);
+ if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
+ !refcount_inc_not_zero(&cm_id_priv->refcount))
+ cm_id_priv = NULL;
+ rcu_read_unlock();
return cm_id_priv;
}
@@ -1089,7 +1078,7 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
kfree(cm_id_priv->private_data);
- kfree(cm_id_priv);
+ kfree_rcu(cm_id_priv, rcu);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
@@ -1851,7 +1840,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
spin_lock_irq(&cm.lock);
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
if (timewait_info) {
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock_irq(&cm.lock);
if (cur_cm_id_priv) {
@@ -1865,7 +1854,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock_irq(&cm.lock);
@@ -2336,7 +2325,7 @@ static int cm_rep_handler(struct cm_work *work)
rb_erase(&cm_id_priv->timewait_info->remote_id_node,
&cm.remote_id_table);
cm_id_priv->timewait_info->inserted_remote_id = 0;
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock(&cm.lock);
@@ -2837,14 +2826,8 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
spin_unlock_irq(&cm.lock);
return NULL;
}
- cm_id_priv = xa_load(&cm.local_id_table,
- cm_local_id(timewait_info->work.local_id));
- if (cm_id_priv) {
- if (cm_id_priv->id.remote_id == remote_id)
- refcount_inc(&cm_id_priv->refcount);
- else
- cm_id_priv = NULL;
- }
+ cm_id_priv =
+ cm_acquire_id(timewait_info->work.local_id, remote_id);
spin_unlock_irq(&cm.lock);
} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) == CM_MSG_RESPONSE_REQ)
cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
--
2.20.1
* Re: [PATCH rdma-next] RDMA/cm: Use RCU synchronization mechanism to protect cm_id_private xa_load()
@ 2020-01-03 20:36 ` Jason Gunthorpe
From: Jason Gunthorpe @ 2020-01-03 20:36 UTC
To: Leon Romanovsky
Cc: Doug Ledford, Danit Goldberg, RDMA mailing list, Leon Romanovsky
On Thu, Dec 19, 2019 at 03:47:50PM +0200, Leon Romanovsky wrote:
> From: Danit Goldberg <danitg@mellanox.com>
>
> The RCU mechanism is optimized for read-mostly scenarios and is
> therefore better suited to protecting cm_id_private lookups, reducing
> contention on cm.lock.
>
> Replace the spinlock (cm.lock) and kfree() with RCU protection and
> kfree_rcu() in the places where the lock only guarded the cm_id_priv
> xa_load().
>
> In addition, delete the cm_get_id() function, calling cm_acquire_id()
> directly with the correct locking, and replace the remaining
> open-coded copy of cm_get_id() with a call to cm_acquire_id().
>
> Signed-off-by: Danit Goldberg <danitg@mellanox.com>
> Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
> ---
> drivers/infiniband/core/cm.c | 43 +++++++++++-------------------------
> 1 file changed, 13 insertions(+), 30 deletions(-)
Applied to for-next, thanks
Jason