linux-rdma.vger.kernel.org archive mirror
* [PATCH] RDMA/mlx4: Avoid flush_scheduled_work() usage
@ 2022-05-06 13:50 Tetsuo Handa
  2022-05-19  9:55 ` Tetsuo Handa
  2022-05-20 14:26 ` Jason Gunthorpe
  0 siblings, 2 replies; 3+ messages in thread
From: Tetsuo Handa @ 2022-05-06 13:50 UTC (permalink / raw)
  To: Yishai Hadas, Jason Gunthorpe, Leon Romanovsky; +Cc: OFED mailing list

Flushing system-wide workqueues is dangerous and will be forbidden.
Replace system_wq with local cm_wq.

Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
Note: This patch is only compile tested.
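
For reviewers who want the pattern in isolation, the conversion follows the
usual recipe for retiring flush_scheduled_work(): allocate a driver-private
workqueue at init, queue/mod delayed work on that queue instead of system_wq,
flush only that queue when a drain is needed, and destroy it at exit. A
minimal sketch of that pattern (the identifiers below are illustrative, not
taken from this driver):

  #include <linux/module.h>
  #include <linux/workqueue.h>

  static struct workqueue_struct *example_wq;
  static struct delayed_work example_dwork;

  static void example_fn(struct work_struct *work)
  {
          /* deferred cleanup would go here */
  }

  static int __init example_init(void)
  {
          /* Private queue: flushing it cannot stall on unrelated system work. */
          example_wq = alloc_workqueue("example_wq", 0, 0);
          if (!example_wq)
                  return -ENOMEM;
          INIT_DELAYED_WORK(&example_dwork, example_fn);
          queue_delayed_work(example_wq, &example_dwork, HZ);
          return 0;
  }

  static void example_drain(void)
  {
          /* Run the pending item now and wait for it; replaces
           * mod_delayed_work(system_wq, ...) + flush_scheduled_work().
           */
          mod_delayed_work(example_wq, &example_dwork, 0);
          flush_workqueue(example_wq);
  }

  static void __exit example_exit(void)
  {
          example_drain();
          destroy_workqueue(example_wq);
  }

  module_init(example_init);
  module_exit(example_exit);
  MODULE_LICENSE("GPL");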

 drivers/infiniband/hw/mlx4/cm.c      | 29 +++++++++++++++++++++-------
 drivers/infiniband/hw/mlx4/main.c    | 10 +++++++++-
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  3 +++
 3 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 4aff1c8298b1..12b481d138cf 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -80,6 +80,7 @@ struct cm_req_msg {
 	union ib_gid primary_path_sgid;
 };
 
+static struct workqueue_struct *cm_wq;
 
 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
 {
@@ -288,10 +289,10 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
 	/*make sure that there is no schedule inside the scheduled work.*/
 	if (!sriov->is_going_down && !id->scheduled_delete) {
 		id->scheduled_delete = 1;
-		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+		queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 	} else if (id->scheduled_delete) {
 		/* Adjust timeout if already scheduled */
-		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+		mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 	}
 	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
 	spin_unlock(&sriov->id_map_lock);
@@ -370,7 +371,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
 			ret =  xa_err(item);
 		else
 			/* If a retry, adjust delayed work */
-			mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+			mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 		goto err_or_exists;
 	}
 	xa_unlock(&sriov->xa_rej_tmout);
@@ -393,7 +394,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
 		return xa_err(old);
 	}
 
-	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+	queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 
 	return 0;
 
@@ -500,7 +501,7 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
 	xa_lock(&sriov->xa_rej_tmout);
 	xa_for_each(&sriov->xa_rej_tmout, id, item) {
 		if (slave < 0 || slave == item->slave) {
-			mod_delayed_work(system_wq, &item->timeout, 0);
+			mod_delayed_work(cm_wq, &item->timeout, 0);
 			flush_needed = true;
 			++cnt;
 		}
@@ -508,7 +509,7 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
 	xa_unlock(&sriov->xa_rej_tmout);
 
 	if (flush_needed) {
-		flush_scheduled_work();
+		flush_workqueue(cm_wq);
 		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
 			 cnt, slave);
 	}
@@ -540,7 +541,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 	spin_unlock(&sriov->id_map_lock);
 
 	if (need_flush)
-		flush_scheduled_work(); /* make sure all timers were flushed */
+		flush_workqueue(cm_wq); /* make sure all timers were flushed */
 
 	/* now, remove all leftover entries from databases*/
 	spin_lock(&sriov->id_map_lock);
@@ -587,3 +588,17 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 
 	rej_tmout_xa_cleanup(sriov, slave);
 }
+
+int mlx4_ib_cm_init(void)
+{
+	cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
+	if (!cm_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_ib_cm_destroy(void)
+{
+	destroy_workqueue(cm_wq);
+}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c448168375db..ba47874f90d3 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -3307,10 +3307,14 @@ static int __init mlx4_ib_init(void)
 	if (!wq)
 		return -ENOMEM;
 
-	err = mlx4_ib_mcg_init();
+	err = mlx4_ib_cm_init();
 	if (err)
 		goto clean_wq;
 
+	err = mlx4_ib_mcg_init();
+	if (err)
+		goto clean_cm;
+
 	err = mlx4_register_interface(&mlx4_ib_interface);
 	if (err)
 		goto clean_mcg;
@@ -3320,6 +3324,9 @@ static int __init mlx4_ib_init(void)
 clean_mcg:
 	mlx4_ib_mcg_destroy();
 
+clean_cm:
+	mlx4_ib_cm_destroy();
+
 clean_wq:
 	destroy_workqueue(wq);
 	return err;
@@ -3329,6 +3336,7 @@ static void __exit mlx4_ib_cleanup(void)
 {
 	mlx4_unregister_interface(&mlx4_ib_interface);
 	mlx4_ib_mcg_destroy();
+	mlx4_ib_cm_destroy();
 	destroy_workqueue(wq);
 }
 
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index d84023b4b1b8..6a3b0f121045 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -937,4 +937,7 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 				       int *num_of_mtts);
 
+int mlx4_ib_cm_init(void);
+void mlx4_ib_cm_destroy(void);
+
 #endif /* MLX4_IB_H */
-- 
2.34.1


* Re: [PATCH] RDMA/mlx4: Avoid flush_scheduled_work() usage
  2022-05-06 13:50 [PATCH] RDMA/mlx4: Avoid flush_scheduled_work() usage Tetsuo Handa
@ 2022-05-19  9:55 ` Tetsuo Handa
  2022-05-20 14:26 ` Jason Gunthorpe
  1 sibling, 0 replies; 3+ messages in thread
From: Tetsuo Handa @ 2022-05-19  9:55 UTC (permalink / raw)
  To: Yishai Hadas, Jason Gunthorpe, Leon Romanovsky; +Cc: OFED mailing list

Any questions?

On 2022/05/06 22:50, Tetsuo Handa wrote:
> Flushing system-wide workqueues is dangerous and will be forbidden.
> Replace system_wq with local cm_wq.
> 
> Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
> Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
> ---
> Note: This patch is only compile tested.
> 
>  drivers/infiniband/hw/mlx4/cm.c      | 29 +++++++++++++++++++++-------
>  drivers/infiniband/hw/mlx4/main.c    | 10 +++++++++-
>  drivers/infiniband/hw/mlx4/mlx4_ib.h |  3 +++
>  3 files changed, 34 insertions(+), 8 deletions(-)
> 


* Re: [PATCH] RDMA/mlx4: Avoid flush_scheduled_work() usage
  2022-05-06 13:50 [PATCH] RDMA/mlx4: Avoid flush_scheduled_work() usage Tetsuo Handa
  2022-05-19  9:55 ` Tetsuo Handa
@ 2022-05-20 14:26 ` Jason Gunthorpe
  1 sibling, 0 replies; 3+ messages in thread
From: Jason Gunthorpe @ 2022-05-20 14:26 UTC (permalink / raw)
  To: Tetsuo Handa; +Cc: Yishai Hadas, Leon Romanovsky, OFED mailing list

On Fri, May 06, 2022 at 10:50:13PM +0900, Tetsuo Handa wrote:
> Flushing system-wide workqueues is dangerous and will be forbidden.
> Replace system_wq with local cm_wq.
> 
> Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
> Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
> ---
> Note: This patch is only compile tested.
> 
>  drivers/infiniband/hw/mlx4/cm.c      | 29 +++++++++++++++++++++-------
>  drivers/infiniband/hw/mlx4/main.c    | 10 +++++++++-
>  drivers/infiniband/hw/mlx4/mlx4_ib.h |  3 +++
>  3 files changed, 34 insertions(+), 8 deletions(-)

Applied to for-next, thanks

Jason

