* [PATCH] nvmet-fc: associations list replaced with hlist rcu,
@ 2020-12-24 11:05 ` leonid.ravich
  0 siblings, 0 replies; 19+ messages in thread
From: leonid.ravich @ 2020-12-24 11:05 UTC (permalink / raw)
  To: james.smart
  Cc: lravich, Leonid Ravich, Christoph Hellwig, Sagi Grimberg,
	Chaitanya Kulkarni, linux-nvme, linux-kernel

From: Leonid Ravich <Leonid.Ravich@emc.com>

to remove locking from nvmet_fc_find_target_queue(),
which is called per I/O.

Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
---
 drivers/nvme/target/fc.c | 54 ++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index cd4e73aa9807..3928a17d073c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -105,7 +105,7 @@ struct nvmet_fc_tgtport {
 	struct list_head		ls_rcv_list;
 	struct list_head		ls_req_list;
 	struct list_head		ls_busylist;
-	struct list_head		assoc_list;
+	struct hlist_head		assoc_list;
 	struct list_head		host_list;
 	struct ida			assoc_cnt;
 	struct nvmet_fc_port_entry	*pe;
@@ -163,10 +163,11 @@ struct nvmet_fc_tgt_assoc {
 	struct nvmet_fc_tgtport		*tgtport;
 	struct nvmet_fc_hostport	*hostport;
 	struct nvmet_fc_ls_iod		*rcv_disconn;
-	struct list_head		a_list;
+	struct hlist_node		a_list;
 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
 	struct kref			ref;
 	struct work_struct		del_work;
+	struct rcu_head		rcu_head;
 };
 
 
@@ -965,24 +966,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
 	struct nvmet_fc_tgt_queue *queue;
 	u64 association_id = nvmet_fc_getassociationid(connection_id);
 	u16 qid = nvmet_fc_getqueueid(connection_id);
-	unsigned long flags;
 
 	if (qid > NVMET_NR_QUEUES)
 		return NULL;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
 			queue = assoc->queues[qid];
 			if (queue &&
 			    (!atomic_read(&queue->connected) ||
 			     !nvmet_fc_tgt_q_get(queue)))
 				queue = NULL;
-			spin_unlock_irqrestore(&tgtport->lock, flags);
+			rcu_read_unlock();
 			return queue;
 		}
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
+	rcu_read_unlock();
 	return NULL;
 }
 
@@ -1118,7 +1118,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 
 	assoc->tgtport = tgtport;
 	assoc->a_id = idx;
-	INIT_LIST_HEAD(&assoc->a_list);
+	INIT_HLIST_NODE(&assoc->a_list);
 	kref_init(&assoc->ref);
 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
 	atomic_set(&assoc->terminating, 0);
@@ -1129,7 +1129,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 
 		spin_lock_irqsave(&tgtport->lock, flags);
 		needrandom = false;
-		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
+		hlist_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
 			if (ran == tmpassoc->association_id) {
 				needrandom = true;
 				break;
@@ -1137,7 +1137,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 		}
 		if (!needrandom) {
 			assoc->association_id = ran;
-			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+			hlist_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
 		}
 		spin_unlock_irqrestore(&tgtport->lock, flags);
 	}
@@ -1153,6 +1153,17 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 	return NULL;
 }
 
+void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
+	struct nvmet_fc_tgt_assoc *assoc =
+		container_of(rcu_head, struct nvmet_fc_tgt_assoc, rcu_head);
+	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+
+	kfree(assoc);
+	dev_info(tgtport->dev,
+		"{%d:%d} Association freed\n",
+		tgtport->fc_target_port.port_num, assoc->a_id);
+}
+
 static void
 nvmet_fc_target_assoc_free(struct kref *ref)
 {
@@ -1167,17 +1178,14 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 
 	nvmet_fc_free_hostport(assoc->hostport);
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_del(&assoc->a_list);
+	hlist_del_rcu(&assoc->a_list);
 	oldls = assoc->rcv_disconn;
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 	/* if pending Rcv Disconnect Association LS, send rsp now */
 	if (oldls)
 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
 	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
-	dev_info(tgtport->dev,
-		"{%d:%d} Association freed\n",
-		tgtport->fc_target_port.port_num, assoc->a_id);
-	kfree(assoc);
+	call_rcu(&assoc->rcu_head, nvmet_assoc_free_queue_rcu);
 	nvmet_fc_tgtport_put(tgtport);
 }
 
@@ -1237,7 +1245,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+	hlist_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
 			ret = assoc;
 			if (!nvmet_fc_tgt_a_get(assoc))
@@ -1397,7 +1405,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->ls_rcv_list);
 	INIT_LIST_HEAD(&newrec->ls_req_list);
 	INIT_LIST_HEAD(&newrec->ls_busylist);
-	INIT_LIST_HEAD(&newrec->assoc_list);
+	INIT_HLIST_HEAD(&newrec->assoc_list);
 	INIT_LIST_HEAD(&newrec->host_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
@@ -1473,11 +1481,12 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
 static void
 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 {
-	struct nvmet_fc_tgt_assoc *assoc, *next;
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct hlist_node *next;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry_safe(assoc, next,
+	hlist_for_each_entry_safe(assoc, next,
 				&tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
@@ -1522,12 +1531,13 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			void *hosthandle)
 {
 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
-	struct nvmet_fc_tgt_assoc *assoc, *next;
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct hlist_node *next;
 	unsigned long flags;
 	bool noassoc = true;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry_safe(assoc, next,
+	hlist_for_each_entry_safe(assoc, next,
 				&tgtport->assoc_list, a_list) {
 		if (!assoc->hostport ||
 		    assoc->hostport->hosthandle != hosthandle)
@@ -1569,7 +1579,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
 
 		spin_lock_irqsave(&tgtport->lock, flags);
-		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+		hlist_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
 			queue = assoc->queues[0];
 			if (queue && queue->nvme_sq.ctrl == ctrl) {
 				if (nvmet_fc_tgt_a_get(assoc))
-- 
2.16.2
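
The diff above follows the standard RCU reader/updater split: lookups run
lock-free under rcu_read_lock(), while insertion and removal stay serialized
on tgtport->lock and the final kfree() is deferred past a grace period via
call_rcu(). A minimal, self-contained sketch of the same scheme follows; the
names (struct item, item_list, item_lock) are illustrative only, and the
driver additionally uses the _irqsave lock variants and takes a reference on
the found object before leaving the read-side section:

    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct item {
    	u64 id;
    	struct hlist_node node;
    	struct rcu_head rcu;
    };

    static HLIST_HEAD(item_list);
    static DEFINE_SPINLOCK(item_lock);	/* serializes writers only */

    /* reader: lock-free traversal under rcu_read_lock() */
    static struct item *item_find(u64 id)
    {
    	struct item *it;

    	rcu_read_lock();
    	hlist_for_each_entry_rcu(it, &item_list, node) {
    		if (it->id == id) {
    			/*
    			 * Real code must take a reference here, before
    			 * rcu_read_unlock(), as the driver does with
    			 * nvmet_fc_tgt_q_get(); otherwise the entry may
    			 * be freed as soon as the read section ends.
    			 */
    			rcu_read_unlock();
    			return it;
    		}
    	}
    	rcu_read_unlock();
    	return NULL;
    }

    /* writer: insertion still serialized by the spinlock */
    static void item_add(struct item *it)
    {
    	spin_lock(&item_lock);
    	hlist_add_tail_rcu(&it->node, &item_list);
    	spin_unlock(&item_lock);
    }

    /* writer: unlink now, free only after a grace period */
    static void item_free_rcu(struct rcu_head *rcu)
    {
    	kfree(container_of(rcu, struct item, rcu));
    }

    static void item_del(struct item *it)
    {
    	spin_lock(&item_lock);
    	hlist_del_rcu(&it->node);
    	spin_unlock(&item_lock);
    	call_rcu(&it->rcu, item_free_rcu);
    }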



* Re: [PATCH] nvmet-fc: associations list replaced with hlist rcu,
  2020-12-24 11:05 ` leonid.ravich
@ 2020-12-25 21:09   ` kernel test robot
  -1 siblings, 0 replies; 19+ messages in thread
From: kernel test robot @ 2020-12-25 21:09 UTC (permalink / raw)
  To: leonid.ravich, james.smart
  Cc: kbuild-all, clang-built-linux, lravich, Leonid Ravich,
	Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni, linux-nvme,
	linux-kernel

[-- Attachment #1: Type: text/plain, Size: 9941 bytes --]

Hi,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.10 next-20201223]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/leonid-ravich-dell-com/nvmet-fc-associations-list-replaced-with-hlist-rcu/20201224-191206
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 58cf05f597b03a8212d9ecf2c79ee046d3ee8ad9
config: s390-randconfig-r031-20201224 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project cee1e7d14f4628d6174b33640d502bff3b54ae45)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install s390 cross compiling tool for clang build
        # apt-get install binutils-s390x-linux-gnu
        # https://github.com/0day-ci/linux/commit/db9dbc6efce5ef6533984b7fbe395b365d71ce7f
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review leonid-ravich-dell-com/nvmet-fc-associations-list-replaced-with-hlist-rcu/20201224-191206
        git checkout db9dbc6efce5ef6533984b7fbe395b365d71ce7f
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=s390 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   include/uapi/linux/swab.h:19:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x000000ffUL) << 24) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:20:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:21:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:22:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0xff000000UL) >> 24)))
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:120:12: note: expanded from macro '__swab32'
           __fswab32(x))
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:501:33: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writeb(value, PCI_IOBASE + addr);
                               ~~~~~~~~~~ ^
   include/asm-generic/io.h:511:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
                                                         ~~~~~~~~~~ ^
   include/asm-generic/io.h:521:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
                                                         ~~~~~~~~~~ ^
   include/asm-generic/io.h:609:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsb(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:617:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsw(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:625:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsl(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:634:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesb(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
   include/asm-generic/io.h:643:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesw(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
   include/asm-generic/io.h:652:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesl(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
>> drivers/nvme/target/fc.c:1156:6: warning: no previous prototype for function 'nvmet_assoc_free_queue_rcu' [-Wmissing-prototypes]
   void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
        ^
   drivers/nvme/target/fc.c:1156:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
   ^
   static 
   drivers/nvme/target/fc.c:175:1: warning: unused function 'nvmet_fc_iodnum' [-Wunused-function]
   nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
   ^
   drivers/nvme/target/fc.c:181:1: warning: unused function 'nvmet_fc_fodnum' [-Wunused-function]
   nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
   ^
   23 warnings generated.


vim +/nvmet_assoc_free_queue_rcu +1156 drivers/nvme/target/fc.c

  1155	
> 1156	void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
  1157		struct nvmet_fc_tgt_assoc *assoc =
  1158			container_of(rcu_head, struct nvmet_fc_tgt_assoc, rcu_head);
  1159		struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
  1160	
  1161		kfree(assoc);
  1162		dev_info(tgtport->dev,
  1163			"{%d:%d} Association freed\n",
  1164			tgtport->fc_target_port.port_num, assoc->a_id);
  1165	}
  1166	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 15248 bytes --]
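
The fix the warning asks for is static linkage. Note also that the callback
as posted reads assoc->a_id after kfree(assoc) when printing the trace line.
A corrected form could look like the sketch below; this is a suggestion
against the quoted code, not what was later merged:

    static void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head)
    {
    	struct nvmet_fc_tgt_assoc *assoc =
    		container_of(rcu_head, struct nvmet_fc_tgt_assoc, rcu_head);
    	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;

    	/* log while assoc is still valid, then free it */
    	dev_info(tgtport->dev,
    		"{%d:%d} Association freed\n",
    		tgtport->fc_target_port.port_num, assoc->a_id);
    	kfree(assoc);
    }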

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] nvmet-fc: associations list replaced with hlist rcu,
@ 2020-12-25 21:09   ` kernel test robot
  0 siblings, 0 replies; 19+ messages in thread
From: kernel test robot @ 2020-12-25 21:09 UTC (permalink / raw)
  To: leonid.ravich, james.smart
  Cc: kbuild-all, Sagi Grimberg, Chaitanya Kulkarni, linux-kernel,
	linux-nvme, clang-built-linux, Leonid Ravich, Christoph Hellwig,
	lravich

[-- Attachment #1: Type: text/plain, Size: 9941 bytes --]

Hi,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.10 next-20201223]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/leonid-ravich-dell-com/nvmet-fc-associations-list-replaced-with-hlist-rcu/20201224-191206
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 58cf05f597b03a8212d9ecf2c79ee046d3ee8ad9
config: s390-randconfig-r031-20201224 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project cee1e7d14f4628d6174b33640d502bff3b54ae45)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install s390 cross compiling tool for clang build
        # apt-get install binutils-s390x-linux-gnu
        # https://github.com/0day-ci/linux/commit/db9dbc6efce5ef6533984b7fbe395b365d71ce7f
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review leonid-ravich-dell-com/nvmet-fc-associations-list-replaced-with-hlist-rcu/20201224-191206
        git checkout db9dbc6efce5ef6533984b7fbe395b365d71ce7f
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=s390 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   include/uapi/linux/swab.h:19:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x000000ffUL) << 24) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:20:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:21:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:22:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0xff000000UL) >> 24)))
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:120:12: note: expanded from macro '__swab32'
           __fswab32(x))
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:501:33: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writeb(value, PCI_IOBASE + addr);
                               ~~~~~~~~~~ ^
   include/asm-generic/io.h:511:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
                                                         ~~~~~~~~~~ ^
   include/asm-generic/io.h:521:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
                                                         ~~~~~~~~~~ ^
   include/asm-generic/io.h:609:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsb(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:617:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsw(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:625:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsl(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:634:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesb(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
   include/asm-generic/io.h:643:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesw(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
   include/asm-generic/io.h:652:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesl(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
>> drivers/nvme/target/fc.c:1156:6: warning: no previous prototype for function 'nvmet_assoc_free_queue_rcu' [-Wmissing-prototypes]
   void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
        ^
   drivers/nvme/target/fc.c:1156:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
   ^
   static 
   drivers/nvme/target/fc.c:175:1: warning: unused function 'nvmet_fc_iodnum' [-Wunused-function]
   nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
   ^
   drivers/nvme/target/fc.c:181:1: warning: unused function 'nvmet_fc_fodnum' [-Wunused-function]
   nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
   ^
   23 warnings generated.


vim +/nvmet_assoc_free_queue_rcu +1156 drivers/nvme/target/fc.c

  1155	
> 1156	void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
  1157		struct nvmet_fc_tgt_assoc *assoc =
  1158			container_of(rcu_head, struct nvmet_fc_tgt_assoc, rcu_head);
  1159		struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
  1160	
  1161		kfree(assoc);
  1162		dev_info(tgtport->dev,
  1163			"{%d:%d} Association freed\n",
  1164			tgtport->fc_target_port.port_num, assoc->a_id);
  1165	}
  1166	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 15248 bytes --]

[-- Attachment #3: Type: text/plain, Size: 158 bytes --]

_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] nvmet-fc: associations list replaced with hlist rcu,
@ 2020-12-25 21:09   ` kernel test robot
  0 siblings, 0 replies; 19+ messages in thread
From: kernel test robot @ 2020-12-25 21:09 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 10108 bytes --]

Hi,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.10 next-20201223]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/leonid-ravich-dell-com/nvmet-fc-associations-list-replaced-with-hlist-rcu/20201224-191206
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 58cf05f597b03a8212d9ecf2c79ee046d3ee8ad9
config: s390-randconfig-r031-20201224 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project cee1e7d14f4628d6174b33640d502bff3b54ae45)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install s390 cross compiling tool for clang build
        # apt-get install binutils-s390x-linux-gnu
        # https://github.com/0day-ci/linux/commit/db9dbc6efce5ef6533984b7fbe395b365d71ce7f
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review leonid-ravich-dell-com/nvmet-fc-associations-list-replaced-with-hlist-rcu/20201224-191206
        git checkout db9dbc6efce5ef6533984b7fbe395b365d71ce7f
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=s390 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   include/uapi/linux/swab.h:19:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x000000ffUL) << 24) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:20:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:21:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |            \
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:119:21: note: expanded from macro '__swab32'
           ___constant_swab32(x) :                 \
                              ^
   include/uapi/linux/swab.h:22:12: note: expanded from macro '___constant_swab32'
           (((__u32)(x) & (__u32)0xff000000UL) >> 24)))
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:490:61: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
                                                           ~~~~~~~~~~ ^
   include/uapi/linux/byteorder/big_endian.h:34:59: note: expanded from macro '__le32_to_cpu'
   #define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
                                                             ^
   include/uapi/linux/swab.h:120:12: note: expanded from macro '__swab32'
           __fswab32(x))
                     ^
   In file included from drivers/nvme/target/fc.c:8:
   In file included from include/linux/blk-mq.h:5:
   In file included from include/linux/blkdev.h:26:
   In file included from include/linux/scatterlist.h:9:
   In file included from arch/s390/include/asm/io.h:80:
   include/asm-generic/io.h:501:33: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writeb(value, PCI_IOBASE + addr);
                               ~~~~~~~~~~ ^
   include/asm-generic/io.h:511:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
                                                         ~~~~~~~~~~ ^
   include/asm-generic/io.h:521:59: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
                                                         ~~~~~~~~~~ ^
   include/asm-generic/io.h:609:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsb(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:617:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsw(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:625:20: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           readsl(PCI_IOBASE + addr, buffer, count);
                  ~~~~~~~~~~ ^
   include/asm-generic/io.h:634:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesb(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
   include/asm-generic/io.h:643:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesw(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
   include/asm-generic/io.h:652:21: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic]
           writesl(PCI_IOBASE + addr, buffer, count);
                   ~~~~~~~~~~ ^
>> drivers/nvme/target/fc.c:1156:6: warning: no previous prototype for function 'nvmet_assoc_free_queue_rcu' [-Wmissing-prototypes]
   void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
        ^
   drivers/nvme/target/fc.c:1156:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
   ^
   static 
   drivers/nvme/target/fc.c:175:1: warning: unused function 'nvmet_fc_iodnum' [-Wunused-function]
   nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
   ^
   drivers/nvme/target/fc.c:181:1: warning: unused function 'nvmet_fc_fodnum' [-Wunused-function]
   nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
   ^
   23 warnings generated.


vim +/nvmet_assoc_free_queue_rcu +1156 drivers/nvme/target/fc.c

  1155	
> 1156	void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head) {
  1157		struct nvmet_fc_tgt_assoc *assoc =
  1158			container_of(rcu_head, struct nvmet_fc_tgt_assoc, rcu_head);
  1159		struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
  1160	
  1161		kfree(assoc);
  1162		dev_info(tgtport->dev,
  1163			"{%d:%d} Association freed\n",
  1164			tgtport->fc_target_port.port_num, assoc->a_id);
  1165	}
  1166	
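
[Editor's note: besides the missing prototype clang flags above, the v1
snippet dereferences assoc after kfree(assoc) when printing a_id, a
use-after-free. A corrected callback would be static and would log before
freeing. Sketch only; the v2 patch below drops this callback entirely in
favour of kfree_rcu():]

	static void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head)
	{
		struct nvmet_fc_tgt_assoc *assoc =
			container_of(rcu_head, struct nvmet_fc_tgt_assoc, rcu_head);
		struct nvmet_fc_tgtport *tgtport = assoc->tgtport;

		/* log first, while assoc is still valid */
		dev_info(tgtport->dev,
			"{%d:%d} Association freed\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		kfree(assoc);
	}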

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 15248 bytes --]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v2] nvmet-fc: associations list protected by rcu, instead of spinlock_irq where possible.
  2020-12-24 11:05 ` leonid.ravich
@ 2021-01-03 18:12   ` leonid.ravich
  -1 siblings, 0 replies; 19+ messages in thread
From: leonid.ravich @ 2021-01-03 18:12 UTC (permalink / raw)
  To: james.smart
  Cc: lravich, Leonid Ravich, Christoph Hellwig, Sagi Grimberg,
	Chaitanya Kulkarni, linux-nvme, linux-kernel

From: Leonid Ravich <Leonid.Ravich@emc.com>

Searching assoc_list is protected by rcu_read_lock(), since the list is
not changed inline, and follows the RCU list rules.

The queue array embedded in nvmet_fc_tgt_assoc is protected by
rcu_read_lock() according to the RCU dereference/assign rules.

Queue and assoc objects are freed after an RCU grace period (kfree_rcu()).

The tgtport lock is still taken for changing assoc_list.

Reviewed-by: Eldad Zinger <Eldad.Zinger@dell.com>
Reviewed-by: Elad Grupi <Elad.Grupi@dell.com>
Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
---
1) fixed style issues
2) queues array is now protected by RCU as well
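
[Editor's note: for readers less familiar with the idiom, the change
boils down to the standard RCU-protected list pattern. An illustrative
sketch, mirroring the diff below but not part of the patch:]

	/* writers still serialize on tgtport->lock */
	spin_lock_irqsave(&tgtport->lock, flags);
	list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* readers traverse locklessly under RCU */
	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (assoc->association_id == association_id) {
			ret = assoc;
			/* pin via kref before leaving the read section */
			if (!nvmet_fc_tgt_a_get(assoc))
				ret = NULL;
			break;
		}
	}
	rcu_read_unlock();

	/* removal: unlink under the lock, free after a grace period */
	spin_lock_irqsave(&tgtport->lock, flags);
	list_del_rcu(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	kfree_rcu(assoc, rcu);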

 drivers/nvme/target/fc.c | 81 +++++++++++++++++++++++-------------------------
 1 file changed, 38 insertions(+), 43 deletions(-)

diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index cd4e73a..c14c60b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue {
 	struct list_head		avail_defer_list;
 	struct workqueue_struct		*work_q;
 	struct kref			ref;
+	struct rcu_head			rcu;
 	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
 } __aligned(sizeof(unsigned long long));
 
@@ -167,6 +168,7 @@ struct nvmet_fc_tgt_assoc {
 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
 	struct kref			ref;
 	struct work_struct		del_work;
+	struct rcu_head			rcu;
 };
 
 
@@ -790,7 +792,6 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 			u16 qid, u16 sqsize)
 {
 	struct nvmet_fc_tgt_queue *queue;
-	unsigned long flags;
 	int ret;
 
 	if (qid > NVMET_NR_QUEUES)
@@ -829,9 +830,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 		goto out_fail_iodlist;
 
 	WARN_ON(assoc->queues[qid]);
-	spin_lock_irqsave(&assoc->tgtport->lock, flags);
-	assoc->queues[qid] = queue;
-	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+	rcu_assign_pointer(assoc->queues[qid], queue);
 
 	return queue;
 
@@ -851,11 +850,8 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 {
 	struct nvmet_fc_tgt_queue *queue =
 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
-	unsigned long flags;
 
-	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
-	queue->assoc->queues[queue->qid] = NULL;
-	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+	rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
 
 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
 
@@ -863,7 +859,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 
 	destroy_workqueue(queue->work_q);
 
-	kfree(queue);
+	kfree_rcu(queue, rcu);
 }
 
 static void
@@ -965,24 +961,23 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 	struct nvmet_fc_tgt_queue *queue;
 	u64 association_id = nvmet_fc_getassociationid(connection_id);
 	u16 qid = nvmet_fc_getqueueid(connection_id);
-	unsigned long flags;
 
 	if (qid > NVMET_NR_QUEUES)
 		return NULL;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
-			queue = assoc->queues[qid];
+			queue = rcu_dereference(assoc->queues[qid]);
 			if (queue &&
 			    (!atomic_read(&queue->connected) ||
 			     !nvmet_fc_tgt_q_get(queue)))
 				queue = NULL;
-			spin_unlock_irqrestore(&tgtport->lock, flags);
+			rcu_read_unlock();
 			return queue;
 		}
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
+	rcu_read_unlock();
 	return NULL;
 }
 
@@ -1137,7 +1132,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 		}
 		if (!needrandom) {
 			assoc->association_id = ran;
-			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+			list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
 		}
 		spin_unlock_irqrestore(&tgtport->lock, flags);
 	}
@@ -1167,7 +1162,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 
 	nvmet_fc_free_hostport(assoc->hostport);
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_del(&assoc->a_list);
+	list_del_rcu(&assoc->a_list);
 	oldls = assoc->rcv_disconn;
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 	/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1177,7 +1172,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 	dev_info(tgtport->dev,
 		"{%d:%d} Association freed\n",
 		tgtport->fc_target_port.port_num, assoc->a_id);
-	kfree(assoc);
+	kfree_rcu(assoc, rcu);
 	nvmet_fc_tgtport_put(tgtport);
 }
 
@@ -1198,7 +1193,6 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 {
 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
 	struct nvmet_fc_tgt_queue *queue;
-	unsigned long flags;
 	int i, terminating;
 
 	terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1207,19 +1201,23 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 	if (terminating)
 		return;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
+
 	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
-		queue = assoc->queues[i];
-		if (queue) {
-			if (!nvmet_fc_tgt_q_get(queue))
-				continue;
-			spin_unlock_irqrestore(&tgtport->lock, flags);
-			nvmet_fc_delete_target_queue(queue);
-			nvmet_fc_tgt_q_put(queue);
-			spin_lock_irqsave(&tgtport->lock, flags);
+		rcu_read_lock();
+		queue = rcu_dereference(assoc->queues[i]);
+		if (!queue) {
+			rcu_read_unlock();
+			continue;
+		}
+
+		if (!nvmet_fc_tgt_q_get(queue)) {
+			rcu_read_unlock();
+			continue;
 		}
+		rcu_read_unlock();
+		nvmet_fc_delete_target_queue(queue);
+		nvmet_fc_tgt_q_put(queue);
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
 
 	dev_info(tgtport->dev,
 		"{%d:%d} Association deleted\n",
@@ -1234,10 +1232,9 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 {
 	struct nvmet_fc_tgt_assoc *assoc;
 	struct nvmet_fc_tgt_assoc *ret = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
 			ret = assoc;
 			if (!nvmet_fc_tgt_a_get(assoc))
@@ -1245,7 +1242,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
+	rcu_read_unlock();
 
 	return ret;
 }
@@ -1473,19 +1470,17 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 static void
 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 {
-	struct nvmet_fc_tgt_assoc *assoc, *next;
-	unsigned long flags;
+	struct nvmet_fc_tgt_assoc *assoc;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry_safe(assoc, next,
-				&tgtport->assoc_list, a_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
 		if (!schedule_work(&assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
+	rcu_read_unlock();
 }
 
 /**
@@ -1568,16 +1563,16 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 			continue;
 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
 
-		spin_lock_irqsave(&tgtport->lock, flags);
-		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
-			queue = assoc->queues[0];
+		rcu_read_lock();
+		list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+			queue = rcu_dereference(assoc->queues[0]);
 			if (queue && queue->nvme_sq.ctrl == ctrl) {
 				if (nvmet_fc_tgt_a_get(assoc))
 					found_ctrl = true;
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&tgtport->lock, flags);
+		rcu_read_unlock();
 
 		nvmet_fc_tgtport_put(tgtport);
 
-- 
1.9.3
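
[Editor's note: the queues[] conversion above relies on the usual
publish/consume pairing of rcu_assign_pointer()/rcu_dereference(). A
minimal sketch, assuming the fields shown in the diff:]

	/* publish: a reader that sees non-NULL also sees the init stores */
	rcu_assign_pointer(assoc->queues[qid], queue);

	/* consume under rcu_read_lock(), pinning with the kref */
	rcu_read_lock();
	queue = rcu_dereference(assoc->queues[qid]);
	if (queue && !nvmet_fc_tgt_q_get(queue))
		queue = NULL;
	rcu_read_unlock();
	/* if the kref was taken, queue remains valid past the unlock */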


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH v2] nvmet-fc: associations list protected by rcu, instead of spinlock_irq where possible.
  2021-01-03 18:12   ` leonid.ravich
@ 2021-01-11  8:11     ` Christoph Hellwig
  -1 siblings, 0 replies; 19+ messages in thread
From: Christoph Hellwig @ 2021-01-11  8:11 UTC (permalink / raw)
  To: leonid.ravich
  Cc: james.smart, lravich, Christoph Hellwig, Sagi Grimberg,
	Chaitanya Kulkarni, linux-nvme, linux-kernel

James, can you review this patch?

On Sun, Jan 03, 2021 at 08:12:53PM +0200, leonid.ravich@dell.com wrote:
> From: Leonid Ravich <Leonid.Ravich@emc.com>
> 
> Searching assoc_list is protected by rcu_read_lock(), since the list is
> not changed inline, and follows the RCU list rules.
>
> The queue array embedded in nvmet_fc_tgt_assoc is protected by
> rcu_read_lock() according to the RCU dereference/assign rules.
>
> Queue and assoc objects are freed after an RCU grace period (kfree_rcu()).
>
> The tgtport lock is still taken for changing assoc_list.
> 
> Reviewed-by: Eldad Zinger <Eldad.Zinger@dell.com>
> Reviewed-by: Elad Grupi <Elad.Grupi@dell.com>
> Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
> ---
> 1) fixed style issues
> 2) queues array is now protected by RCU as well
> 
>  drivers/nvme/target/fc.c | 81 +++++++++++++++++++++++-------------------------
>  1 file changed, 38 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
> index cd4e73a..c14c60b 100644
> --- a/drivers/nvme/target/fc.c
> +++ b/drivers/nvme/target/fc.c
> @@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue {
>  	struct list_head		avail_defer_list;
>  	struct workqueue_struct		*work_q;
>  	struct kref			ref;
> +	struct rcu_head			rcu;
>  	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
>  } __aligned(sizeof(unsigned long long));
>  
> @@ -167,6 +168,7 @@ struct nvmet_fc_tgt_assoc {
>  	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
>  	struct kref			ref;
>  	struct work_struct		del_work;
> +	struct rcu_head			rcu;
>  };
>  
>  
> @@ -790,7 +792,6 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  			u16 qid, u16 sqsize)
>  {
>  	struct nvmet_fc_tgt_queue *queue;
> -	unsigned long flags;
>  	int ret;
>  
>  	if (qid > NVMET_NR_QUEUES)
> @@ -829,9 +830,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  		goto out_fail_iodlist;
>  
>  	WARN_ON(assoc->queues[qid]);
> -	spin_lock_irqsave(&assoc->tgtport->lock, flags);
> -	assoc->queues[qid] = queue;
> -	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
> +	rcu_assign_pointer(assoc->queues[qid], queue);
>  
>  	return queue;
>  
> @@ -851,11 +850,8 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  {
>  	struct nvmet_fc_tgt_queue *queue =
>  		container_of(ref, struct nvmet_fc_tgt_queue, ref);
> -	unsigned long flags;
>  
> -	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
> -	queue->assoc->queues[queue->qid] = NULL;
> -	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
> +	rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
>  
>  	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
>  
> @@ -863,7 +859,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  
>  	destroy_workqueue(queue->work_q);
>  
> -	kfree(queue);
> +	kfree_rcu(queue, rcu);
>  }
>  
>  static void
> @@ -965,24 +961,23 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  	struct nvmet_fc_tgt_queue *queue;
>  	u64 association_id = nvmet_fc_getassociationid(connection_id);
>  	u16 qid = nvmet_fc_getqueueid(connection_id);
> -	unsigned long flags;
>  
>  	if (qid > NVMET_NR_QUEUES)
>  		return NULL;
>  
> -	spin_lock_irqsave(&tgtport->lock, flags);
> -	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
>  		if (association_id == assoc->association_id) {
> -			queue = assoc->queues[qid];
> +			queue = rcu_dereference(assoc->queues[qid]);
>  			if (queue &&
>  			    (!atomic_read(&queue->connected) ||
>  			     !nvmet_fc_tgt_q_get(queue)))
>  				queue = NULL;
> -			spin_unlock_irqrestore(&tgtport->lock, flags);
> +			rcu_read_unlock();
>  			return queue;
>  		}
>  	}
> -	spin_unlock_irqrestore(&tgtport->lock, flags);
> +	rcu_read_unlock();
>  	return NULL;
>  }
>  
> @@ -1137,7 +1132,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  		}
>  		if (!needrandom) {
>  			assoc->association_id = ran;
> -			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
> +			list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
>  		}
>  		spin_unlock_irqrestore(&tgtport->lock, flags);
>  	}
> @@ -1167,7 +1162,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  
>  	nvmet_fc_free_hostport(assoc->hostport);
>  	spin_lock_irqsave(&tgtport->lock, flags);
> -	list_del(&assoc->a_list);
> +	list_del_rcu(&assoc->a_list);
>  	oldls = assoc->rcv_disconn;
>  	spin_unlock_irqrestore(&tgtport->lock, flags);
>  	/* if pending Rcv Disconnect Association LS, send rsp now */
> @@ -1177,7 +1172,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  	dev_info(tgtport->dev,
>  		"{%d:%d} Association freed\n",
>  		tgtport->fc_target_port.port_num, assoc->a_id);
> -	kfree(assoc);
> +	kfree_rcu(assoc, rcu);
>  	nvmet_fc_tgtport_put(tgtport);
>  }
>  
> @@ -1198,7 +1193,6 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  {
>  	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
>  	struct nvmet_fc_tgt_queue *queue;
> -	unsigned long flags;
>  	int i, terminating;
>  
>  	terminating = atomic_xchg(&assoc->terminating, 1);
> @@ -1207,19 +1201,23 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  	if (terminating)
>  		return;
>  
> -	spin_lock_irqsave(&tgtport->lock, flags);
> +
>  	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
> -		queue = assoc->queues[i];
> -		if (queue) {
> -			if (!nvmet_fc_tgt_q_get(queue))
> -				continue;
> -			spin_unlock_irqrestore(&tgtport->lock, flags);
> -			nvmet_fc_delete_target_queue(queue);
> -			nvmet_fc_tgt_q_put(queue);
> -			spin_lock_irqsave(&tgtport->lock, flags);
> +		rcu_read_lock();
> +		queue = rcu_dereference(assoc->queues[i]);
> +		if (!queue) {
> +			rcu_read_unlock();
> +			continue;
> +		}
> +
> +		if (!nvmet_fc_tgt_q_get(queue)) {
> +			rcu_read_unlock();
> +			continue;
>  		}
> +		rcu_read_unlock();
> +		nvmet_fc_delete_target_queue(queue);
> +		nvmet_fc_tgt_q_put(queue);
>  	}
> -	spin_unlock_irqrestore(&tgtport->lock, flags);
>  
>  	dev_info(tgtport->dev,
>  		"{%d:%d} Association deleted\n",
> @@ -1234,10 +1232,9 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  {
>  	struct nvmet_fc_tgt_assoc *assoc;
>  	struct nvmet_fc_tgt_assoc *ret = NULL;
> -	unsigned long flags;
>  
> -	spin_lock_irqsave(&tgtport->lock, flags);
> -	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
>  		if (association_id == assoc->association_id) {
>  			ret = assoc;
>  			if (!nvmet_fc_tgt_a_get(assoc))
> @@ -1245,7 +1242,7 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  			break;
>  		}
>  	}
> -	spin_unlock_irqrestore(&tgtport->lock, flags);
> +	rcu_read_unlock();
>  
>  	return ret;
>  }
> @@ -1473,19 +1470,17 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  static void
>  __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
>  {
> -	struct nvmet_fc_tgt_assoc *assoc, *next;
> -	unsigned long flags;
> +	struct nvmet_fc_tgt_assoc *assoc;
>  
> -	spin_lock_irqsave(&tgtport->lock, flags);
> -	list_for_each_entry_safe(assoc, next,
> -				&tgtport->assoc_list, a_list) {
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
>  		if (!nvmet_fc_tgt_a_get(assoc))
>  			continue;
>  		if (!schedule_work(&assoc->del_work))
>  			/* already deleting - release local reference */
>  			nvmet_fc_tgt_a_put(assoc);
>  	}
> -	spin_unlock_irqrestore(&tgtport->lock, flags);
> +	rcu_read_unlock();
>  }
>  
>  /**
> @@ -1568,16 +1563,16 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
>  			continue;
>  		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
>  
> -		spin_lock_irqsave(&tgtport->lock, flags);
> -		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
> -			queue = assoc->queues[0];
> +		rcu_read_lock();
> +		list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
> +			queue = rcu_dereference(assoc->queues[0]);
>  			if (queue && queue->nvme_sq.ctrl == ctrl) {
>  				if (nvmet_fc_tgt_a_get(assoc))
>  					found_ctrl = true;
>  				break;
>  			}
>  		}
> -		spin_unlock_irqrestore(&tgtport->lock, flags);
> +		rcu_read_unlock();
>  
>  		nvmet_fc_tgtport_put(tgtport);
>  
> -- 
> 1.9.3
---end quoted text---

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] nvmet-fc: associations list replaced with hlist rcu,
  2020-12-24 11:05   ` leonid.ravich
@ 2021-01-11 18:12     ` James Smart
  -1 siblings, 0 replies; 19+ messages in thread
From: James Smart @ 2021-01-11 18:12 UTC (permalink / raw)
  To: leonid.ravich
  Cc: lravich, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
	linux-nvme, linux-kernel

[-- Attachment #1: Type: text/plain, Size: 1211 bytes --]

On 12/24/2020 3:05 AM, leonid.ravich@dell.com wrote:
> From: Leonid Ravich <Leonid.Ravich@emc.com>
>
> to remove locking from nvmet_fc_find_target_queue
> which is called per IO.
>
> Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
> ---
>   drivers/nvme/target/fc.c | 54 ++++++++++++++++++++++++++++--------------------
>   1 file changed, 32 insertions(+), 22 deletions(-)

Was this replaced by the v2 patch?

-- james




[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4163 bytes --]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2] nvmet-fc: associations list protected by rcu, instead of spinlock_irq where possible.
  2021-01-03 18:12     ` leonid.ravich
@ 2021-01-11 23:46       ` James Smart
  -1 siblings, 0 replies; 19+ messages in thread
From: James Smart @ 2021-01-11 23:46 UTC (permalink / raw)
  To: leonid.ravich
  Cc: lravich, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
	linux-nvme, linux-kernel


On 1/3/2021 10:12 AM, leonid.ravich@dell.com wrote:
> From: Leonid Ravich <Leonid.Ravich@emc.com>
>
> Searching assoc_list is protected by rcu_read_lock, in accordance with
> the RCU list rules, as long as the list is not modified inline.
>
> The queue array embedded in nvmet_fc_tgt_assoc is protected by
> rcu_read_lock according to the RCU dereference/assign rules.
>
> Queue and assoc objects are freed after a grace period via call_rcu.
>
> The tgtport lock is taken for changing assoc_list.
>
> Reviewed-by: Eldad Zinger <Eldad.Zinger@dell.com>
> Reviewed-by: Elad Grupi <Elad.Grupi@dell.com>
> Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
> ---
> 1) fixed style issues
> 2) queues array protected by rcu as well
>
>   drivers/nvme/target/fc.c | 81 +++++++++++++++++++++++-------------------------
>   1 file changed, 38 insertions(+), 43 deletions(-)
>
>
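
A matching sketch of the writer side described in the changelog above,
reusing the hypothetical demo_* structures from the earlier reader
sketch; this illustrates the pattern named there, not the driver's exact
code.

#include <linux/slab.h>

/* RCU callback: runs after a grace period, when no reader can see us. */
static void demo_assoc_free_rcu(struct rcu_head *head)
{
	struct demo_assoc *assoc =
		container_of(head, struct demo_assoc, rcu_head);

	kfree(assoc);
}

/* kref release: defer the actual free past the next grace period. */
static void demo_assoc_release(struct kref *ref)
{
	struct demo_assoc *assoc =
		container_of(ref, struct demo_assoc, ref);

	call_rcu(&assoc->rcu_head, demo_assoc_free_rcu);
}

/* List changes still take the port lock, as the changelog states. */
static void demo_add(struct demo_port *port, struct demo_assoc *assoc)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	list_add_tail_rcu(&assoc->a_list, &port->assoc_list);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void demo_remove(struct demo_port *port, struct demo_assoc *assoc)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	list_del_rcu(&assoc->a_list);
	spin_unlock_irqrestore(&port->lock, flags);

	kref_put(&assoc->ref, demo_assoc_release);	/* drop the list's ref */
}

Combining kref and call_rcu this way ensures the object outlives both the
last reference holder and any reader still inside its RCU critical
section.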

Reviewed-by: James Smart <james.smart@broadcom.com>

-- james


[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4163 bytes --]

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2] nvmet-fc: associations list protected by rcu, instead of spinlock_irq where possible.
@ 2021-01-11 23:46       ` James Smart
  0 siblings, 0 replies; 19+ messages in thread
From: James Smart @ 2021-01-11 23:46 UTC (permalink / raw)
  To: leonid.ravich
  Cc: Sagi Grimberg, Chaitanya Kulkarni, linux-kernel, linux-nvme,
	Christoph Hellwig, lravich


[-- Attachment #1.1: Type: text/plain, Size: 888 bytes --]



On 1/3/2021 10:12 AM, leonid.ravich@dell.com wrote:
> From: Leonid Ravich <Leonid.Ravich@emc.com>
>
> searching assoc_list protected by rcu_read_lock if list not changed inline.
> and according to the rcu list rules.
>
> queue array embedded into nvmet_fc_tgt_assoc protected by rcu_read_lock
> according to rcu dereference/assign rules.
>
> queue and assoc object freed after grace period by call_rcu.
>
> tgtport lock taken for changing assoc_list.
>
> Reviewed-by: Eldad Zinger <Eldad.Zinger@dell.com>
> Reviewed-by: Elad Grupi <Elad.Grupi@dell.com>
> Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
> ---
> 1) fixed sytle issus
> 2) queues array protects by rcu as well
>
>   drivers/nvme/target/fc.c | 81 +++++++++++++++++++++++-------------------------
>   1 file changed, 38 insertions(+), 43 deletions(-)
>
>

Reviewed-by: James Smart <james.smart@broadcom.com>

-- james


[-- Attachment #1.2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4163 bytes --]

[-- Attachment #2: Type: text/plain, Size: 158 bytes --]

_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2] nvmet-fc: associations list protected by rcu, instead of spinlock_irq where possible.
  2021-01-03 18:12     ` leonid.ravich
@ 2021-01-12  8:26       ` Christoph Hellwig
  -1 siblings, 0 replies; 19+ messages in thread
From: Christoph Hellwig @ 2021-01-12  8:26 UTC (permalink / raw)
  To: leonid.ravich
  Cc: james.smart, lravich, Christoph Hellwig, Sagi Grimberg,
	Chaitanya Kulkarni, linux-nvme, linux-kernel

Thanks,

applied to nvme-5.11.


end of thread, other threads:[~2021-01-12  8:27 UTC | newest]

Thread overview: 19+ messages
2020-12-24 11:05 [PATCH] nvmet-fc: associations list replaced with hlist rcu, leonid.ravich
2020-12-24 11:05 ` leonid.ravich
2020-12-24 11:05 ` leonid.ravich
2020-12-24 11:05   ` leonid.ravich
2021-01-11 18:12   ` James Smart
2021-01-11 18:12     ` James Smart
2020-12-25 21:09 ` kernel test robot
2020-12-25 21:09   ` kernel test robot
2020-12-25 21:09   ` kernel test robot
2021-01-03 18:12 ` [PATCH v2] nvmet-fc: associations list protected by rcu, instead of spinlock_irq where possible leonid.ravich
2021-01-03 18:12   ` leonid.ravich
2021-01-03 18:12   ` leonid.ravich
2021-01-03 18:12     ` leonid.ravich
2021-01-11 23:46     ` James Smart
2021-01-11 23:46       ` James Smart
2021-01-12  8:26     ` Christoph Hellwig
2021-01-12  8:26       ` Christoph Hellwig
2021-01-11  8:11   ` Christoph Hellwig
2021-01-11  8:11     ` Christoph Hellwig
