From: leonid.ravich@dell.com
To: james.smart@broadcom.com
Cc: lravich@gmail.com, Leonid Ravich <Leonid.Ravich@dell.com>,
	Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
	Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>,
	linux-nvme@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [PATCH] nvmet-fc: associations list replaced with hlist rcu
Date: Thu, 24 Dec 2020 13:05:41 +0200	[thread overview]
Message-ID: <20201224110542.22219-2-leonid.ravich@dell.com> (raw)
In-Reply-To: <20201224110542.22219-1-leonid.ravich@dell.com>

From: Leonid Ravich <Leonid.Ravich@emc.com>

Replace the tgtport associations list with an RCU-protected hlist so that
nvmet_fc_find_target_queue(), which is called for every I/O, no longer
needs to take the tgtport spinlock on the fast path.

Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
---
 drivers/nvme/target/fc.c | 54 ++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 22 deletions(-)
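
Note (below the cut, not part of the commit): a minimal, self-contained
sketch of the RCU pattern this patch applies -- lock-free readers walking
an hlist under rcu_read_lock() while writers serialize on a spinlock and
defer freeing via call_rcu().  The "foo" names are made up for illustration
only and do not exist in nvmet-fc:

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	u64			id;
	struct hlist_node	node;
	struct rcu_head		rcu;
};

static HLIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Reader: lock-free lookup; safe against concurrent unlink. */
static struct foo *foo_find(u64 id)
{
	struct foo *f;

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &foo_list, node) {
		if (f->id == id) {
			rcu_read_unlock();
			return f;	/* caller takes its own reference */
		}
	}
	rcu_read_unlock();
	return NULL;
}

/* Writer: insertions are serialized by the spinlock. */
static void foo_add(struct foo *f)
{
	spin_lock(&foo_lock);
	hlist_add_tail_rcu(&f->node, &foo_list);
	spin_unlock(&foo_lock);
}

static void foo_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

/* Writer: unlink under the lock, free only after a grace period. */
static void foo_del(struct foo *f)
{
	spin_lock(&foo_lock);
	hlist_del_rcu(&f->node);
	spin_unlock(&foo_lock);
	call_rcu(&f->rcu, foo_free_rcu);
}

This mirrors what nvmet_fc_find_target_queue() (reader) and
nvmet_fc_target_assoc_free() (writer) do after this change: readers never
touch tgtport->lock, and an association is only freed once all readers that
might still see it have left their RCU read-side critical sections.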

diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index cd4e73aa9807..3928a17d073c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -105,7 +105,7 @@ struct nvmet_fc_tgtport {
 	struct list_head		ls_rcv_list;
 	struct list_head		ls_req_list;
 	struct list_head		ls_busylist;
-	struct list_head		assoc_list;
+	struct hlist_head		assoc_list;
 	struct list_head		host_list;
 	struct ida			assoc_cnt;
 	struct nvmet_fc_port_entry	*pe;
@@ -163,10 +163,11 @@ struct nvmet_fc_tgt_assoc {
 	struct nvmet_fc_tgtport		*tgtport;
 	struct nvmet_fc_hostport	*hostport;
 	struct nvmet_fc_ls_iod		*rcv_disconn;
-	struct list_head		a_list;
+	struct hlist_node		a_list;
 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
 	struct kref			ref;
 	struct work_struct		del_work;
+	struct rcu_head		rcu_head;
 };
 
 
@@ -965,24 +966,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
 	struct nvmet_fc_tgt_queue *queue;
 	u64 association_id = nvmet_fc_getassociationid(connection_id);
 	u16 qid = nvmet_fc_getqueueid(connection_id);
-	unsigned long flags;
 
 	if (qid > NVMET_NR_QUEUES)
 		return NULL;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
 			queue = assoc->queues[qid];
 			if (queue &&
 			    (!atomic_read(&queue->connected) ||
 			     !nvmet_fc_tgt_q_get(queue)))
 				queue = NULL;
-			spin_unlock_irqrestore(&tgtport->lock, flags);
+			rcu_read_unlock();
 			return queue;
 		}
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
+	rcu_read_unlock();
 	return NULL;
 }
 
@@ -1118,7 +1118,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 
 	assoc->tgtport = tgtport;
 	assoc->a_id = idx;
-	INIT_LIST_HEAD(&assoc->a_list);
+	INIT_HLIST_NODE(&assoc->a_list);
 	kref_init(&assoc->ref);
 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
 	atomic_set(&assoc->terminating, 0);
@@ -1129,7 +1129,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 
 		spin_lock_irqsave(&tgtport->lock, flags);
 		needrandom = false;
-		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
+		hlist_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
 			if (ran == tmpassoc->association_id) {
 				needrandom = true;
 				break;
@@ -1137,7 +1137,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 		}
 		if (!needrandom) {
 			assoc->association_id = ran;
-			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+			hlist_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
 		}
 		spin_unlock_irqrestore(&tgtport->lock, flags);
 	}
@@ -1153,6 +1153,17 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 	return NULL;
 }
 
+static void nvmet_assoc_free_queue_rcu(struct rcu_head *rcu_head)
+{
+	struct nvmet_fc_tgt_assoc *assoc =
+		container_of(rcu_head, struct nvmet_fc_tgt_assoc, rcu_head);
+
+	dev_info(assoc->tgtport->dev,
+		"{%d:%d} Association freed\n",
+		assoc->tgtport->fc_target_port.port_num, assoc->a_id);
+	kfree(assoc);
+}
+
 static void
 nvmet_fc_target_assoc_free(struct kref *ref)
 {
@@ -1167,17 +1178,14 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 
 	nvmet_fc_free_hostport(assoc->hostport);
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_del(&assoc->a_list);
+	hlist_del_rcu(&assoc->a_list);
 	oldls = assoc->rcv_disconn;
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 	/* if pending Rcv Disconnect Association LS, send rsp now */
 	if (oldls)
 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
 	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
-	dev_info(tgtport->dev,
-		"{%d:%d} Association freed\n",
-		tgtport->fc_target_port.port_num, assoc->a_id);
-	kfree(assoc);
+	call_rcu(&assoc->rcu_head, nvmet_assoc_free_queue_rcu);
 	nvmet_fc_tgtport_put(tgtport);
 }
 
@@ -1237,7 +1245,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+	hlist_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
 			ret = assoc;
 			if (!nvmet_fc_tgt_a_get(assoc))
@@ -1397,7 +1405,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->ls_rcv_list);
 	INIT_LIST_HEAD(&newrec->ls_req_list);
 	INIT_LIST_HEAD(&newrec->ls_busylist);
-	INIT_LIST_HEAD(&newrec->assoc_list);
+	INIT_HLIST_HEAD(&newrec->assoc_list);
 	INIT_LIST_HEAD(&newrec->host_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
@@ -1473,11 +1481,12 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
 static void
 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 {
-	struct nvmet_fc_tgt_assoc *assoc, *next;
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct hlist_node *next;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry_safe(assoc, next,
+	hlist_for_each_entry_safe(assoc, next,
 				&tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
@@ -1522,12 +1531,13 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			void *hosthandle)
 {
 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
-	struct nvmet_fc_tgt_assoc *assoc, *next;
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct hlist_node *next;
 	unsigned long flags;
 	bool noassoc = true;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry_safe(assoc, next,
+	hlist_for_each_entry_safe(assoc, next,
 				&tgtport->assoc_list, a_list) {
 		if (!assoc->hostport ||
 		    assoc->hostport->hosthandle != hosthandle)
@@ -1569,7 +1579,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
 
 		spin_lock_irqsave(&tgtport->lock, flags);
-		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+		hlist_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
 			queue = assoc->queues[0];
 			if (queue && queue->nvme_sq.ctrl == ctrl) {
 				if (nvmet_fc_tgt_a_get(assoc))
-- 
2.16.2


Thread overview: 19+ messages
2020-12-24 11:05 [PATCH] nvmet-fc: associations list replaced with hlist rcu, leonid.ravich
2020-12-24 11:05 ` leonid.ravich
2020-12-24 11:05 ` leonid.ravich [this message]
2020-12-24 11:05   ` leonid.ravich
2021-01-11 18:12   ` James Smart
2021-01-11 18:12     ` James Smart
2020-12-25 21:09 ` kernel test robot
2020-12-25 21:09   ` kernel test robot
2020-12-25 21:09   ` kernel test robot
2021-01-03 18:12 ` [PATCH v2] nvmet-fc: associations list protected by rcu, instead of spinlock_irq where possible leonid.ravich
2021-01-03 18:12   ` leonid.ravich
2021-01-03 18:12   ` leonid.ravich
2021-01-03 18:12     ` leonid.ravich
2021-01-11 23:46     ` James Smart
2021-01-11 23:46       ` James Smart
2021-01-12  8:26     ` Christoph Hellwig
2021-01-12  8:26       ` Christoph Hellwig
2021-01-11  8:11   ` Christoph Hellwig
2021-01-11  8:11     ` Christoph Hellwig
