linux-nvme.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
From: Hannes Reinecke <hare@suse.de>
To: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>, Keith Busch <kbusch@kernel.org>,
	linux-nvme@lists.infradead.org, Hannes Reinecke <hare@suse.de>
Subject: [PATCH 2/2] nvme-auth: use xarray instead of linked list
Date: Fri, 28 Oct 2022 15:50:27 +0200	[thread overview]
Message-ID: <20221028135027.116044-3-hare@suse.de> (raw)
In-Reply-To: <20221028135027.116044-1-hare@suse.de>

The current design of holding the chap context is slightly awkward:
the context is allocated on demand, so we have to hold the list lock
when looking up contexts, as we cannot otherwise know whether a
context has already been allocated.

This patch moves the allocation of the chap context out of the
on-demand path: the context is allocated before authentication starts
and stored in an xarray. With that we can do away with the lock and
access the context directly via the queue number.

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/nvme/host/auth.c | 116 ++++++++++++++++++++++-----------------
 drivers/nvme/host/nvme.h |   3 +-
 2 files changed, 66 insertions(+), 53 deletions(-)

diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index b68fb2c764f6..7b974bd0fa64 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -72,10 +72,12 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
 				     0, flags, nvme_max_retries);
 	if (ret > 0)
 		dev_warn(ctrl->device,
-			"qid %d auth_send failed with status %d\n", qid, ret);
+			"qid %d auth_%s failed with status %d\n",
+			 qid, auth_send ? "send" : "recv", ret);
 	else if (ret < 0)
 		dev_err(ctrl->device,
-			"qid %d auth_send failed with error %d\n", qid, ret);
+			"qid %d auth_%s failed with error %d\n",
+			qid, auth_send ? "send" : "recv", ret);
 	return ret;
 }
 
@@ -870,29 +872,42 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 		return -ENOKEY;
 	}
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	/* Check if the context is already queued */
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		if (chap->qid == qid) {
-			dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
-			mutex_unlock(&ctrl->dhchap_auth_mutex);
-			flush_work(&chap->auth_work);
-			__nvme_auth_reset(chap);
-			queue_work(nvme_wq, &chap->auth_work);
-			return 0;
-		}
-	}
-	chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+	if (qid == NVME_QID_ANY)
+		qid = 0;
+	chap = xa_load(&ctrl->dhchap_auth_xa, qid);
 	if (!chap) {
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
-		return -ENOMEM;
-	}
-	chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
-	chap->ctrl = ctrl;
+		int ret;
+
+		chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+		if (!chap) {
+			dev_warn(ctrl->device,
+				 "qid %d: error allocation authentication", qid);
+			return -ENOMEM;
+		}
+		chap->qid = qid;
+		chap->ctrl = ctrl;
 
-	INIT_WORK(&chap->auth_work, __nvme_auth_work);
-	list_add(&chap->entry, &ctrl->dhchap_auth_list);
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
+		INIT_WORK(&chap->auth_work, __nvme_auth_work);
+		ret = xa_insert(&ctrl->dhchap_auth_xa, qid, chap, GFP_KERNEL);
+		if (ret) {
+			dev_warn(ctrl->device,
+				 "qid %d: error %d inserting authentication",
+				 qid, ret);
+			kfree(chap);
+			return ret;
+		}
+	} else {
+		if (chap->qid != qid) {
+			dev_warn(ctrl->device,
+				 "qid %d: authentication qid mismatch (%d)!",
+				 chap->qid, qid);
+			chap = xa_erase(&ctrl->dhchap_auth_xa, qid);
+			__nvme_auth_free(chap);
+			return -ENOENT;
+		}
+		flush_work(&chap->auth_work);
+		__nvme_auth_reset(chap);
+	}
 	queue_work(nvme_wq, &chap->auth_work);
 	return 0;
 }
@@ -901,33 +916,35 @@ EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
 int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
 {
 	struct nvme_dhchap_queue_context *chap;
-	int ret;
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		if (chap->qid != qid)
-			continue;
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
-		flush_work(&chap->auth_work);
-		ret = chap->error;
-		return ret;
+	if (qid == NVME_QID_ANY)
+		qid = 0;
+	chap = xa_load(&ctrl->dhchap_auth_xa, qid);
+	if (!chap) {
+		dev_warn(ctrl->device,
+			 "qid %d: authentication not initialized!",
+			 qid);
+		return -ENOENT;
+	} else if (chap->qid != qid) {
+		dev_warn(ctrl->device,
+			 "qid %d: authentication qid mismatch (%d)!",
+			 chap->qid, qid);
+		return -ENOENT;
 	}
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
-	return -ENXIO;
+	flush_work(&chap->auth_work);
+	return chap->error;
 }
 EXPORT_SYMBOL_GPL(nvme_auth_wait);
 
 void nvme_auth_reset(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap) {
 		flush_work(&chap->auth_work);
 		__nvme_auth_reset(chap);
 	}
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_auth_reset);
 
@@ -947,7 +964,7 @@ static void nvme_dhchap_auth_work(struct work_struct *work)
 	ret = nvme_auth_wait(ctrl, 0);
 	if (ret) {
 		dev_warn(ctrl->device,
-			 "qid 0: authentication failed\n");
+			 "qid 0: authentication failed with %d\n", ret);
 		return;
 	}
 
@@ -969,9 +986,8 @@ static void nvme_dhchap_auth_work(struct work_struct *work)
 
 void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
 {
-	INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+	xa_init_flags(&ctrl->dhchap_auth_xa, XA_FLAGS_ALLOC);
 	INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
-	mutex_init(&ctrl->dhchap_auth_mutex);
 	if (!ctrl->opts)
 		return;
 	nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
@@ -981,27 +997,25 @@ EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
 
 void nvme_auth_stop(struct nvme_ctrl *ctrl)
 {
-	struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
 	cancel_work_sync(&ctrl->dhchap_auth_work);
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap)
 		cancel_work_sync(&chap->auth_work);
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_auth_stop);
 
 void nvme_auth_free(struct nvme_ctrl *ctrl)
 {
-	struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
-		list_del_init(&chap->entry);
-		flush_work(&chap->auth_work);
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap) {
+		chap = xa_erase(&ctrl->dhchap_auth_xa, qid);
 		__nvme_auth_free(chap);
 	}
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
+	xa_destroy(&ctrl->dhchap_auth_xa);
 	if (ctrl->host_key) {
 		nvme_auth_free_key(ctrl->host_key);
 		ctrl->host_key = NULL;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 32d9dc2d957e..d0b2d3e4b63f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -338,8 +338,7 @@ struct nvme_ctrl {
 
 #ifdef CONFIG_NVME_AUTH
 	struct work_struct dhchap_auth_work;
-	struct list_head dhchap_auth_list;
-	struct mutex dhchap_auth_mutex;
+	struct xarray dhchap_auth_xa;
 	struct nvme_dhchap_key *host_key;
 	struct nvme_dhchap_key *ctrl_key;
 	u16 transaction;
-- 
2.35.3



  parent reply	other threads:[~2022-10-28 13:50 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-10-28 13:50 [PATCH 0/2] nvme-auth: avoid locking during authentication Hannes Reinecke
2022-10-28 13:50 ` [PATCH 1/2] nvme-auth: allocate authentication buffer only during transaction Hannes Reinecke
2022-10-30  7:52   ` Christoph Hellwig
2022-10-31 17:46     ` Hannes Reinecke
2022-10-28 13:50 ` Hannes Reinecke [this message]
2022-10-30  8:00   ` [PATCH 2/2] nvme-auth: use xarray instead of linked list Christoph Hellwig

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221028135027.116044-3-hare@suse.de \
    --to=hare@suse.de \
    --cc=hch@lst.de \
    --cc=kbusch@kernel.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=sagi@grimberg.me \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).