linux-nvme.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
From: Daniel Wagner <dwagner@suse.de>
To: Sagi Grimberg <sagi@grimberg.me>
Cc: "Knight, Frederick" <Frederick.Knight@netapp.com>,
	"linux-nvme@lists.infradead.org" <linux-nvme@lists.infradead.org>,
	Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>,
	"hare@suse.de" <hare@suse.de>
Subject: Re: [PATCH v2] nvmet: force reconnect when number of queue changes
Date: Wed, 5 Oct 2022 20:15:10 +0200	[thread overview]
Message-ID: <20221005181510.zu7vcwjtltcal2zi@carbon.lan> (raw)
In-Reply-To: <b121c092-cecc-b891-be3c-b32c2a3e611d@grimberg.me>

On Wed, Sep 28, 2022 at 05:23:06PM +0300, Sagi Grimberg wrote:
> > As far I can tell, what's is missing from a testing point of view is the
> > ability to fail requests without the DNR bit set or the ability to tell
> > the host to reconnect. Obviously, an AEN would be nice for this but I
> > don't know if this is reason enough to extend the spec.
> 
> Looking into the code, its the connect that fails on invalid parameter
> with a DNR, because the host is attempting to connect to a subsystems
> that does not exist on the port (because it was taken offline for
> maintenance reasons).
> 
> So I guess it is valid to allow queue change without removing it from
> the port, but that does not change the fundamental question on DNR.
> If the host sees a DNR error on connect, my interpretation is that the
> host should not retry the connect command itself, but it shouldn't imply
> anything on tearing down the controller and giving up on it completely,
> forever.

Okay, let me try to avoid the DNR discussion for now and propose
something else? What about adding an 'enable' attribute to the subsys?

The snippet below does the trick. Though there is no explicit
synchronization between host and target, so it's possible the host
doesn't notice that the subsystem toggled enabled and updated the number
of queues. But I'm not sure it's worth addressing this; it feels a bit
over-engineered.


diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 051a420d818e..2438f5e04409 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1294,6 +1294,9 @@ static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
 	struct nvmet_ctrl *ctrl;
 	u16 qid_max;
 
+	if (subsys->enabled)
+		return -EACCES;
+
 	if (sscanf(page, "%hu\n", &qid_max) != 1)
 		return -EINVAL;
 
@@ -1302,16 +1305,39 @@ static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
 
 	down_write(&nvmet_config_sem);
 	subsys->max_qid = qid_max;
-
-	/* Force reconnect */
-	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
-		ctrl->ops->delete_ctrl(ctrl);
 	up_write(&nvmet_config_sem);
 
 	return cnt;
 }
 CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
 
+static ssize_t nvmet_subsys_attr_enable_show(struct config_item *item,
+					     char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->enabled);
+}
+
+static ssize_t nvmet_subsys_attr_enable_store(struct config_item *item,
+					      const char *page, size_t cnt)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	bool enable;
+
+	if (strtobool(page, &enable))
+		goto inval;
+
+	if (enable)
+		nvmet_subsystem_enable(subsys);
+	else
+		nvmet_subsystem_disable(subsys);
+
+	return cnt;
+inval:
+	pr_err("Invalid value '%s' for enable\n", page);
+	return cnt;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_enable);
+
 static struct configfs_attribute *nvmet_subsys_attrs[] = {
 	&nvmet_subsys_attr_attr_allow_any_host,
 	&nvmet_subsys_attr_attr_version,
@@ -1320,6 +1346,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
 	&nvmet_subsys_attr_attr_cntlid_max,
 	&nvmet_subsys_attr_attr_model,
 	&nvmet_subsys_attr_attr_qid_max,
+	&nvmet_subsys_attr_attr_enable,
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	&nvmet_subsys_attr_attr_pi_enable,
 #endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8e3cf0c3588c..487842f16d67 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -546,6 +546,22 @@ bool nvmet_ns_revalidate(struct nvmet_ns *ns)
 	return oldsize != ns->size;
 }
 
+int nvmet_subsystem_enable(struct nvmet_subsys *subsys)
+{
+	mutex_lock(&subsys->lock);
+	subsys->enabled = true;
+	mutex_unlock(&subsys->lock);
+
+	return 0;
+}
+
+void nvmet_subsystem_disable(struct nvmet_subsys *subsys)
+{
+	mutex_lock(&subsys->lock);
+	subsys->enabled = false;
+	mutex_unlock(&subsys->lock);
+}
+
 int nvmet_ns_enable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
@@ -954,7 +970,10 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	if (unlikely(!req->sq->ctrl))
 		/* will return an error for any non-connect command: */
 		status = nvmet_parse_connect_cmd(req);
-	else if (likely(req->sq->qid != 0))
+	else if (unlikely(!req->sq->ctrl->subsys->enabled)) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
+		status = NVME_SC_INTERNAL;
+	} else if (likely(req->sq->qid != 0))
 		status = nvmet_parse_io_cmd(req);
 	else
 		status = nvmet_parse_admin_cmd(req);
@@ -1368,6 +1387,15 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	}
 
 	down_read(&nvmet_config_sem);
+	if (!subsys->enabled) {
+		pr_info("connect by host %s for disabled subsystem %s not possible\n",
+			hostnqn, subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		up_read(&nvmet_config_sem);
+		status = NVME_SC_INTERNAL;
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
+		goto out_put_subsystem;
+	}
 	if (!nvmet_host_allowed(subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
 			hostnqn, subsysnqn);
@@ -1588,6 +1616,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	INIT_LIST_HEAD(&subsys->ctrls);
 	INIT_LIST_HEAD(&subsys->hosts);
 
+	subsys->enabled = true;
+
 	return subsys;
 
 free_mn:
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index dfe3894205aa..269bfc979870 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -235,6 +235,7 @@ struct nvmet_ctrl {
 
 struct nvmet_subsys {
 	enum nvme_subsys_type	type;
+	bool			enabled;
 
 	struct mutex		lock;
 	struct kref		ref;
@@ -486,6 +487,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		enum nvme_subsys_type type);
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
+int nvmet_subsystem_enable(struct nvmet_subsys *subsys);
+void nvmet_subsystem_disable(struct nvmet_subsys *subsys);
 
 u16 nvmet_req_find_ns(struct nvmet_req *req);
 void nvmet_put_namespace(struct nvmet_ns *ns);


  parent reply	other threads:[~2022-10-05 18:15 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-27 14:31 [PATCH v2] nvmet: force reconnect when number of queue changes Daniel Wagner
2022-09-27 15:01 ` Daniel Wagner
2022-09-28  6:55 ` Sagi Grimberg
2022-09-28  7:48   ` Daniel Wagner
2022-09-28  8:31     ` Sagi Grimberg
2022-09-28  9:02       ` Daniel Wagner
2022-09-28 12:39         ` Knight, Frederick
2022-09-28 13:50           ` Daniel Wagner
2022-09-28 14:23             ` Sagi Grimberg
2022-09-28 15:21               ` Hannes Reinecke
2022-09-28 16:01               ` Knight, Frederick
2022-10-05 18:15               ` Daniel Wagner [this message]
2022-10-06 11:37                 ` Sagi Grimberg
2022-10-06 20:15                   ` James Smart
2022-10-06 20:54                     ` Knight, Frederick
2022-09-28 16:01             ` Knight, Frederick
2022-09-28 15:01           ` John Meneghini
2022-09-28 15:26             ` Daniel Wagner
2022-09-28 18:02               ` Knight, Frederick
2022-09-29  2:14                 ` John Meneghini
2022-09-29  3:04                   ` Knight, Frederick
2022-09-30  7:03                   ` Hannes Reinecke
2022-09-30  6:32                 ` Hannes Reinecke

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221005181510.zu7vcwjtltcal2zi@carbon.lan \
    --to=dwagner@suse.de \
    --cc=Frederick.Knight@netapp.com \
    --cc=hare@suse.de \
    --cc=linux-nvme@lists.infradead.org \
    --cc=sagi@grimberg.me \
    --cc=shinichiro.kawasaki@wdc.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).