* properly validate the nvme unique identifiers are unique v2
@ 2022-02-24 19:28 Christoph Hellwig
  2022-02-24 19:28 ` [PATCH 1/4] nvme: cleanup __nvme_check_ids Christoph Hellwig
                   ` (4 more replies)
  0 siblings, 5 replies; 51+ messages in thread
From: Christoph Hellwig @ 2022-02-24 19:28 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg; +Cc: linux-nvme

Hi all,

this series fixes how the nvme driver checks for unique identifiers
and then also applies that logic across subsystems.

Changes since v1:
 - various small cleanups



* [PATCH 1/4] nvme: cleanup __nvme_check_ids
  2022-02-24 19:28 properly validate the nvme unique identifiers are unique v2 Christoph Hellwig
@ 2022-02-24 19:28 ` Christoph Hellwig
  2022-02-24 22:50   ` Chaitanya Kulkarni
  2022-02-24 19:28 ` [PATCH 2/4] nvme: fix the check for duplicate unique identifiers Christoph Hellwig
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-02-24 19:28 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg; +Cc: linux-nvme

Pass the actual nvme_ns_ids used for the comparison instead of the
ns_head that isn't needed and use a more descriptive function name.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/core.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9cffc4770e737..076a03b801b7e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3673,16 +3673,15 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
 	return NULL;
 }
 
-static int __nvme_check_ids(struct nvme_subsystem *subsys,
-		struct nvme_ns_head *new)
+static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
+		struct nvme_ns_ids *ids)
 {
 	struct nvme_ns_head *h;
 
 	lockdep_assert_held(&subsys->lock);
 
 	list_for_each_entry(h, &subsys->nsheads, entry) {
-		if (nvme_ns_ids_valid(&new->ids) &&
-		    nvme_ns_ids_equal(&new->ids, &h->ids))
+		if (nvme_ns_ids_valid(ids) && nvme_ns_ids_equal(ids, &h->ids))
 			return -EINVAL;
 	}
 
@@ -3781,7 +3780,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
 	head->ids = *ids;
 	kref_init(&head->ref);
 
-	ret = __nvme_check_ids(ctrl->subsys, head);
+	ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids);
 	if (ret) {
 		dev_err(ctrl->device,
 			"duplicate IDs for nsid %d\n", nsid);
-- 
2.30.2




* [PATCH 2/4] nvme: fix the check for duplicate unique identifiers
  2022-02-24 19:28 properly validate the nvme unique identifiers are unique v2 Christoph Hellwig
  2022-02-24 19:28 ` [PATCH 1/4] nvme: cleanup __nvme_check_ids Christoph Hellwig
@ 2022-02-24 19:28 ` Christoph Hellwig
  2022-02-24 22:51   ` Chaitanya Kulkarni
  2022-02-24 19:28 ` [PATCH 3/4] nvme: check for duplicate identifiers earlier Christoph Hellwig
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-02-24 19:28 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg; +Cc: linux-nvme

nvme_subsys_check_duplicate_ids needs to return an error if any of
the identifiers matches, not just if all of them match.  But it does not
need to, and should not, look at the CSI value for this sanity check.

Rewrite the logic to be separate from nvme_ns_ids_equal and optimize it
by reducing duplicate checks for non-present identifiers.

Fixes: ed754e5deeb1 ("nvme: track shared namespaces")
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/core.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 076a03b801b7e..3e6ac985b24f6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1716,13 +1716,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
 }
 
-static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
-{
-	return !uuid_is_null(&ids->uuid) ||
-		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
-		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
-}
-
 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
 {
 	return uuid_equal(&a->uuid, &b->uuid) &&
@@ -3676,12 +3669,21 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
 		struct nvme_ns_ids *ids)
 {
+	bool has_uuid = !uuid_is_null(&ids->uuid);
+	bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
+	bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
 	struct nvme_ns_head *h;
 
 	lockdep_assert_held(&subsys->lock);
 
 	list_for_each_entry(h, &subsys->nsheads, entry) {
-		if (nvme_ns_ids_valid(ids) && nvme_ns_ids_equal(ids, &h->ids))
+		if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
+			return -EINVAL;
+		if (has_nguid &&
+		    memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
+			return -EINVAL;
+		if (has_eui64 &&
+		    memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
 			return -EINVAL;
 	}
 
-- 
2.30.2
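
A side note on the semantics for readers following along:
nvme_ns_ids_equal() answers "are these two ID sets identical?", which
requires all three fields to match, while duplicate detection has to trip
as soon as any *present* identifier matches.  A rough userspace
illustration of that distinction, with stand-in types rather than the
kernel's:

#include <stdbool.h>
#include <string.h>

struct ids {
	unsigned char uuid[16];
	unsigned char nguid[16];
	unsigned char eui64[8];
};

static bool is_zero(const unsigned char *p, size_t n)
{
	while (n--)
		if (*p++)
			return false;
	return true;
}

/* all fields must match: equality, as in nvme_ns_ids_equal() */
static bool ids_equal(const struct ids *a, const struct ids *b)
{
	return !memcmp(a->uuid, b->uuid, 16) &&
	       !memcmp(a->nguid, b->nguid, 16) &&
	       !memcmp(a->eui64, b->eui64, 8);
}

/* any *present* field matching is a collision: what this patch needs */
static bool ids_collide(const struct ids *a, const struct ids *b)
{
	return (!is_zero(a->uuid, 16) && !memcmp(a->uuid, b->uuid, 16)) ||
	       (!is_zero(a->nguid, 16) && !memcmp(a->nguid, b->nguid, 16)) ||
	       (!is_zero(a->eui64, 8) && !memcmp(a->eui64, b->eui64, 8));
}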




* [PATCH 3/4] nvme: check for duplicate identifiers earlier
  2022-02-24 19:28 properly validate the nvme unique identifiers are unique v2 Christoph Hellwig
  2022-02-24 19:28 ` [PATCH 1/4] nvme: cleanup __nvme_check_ids Christoph Hellwig
  2022-02-24 19:28 ` [PATCH 2/4] nvme: fix the check for duplicate unique identifiers Christoph Hellwig
@ 2022-02-24 19:28 ` Christoph Hellwig
  2022-02-24 22:52   ` Chaitanya Kulkarni
  2022-02-24 19:28 ` [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique Christoph Hellwig
  2022-02-24 19:38 ` properly validate the nvme unique identifiers are unique v2 Keith Busch
  4 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-02-24 19:28 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg; +Cc: linux-nvme

Lift the check for duplicate identifiers into nvme_init_ns_head, which
avoids pointless error unwinding in case a duplicate is found, and also
matches where we check identifier validity for the multipath case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/core.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3e6ac985b24f6..cece987ba1691 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3782,13 +3782,6 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
 	head->ids = *ids;
 	kref_init(&head->ref);
 
-	ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids);
-	if (ret) {
-		dev_err(ctrl->device,
-			"duplicate IDs for nsid %d\n", nsid);
-		goto out_cleanup_srcu;
-	}
-
 	if (head->ids.csi) {
 		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
 		if (ret)
@@ -3827,6 +3820,12 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 	mutex_lock(&ctrl->subsys->lock);
 	head = nvme_find_ns_head(ctrl->subsys, nsid);
 	if (!head) {
+		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+		if (ret) {
+			dev_err(ctrl->device,
+				"duplicate IDs for nsid %d\n", nsid);
+			goto out_unlock;
+		}
 		head = nvme_alloc_ns_head(ctrl, nsid, ids);
 		if (IS_ERR(head)) {
 			ret = PTR_ERR(head);
-- 
2.30.2
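
The general pattern behind the move: validating inputs before allocating
resources means a validation failure has nothing to unwind.  A generic C
sketch of the shape this patch moves towards (an illustration with made-up
names, not the kernel code):

#include <errno.h>
#include <stdlib.h>

struct head {
	int nsid;
};

/* stand-in for nvme_subsys_check_duplicate_ids(): 0 or -EINVAL */
static int check_duplicates(int nsid)
{
	return 0;
}

static struct head *create_head(int nsid, int *ret)
{
	struct head *h;

	*ret = check_duplicates(nsid);	/* validate first ... */
	if (*ret)
		return NULL;		/* ... so there is nothing to undo */

	h = calloc(1, sizeof(*h));	/* allocate only after validation */
	if (!h) {
		*ret = -ENOMEM;
		return NULL;
	}
	h->nsid = nsid;
	return h;
}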




* [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-02-24 19:28 properly validate the nvme unique identifiers are unique v2 Christoph Hellwig
                   ` (2 preceding siblings ...)
  2022-02-24 19:28 ` [PATCH 3/4] nvme: check for duplicate identifiers earlier Christoph Hellwig
@ 2022-02-24 19:28 ` Christoph Hellwig
  2022-02-24 22:54   ` Chaitanya Kulkarni
  2022-04-08  1:04   ` Luis Chamberlain
  2022-02-24 19:38 ` properly validate the nvme unique identifiers are unique v2 Keith Busch
  4 siblings, 2 replies; 51+ messages in thread
From: Christoph Hellwig @ 2022-02-24 19:28 UTC (permalink / raw)
  To: Keith Busch, Sagi Grimberg; +Cc: linux-nvme

Add a check to verify that the unique identifiers are unique globally
in addition to the existing check that verifies that they are unique
inside a single subsystem.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/core.c | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cece987ba1691..f8084ded69e50 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3810,12 +3810,45 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
 	return ERR_PTR(ret);
 }
 
+static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
+		struct nvme_ns_ids *ids)
+{
+	struct nvme_subsystem *s;
+	int ret = 0;
+
+	/*
+	 * Note that this check is racy as we try to avoid holding the global
+	 * lock over the whole ns_head creation.  But it is only intended as
+	 * a sanity check anyway.
+	 */
+	mutex_lock(&nvme_subsystems_lock);
+	list_for_each_entry(s, &nvme_subsystems, entry) {
+		if (s == this)
+			continue;
+		mutex_lock(&s->lock);
+		ret = nvme_subsys_check_duplicate_ids(s, ids);
+		mutex_unlock(&s->lock);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&nvme_subsystems_lock);
+
+	return ret;
+}
+
 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 		struct nvme_ns_ids *ids, bool is_shared)
 {
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	struct nvme_ns_head *head = NULL;
-	int ret = 0;
+	int ret;
+
+	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
+	if (ret) {
+		dev_err(ctrl->device,
+			"globally duplicate IDs for nsid %d\n", nsid);
+		return ret;
+	}
 
 	mutex_lock(&ctrl->subsys->lock);
 	head = nvme_find_ns_head(ctrl->subsys, nsid);
@@ -3823,7 +3856,8 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
 		if (ret) {
 			dev_err(ctrl->device,
-				"duplicate IDs for nsid %d\n", nsid);
+				"duplicate IDs in subsystem for nsid %d\n",
+				nsid);
 			goto out_unlock;
 		}
 		head = nvme_alloc_ns_head(ctrl, nsid, ids);
-- 
2.30.2
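
The comment in nvme_global_check_duplicate_ids() above describes a
deliberate trade-off: the global lock protects only the list walk, each
subsystem's own lock is taken just for the per-subsystem check, and the
answer may be stale by the time the caller acts on it.  A minimal pthread
sketch of the same pattern (illustrative userspace code, not the kernel's):

#include <pthread.h>
#include <stdbool.h>

struct subsys {
	pthread_mutex_t lock;		/* protects this subsystem's data */
	struct subsys *next;
};

static pthread_mutex_t subsystems_lock = PTHREAD_MUTEX_INITIALIZER;
static struct subsys *subsystems;	/* global list, protected above */

/* stand-in for nvme_subsys_check_duplicate_ids() */
static bool check_one(struct subsys *s)
{
	return false;
}

static bool global_check(struct subsys *skip)
{
	struct subsys *s;
	bool dup = false;

	pthread_mutex_lock(&subsystems_lock);
	for (s = subsystems; s && !dup; s = s->next) {
		if (s == skip)
			continue;
		pthread_mutex_lock(&s->lock);	/* fine-grained per-entry lock */
		dup = check_one(s);
		pthread_mutex_unlock(&s->lock);
	}
	pthread_mutex_unlock(&subsystems_lock);
	return dup;	/* may be stale: a sanity check, not a guarantee */
}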




* Re: properly validate the nvme unique identifiers are unique v2
  2022-02-24 19:28 properly validate the nvme unique identifiers are unique v2 Christoph Hellwig
                   ` (3 preceding siblings ...)
  2022-02-24 19:28 ` [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique Christoph Hellwig
@ 2022-02-24 19:38 ` Keith Busch
  4 siblings, 0 replies; 51+ messages in thread
From: Keith Busch @ 2022-02-24 19:38 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Sagi Grimberg, linux-nvme

Series looks good.

Reviewed-by: Keith Busch <kbusch@kernel.org>



* Re: [PATCH 1/4] nvme: cleanup __nvme_check_ids
  2022-02-24 19:28 ` [PATCH 1/4] nvme: cleanup __nvme_check_ids Christoph Hellwig
@ 2022-02-24 22:50   ` Chaitanya Kulkarni
  0 siblings, 0 replies; 51+ messages in thread
From: Chaitanya Kulkarni @ 2022-02-24 22:50 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, Sagi Grimberg, Keith Busch

On 2/24/22 11:28, Christoph Hellwig wrote:
> Pass the actual nvme_ns_ids used for the comparison instead of the
> ns_head that isn't needed and use a more descriptive function name.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---


Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>




* Re: [PATCH 2/4] nvme: fix the check for duplicate unique identifiers
  2022-02-24 19:28 ` [PATCH 2/4] nvme: fix the check for duplicate unique identifiers Christoph Hellwig
@ 2022-02-24 22:51   ` Chaitanya Kulkarni
  0 siblings, 0 replies; 51+ messages in thread
From: Chaitanya Kulkarni @ 2022-02-24 22:51 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, Sagi Grimberg; +Cc: linux-nvme

On 2/24/22 11:28, Christoph Hellwig wrote:
> nvme_subsys_check_duplicate_ids needs to return an error if any of
> the identifiers matches, not just if all of them match.  But it does not
> need to, and should not, look at the CSI value for this sanity check.
> 
> Rewrite the logic to be separate from nvme_ns_ids_equal and optimize it
> by reducing duplicate checks for non-present identifiers.
> 
> Fixes: ed754e5deeb1 ("nvme: track shared namespaces")
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---


Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>




* Re: [PATCH 3/4] nvme: check for duplicate identifiers earlier
  2022-02-24 19:28 ` [PATCH 3/4] nvme: check for duplicate identifiers earlier Christoph Hellwig
@ 2022-02-24 22:52   ` Chaitanya Kulkarni
  0 siblings, 0 replies; 51+ messages in thread
From: Chaitanya Kulkarni @ 2022-02-24 22:52 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, Sagi Grimberg; +Cc: linux-nvme

On 2/24/22 11:28, Christoph Hellwig wrote:
> Lift the check for duplicate identifiers into nvme_init_ns_head, which
> avoids pointless error unwinding in case a duplicate is found, and also
> matches where we check identifier validity for the multipath case.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/nvme/host/core.c | 13 ++++++-------
>   1 file changed, 6 insertions(+), 7 deletions(-)
> 


Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>




* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-02-24 19:28 ` [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique Christoph Hellwig
@ 2022-02-24 22:54   ` Chaitanya Kulkarni
  2022-04-08  1:04   ` Luis Chamberlain
  1 sibling, 0 replies; 51+ messages in thread
From: Chaitanya Kulkarni @ 2022-02-24 22:54 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nvme, Keith Busch, Sagi Grimberg

On 2/24/22 11:28, Christoph Hellwig wrote:
> Add a check to verify that the unique identifiers are unique globally
> in addition to the existing check that verifies that they are unique
> inside a single subsystem.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>






* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-02-24 19:28 ` [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique Christoph Hellwig
  2022-02-24 22:54   ` Chaitanya Kulkarni
@ 2022-04-08  1:04   ` Luis Chamberlain
  2022-04-08  5:29     ` Christoph Hellwig
  1 sibling, 1 reply; 51+ messages in thread
From: Luis Chamberlain @ 2022-04-08  1:04 UTC (permalink / raw)
  To: Christoph Hellwig, Klaus Jensen
  Cc: Keith Busch, Sagi Grimberg, linux-nvme, mcgrof

On Thu, Feb 24, 2022 at 08:28:45PM +0100, Christoph Hellwig wrote:
> Add a check to verify that the unique identifiers are unique globally
> in addition to the existing check that verifies that they are unique
> inside a single subsystem.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/nvme/host/core.c | 38 ++++++++++++++++++++++++++++++++++++--
>  1 file changed, 36 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index cece987ba1691..f8084ded69e50 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -3810,12 +3810,45 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
>  	return ERR_PTR(ret);
>  }
>  
> +static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
> +		struct nvme_ns_ids *ids)
> +{
> +	struct nvme_subsystem *s;
> +	int ret = 0;
> +
> +	/*
> +	 * Note that this check is racy as we try to avoid holding the global
> +	 * lock over the whole ns_head creation.  But it is only intended as
> +	 * a sanity check anyway.
> +	 */
> +	mutex_lock(&nvme_subsystems_lock);
> +	list_for_each_entry(s, &nvme_subsystems, entry) {
> +		if (s == this)
> +			continue;
> +		mutex_lock(&s->lock);
> +		ret = nvme_subsys_check_duplicate_ids(s, ids);
> +		mutex_unlock(&s->lock);
> +		if (ret)
> +			break;
> +	}
> +	mutex_unlock(&nvme_subsystems_lock);
> +
> +	return ret;
> +}
> +
>  static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
>  		struct nvme_ns_ids *ids, bool is_shared)
>  {
>  	struct nvme_ctrl *ctrl = ns->ctrl;
>  	struct nvme_ns_head *head = NULL;
> -	int ret = 0;
> +	int ret;
> +
> +	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
> +	if (ret) {
> +		dev_err(ctrl->device,
> +			"globally duplicate IDs for nsid %d\n", nsid);

So what sort of mechanisms would break in nvme if we don't (as it was
before this patch)? The reason I ask is that it would seem tons of
instantiated qemu guests used the default, and don't set the UUID,
and so this is auto-generated as per the documentation [0]. However,
I'm suspecting the auto-generation may just be... a single value:

root@kdevops ~ # cat
/sys/devices/pci0000:00/0000:00:08.0/nvme/nvme0/nvme0n1/uuid
00000001-0000-0000-0000-000000000000
root@kdevops ~ # cat
/sys/devices/pci0000:00/0000:00:0b.0/nvme/nvme3/nvme3n1/uuid
00000001-0000-0000-0000-000000000000
root@kdevops ~ # cat
/sys/devices/pci0000:00/0000:00:0b.0/nvme/nvme3/nvme3n1/uuid
00000001-0000-0000-0000-000000000000
root@kdevops ~ # cat
/sys/devices/pci0000:00/0000:00:0a.0/nvme/nvme2/nvme2n1/uuid
00000001-0000-0000-0000-000000000000

This means I have to manually try to decipher this on my
guests.

Printing a warning would only make sense if this really isn't critical.
But as of now this patch will even prevent boot, as some filesystems
that were set to mount in /etc/fstab now can't be mounted.

[0] https://qemu-project.gitlab.io/qemu/system/devices/nvme.html

  Luis



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-08  1:04   ` Luis Chamberlain
@ 2022-04-08  5:29     ` Christoph Hellwig
  2022-04-08  7:19       ` Klaus Jensen
  0 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-04-08  5:29 UTC (permalink / raw)
  To: Luis Chamberlain
  Cc: Christoph Hellwig, Klaus Jensen, Keith Busch, Sagi Grimberg, linux-nvme

On Thu, Apr 07, 2022 at 06:04:59PM -0700, Luis Chamberlain wrote:
> On Thu, Feb 24, 2022 at 08:28:45PM +0100, Christoph Hellwig wrote:
> > Add a check to verify that the unique identifiers are unique globally
> > in addition to the existing check that verifies that they are unique
> > inside a single subsystem.
> > 
> > Signed-off-by: Christoph Hellwig <hch@lst.de>
> > ---
> >  drivers/nvme/host/core.c | 38 ++++++++++++++++++++++++++++++++++++--
> >  1 file changed, 36 insertions(+), 2 deletions(-)
> > 
> > diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> > index cece987ba1691..f8084ded69e50 100644
> > --- a/drivers/nvme/host/core.c
> > +++ b/drivers/nvme/host/core.c
> > @@ -3810,12 +3810,45 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
> >  	return ERR_PTR(ret);
> >  }
> >  
> > +static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
> > +		struct nvme_ns_ids *ids)
> > +{
> > +	struct nvme_subsystem *s;
> > +	int ret = 0;
> > +
> > +	/*
> > +	 * Note that this check is racy as we try to avoid holding the global
> > +	 * lock over the whole ns_head creation.  But it is only intended as
> > +	 * a sanity check anyway.
> > +	 */
> > +	mutex_lock(&nvme_subsystems_lock);
> > +	list_for_each_entry(s, &nvme_subsystems, entry) {
> > +		if (s == this)
> > +			continue;
> > +		mutex_lock(&s->lock);
> > +		ret = nvme_subsys_check_duplicate_ids(s, ids);
> > +		mutex_unlock(&s->lock);
> > +		if (ret)
> > +			break;
> > +	}
> > +	mutex_unlock(&nvme_subsystems_lock);
> > +
> > +	return ret;
> > +}
> > +
> >  static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
> >  		struct nvme_ns_ids *ids, bool is_shared)
> >  {
> >  	struct nvme_ctrl *ctrl = ns->ctrl;
> >  	struct nvme_ns_head *head = NULL;
> > -	int ret = 0;
> > +	int ret;
> > +
> > +	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
> > +	if (ret) {
> > +		dev_err(ctrl->device,
> > +			"globally duplicate IDs for nsid %d\n", nsid);
> 
> So what sort of mechanisms would break in nvme if we don't (as it was
> before this patch)?

The most directly visible problem is that this breaks the
/dev/disk/by-id/ links - all devices would have to point to the same
one, and which one actually shows up is somewhat random.  This basically
means random corruption if people actually use the links or the uuid
functionality in any other way.

Note that we have already checked for this inside a controller for a
long time, the commit just extended it across controllers.
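
For anyone who wants to spot this condition on a running system, here is a
rough userspace sketch; the /sys/block/nvme*/uuid paths are an assumption
based on the sysfs layout quoted earlier in the thread, and a null UUID is
treated as "not reported":

#include <glob.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char seen[64][64];	/* at most 64 namespaces for this sketch */
	int nseen = 0;
	glob_t g;
	size_t i;

	if (glob("/sys/block/nvme*/uuid", 0, NULL, &g))
		return 0;
	for (i = 0; i < g.gl_pathc; i++) {
		char line[64] = "";
		FILE *f = fopen(g.gl_pathv[i], "r");
		int j;

		if (!f)
			continue;
		if (fgets(line, sizeof(line), f))
			line[strcspn(line, "\n")] = '\0';
		fclose(f);
		if (!line[0] ||
		    !strcmp(line, "00000000-0000-0000-0000-000000000000"))
			continue;	/* absent or null: nothing to compare */
		for (j = 0; j < nseen; j++)
			if (!strcmp(seen[j], line))
				printf("duplicate uuid %s (%s)\n",
				       line, g.gl_pathv[i]);
		if (nseen < 64)
			strcpy(seen[nseen++], line);
	}
	globfree(&g);
	return 0;
}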


> The reason I ask is that it would seem tons of
> instantiated qemu guests used the default, and don't set the UUID,
> and so this is auto-generated as per the documentation [0]. However,
> I'm suspecting the auto-generation may just be... a single value:

Odd.  All my qemu VMs don't show a UUID unless specifically set.

> 
> root@kdevops ~ # cat
> /sys/devices/pci0000:00/0000:00:08.0/nvme/nvme0/nvme0n1/uuid
> 00000001-0000-0000-0000-000000000000

And the 1 really seems like a bug in your particular setup.  What qemu
version is this?  Does it have any local or distro patches applied?



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-08  5:29     ` Christoph Hellwig
@ 2022-04-08  7:19       ` Klaus Jensen
  2022-04-08 16:10         ` Christoph Hellwig
  0 siblings, 1 reply; 51+ messages in thread
From: Klaus Jensen @ 2022-04-08  7:19 UTC (permalink / raw)
  To: Christoph Hellwig, Luis Chamberlain
  Cc: Keith Busch, Sagi Grimberg, linux-nvme



On Fri, Apr 8, 2022, at 07:29, Christoph Hellwig wrote:
> On Thu, Apr 07, 2022 at 06:04:59PM -0700, Luis Chamberlain wrote:
>> On Thu, Feb 24, 2022 at 08:28:45PM +0100, Christoph Hellwig wrote:
>> > Add a check to verify that the unique identifiers are unique globally
>> > in addition to the existing check that verifies that they are unique
>> > inside a single subsystem.
>> > 
>> > Signed-off-by: Christoph Hellwig <hch@lst.de>
>> > ---
>> >  drivers/nvme/host/core.c | 38 ++++++++++++++++++++++++++++++++++++--
>> >  1 file changed, 36 insertions(+), 2 deletions(-)
>> > 
>> > diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
>> > index cece987ba1691..f8084ded69e50 100644
>> > --- a/drivers/nvme/host/core.c
>> > +++ b/drivers/nvme/host/core.c
>> > @@ -3810,12 +3810,45 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
>> >  	return ERR_PTR(ret);
>> >  }
>> >  
>> > +static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
>> > +		struct nvme_ns_ids *ids)
>> > +{
>> > +	struct nvme_subsystem *s;
>> > +	int ret = 0;
>> > +
>> > +	/*
>> > +	 * Note that this check is racy as we try to avoid holding the global
>> > +	 * lock over the whole ns_head creation.  But it is only intended as
>> > +	 * a sanity check anyway.
>> > +	 */
>> > +	mutex_lock(&nvme_subsystems_lock);
>> > +	list_for_each_entry(s, &nvme_subsystems, entry) {
>> > +		if (s == this)
>> > +			continue;
>> > +		mutex_lock(&s->lock);
>> > +		ret = nvme_subsys_check_duplicate_ids(s, ids);
>> > +		mutex_unlock(&s->lock);
>> > +		if (ret)
>> > +			break;
>> > +	}
>> > +	mutex_unlock(&nvme_subsystems_lock);
>> > +
>> > +	return ret;
>> > +}
>> > +
>> >  static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
>> >  		struct nvme_ns_ids *ids, bool is_shared)
>> >  {
>> >  	struct nvme_ctrl *ctrl = ns->ctrl;
>> >  	struct nvme_ns_head *head = NULL;
>> > -	int ret = 0;
>> > +	int ret;
>> > +
>> > +	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
>> > +	if (ret) {
>> > +		dev_err(ctrl->device,
>> > +			"globally duplicate IDs for nsid %d\n", nsid);
>> 
>> So what sort of mechanisms would break in nvme if we don't (as it was
>> before this patch)?
>
> The most directly visible problem is that this breaks the
> /dev/disk/by-id/ links - all devices would have to point to the same
> one, and which one actually shows up is somewhat random.  This basically
> means random corruption if people actually use the links or the uuid
> functionality in any other way.
>
> Note that we have already checked for this inside a controller for a
> long time, the commit just extended it across controllers.
>
>
>> The reason I ask is that it would seem tons of
>> instantiated qemu guests used the default, and don't set the UUID,
>> and so this is auto-generated as per the documentation [0]. However,
>> I'm suspecting the auto-generation may just be... a single value:
>
> Odd.  All my qemu VMs don't show a UUID unless specifically set.
>

Odd indeed. With “legacy/single namespace” setup (drive parameter directly on the nvme device), the uuid, eui64 and nguid should be zeroed.

Using the new -device nvme-ns, QEMU will randomize the uuid. However the eui64 will be more static and only differ with the namespace id so it will not be unique across subsystems (this needs fixing in QEMU).

In any case, with -device nvme-ns, uuid and eui64 can be set explicitly with device parameters and that should be used if predictable behavior is required.
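
To make the cross-subsystem collision concrete: if an EUI-64 is derived
from nothing but a fixed prefix and the namespace id (a hypothetical scheme
for illustration only, not QEMU's actual code), every controller produces
the same value for the same nsid:

#include <stdint.h>
#include <stdio.h>

static uint64_t make_eui64(uint32_t nsid)
{
	/* fixed vendor prefix + nsid: identical on every controller */
	return 0x5254000000000000ULL | nsid;
}

int main(void)
{
	uint64_t a = make_eui64(1);	/* subsystem A, nsid 1 */
	uint64_t b = make_eui64(1);	/* subsystem B, nsid 1 */

	printf("collision: %d\n", a == b);	/* prints 1 */
	return 0;
}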

>> 
>> root@kdevops ~ # cat
>> /sys/devices/pci0000:00/0000:00:08.0/nvme/nvme0/nvme0n1/uuid
>> 00000001-0000-0000-0000-000000000000
>
> And the 1 really seems like a bug in your particular setup.  What qemu
> version is this?  Does it have any local or distro patches applied?

I cannot reproduce the 1 either, but QEMU might have spliced the nsid into the uuid at some point in the past, can’t remember.



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-08  7:19       ` Klaus Jensen
@ 2022-04-08 16:10         ` Christoph Hellwig
  2022-04-08 17:46           ` Luis Chamberlain
  0 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-04-08 16:10 UTC (permalink / raw)
  To: Klaus Jensen
  Cc: Christoph Hellwig, Luis Chamberlain, Keith Busch, Sagi Grimberg,
	linux-nvme

On Fri, Apr 08, 2022 at 09:19:04AM +0200, Klaus Jensen wrote:
> Odd indeed. With “legacy/single namespace” setup (drive parameter directly on the nvme device), the uuid, eui64 and nguid should be zeroed.
> 
> Using the new -device nvme-ns, QEMU will randomize the uuid. However the eui64 will be more static and only differ with the namespace id so it will not be unique across subsystems (this needs fixing in QEMU).

Well, if that is the case we'll need to quirk the affected qemu versions
as duplicate global ids must not happen.  Can you help to come up
with a heuristic to catch the affected versions?



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-08 16:10         ` Christoph Hellwig
@ 2022-04-08 17:46           ` Luis Chamberlain
  2022-04-11  5:05             ` Christoph Hellwig
  0 siblings, 1 reply; 51+ messages in thread
From: Luis Chamberlain @ 2022-04-08 17:46 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Klaus Jensen, Keith Busch, Sagi Grimberg, linux-nvme, mcgrof

On Fri, Apr 08, 2022 at 06:10:52PM +0200, Christoph Hellwig wrote:
> On Fri, Apr 08, 2022 at 09:19:04AM +0200, Klaus Jensen wrote:
> > Odd indeed. With “legacy/single namespace” setup (drive parameter directly on the nvme device), the uuid, eui64 and nguid should be zeroed.
> > 
> > Using the new -device nvme-ns, QEMU will randomize the uuid. However the eui64 will be more static and only differ with the namespace id so it will not be unique across subsystems (this needs fixing in QEMU).
> 
> Well, if that is the case we'll need to quirk the affected qemu versions
> as duplicate global ids must not happen.  Can you help to come up
> with a heuristic to catch the affected versions?

The above note from Klaus seems to indicate this is not yet fixed in
qemu for eui64. Please correct me if I'm wrong, Klaus.

However, what I'm seeing seems to show that the uuid is the same uuid as
well when not using -device nvme-ns but just -device nvme (this is
called legacy now it seems?): without the uuid set you end up in the
situation I described. I just destroyed my guests and started a setup
from scratch using qemu-system-x86_64 v6.2.0 on debian-testing, and
ended up in a different situation, but it is still a bit perplexing.

What I did is I just got debian-testing guests on a host, use kdevops [0]
as follows:

make menuconfig # enable the linux workflow and use linux-next

grep ^CONFIG_BOOTLINUX .config
CONFIG_BOOTLINUX=y
CONFIG_BOOTLINUX_DEV=y
CONFIG_BOOTLINUX_TREE_NEXT=y
CONFIG_BOOTLINUX_TREE_NAME="linux-next"
CONFIG_BOOTLINUX_TREE="https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git"
CONFIG_BOOTLINUX_TREE_TAG="next-20220301"

grep CONFIG_KDEVOPS_HOSTS_PREFIX .config
CONFIG_KDEVOPS_HOSTS_PREFIX="kdevops"

make # generate ansible hosts file and vagrant nodes file
make bringup # bring up guests

ssh kdevops sudo lsblk -a -O -a| awk '{print $3"\t"$6"\t"$15"\t"$32}'
PATH    FSSIZE  UUID    GROUP
/dev/vda        dos     0       none
/dev/vda1       19.5G   52e64545        512
/dev/nvme1n1    0       root nvme.1b36-73657269616c31-51454d55204e564d65204374726c-00000001
/dev/nvme0n1    100G    128     512
/dev/nvme2n1    0       root nvme.1b36-73657269616c32-51454d55204e564d65204374726c-00000001
/dev/nvme3n1    0       root nvme.1b36-73657269616c33-51454d55204e564d65204374726c-00000001

So although not the same as before, I see something that looks random but
follows the same pattern, and I can't tell what that nvme.1b36-... really
is. Is that the UUID? GUID?

root@kdevops ~ # apt-get install nvme-cli
root@kdevops ~ # nvme ns-descs /dev/nvme0n1
NVME Namespace Identification Descriptors NS 1:
uuid    : 00000000-0000-0000-0000-000000000000
csi     : 0
root@kdevops ~ # nvme ns-descs /dev/nvme1n1
NVME Namespace Identification Descriptors NS 1:
uuid    : 00000000-0000-0000-0000-000000000000
csi     : 0
root@kdevops ~ # nvme ns-descs /dev/nvme2n1
NVME Namespace Identification Descriptors NS 1:
uuid    : 00000000-0000-0000-0000-000000000000
csi     : 0
root@kdevops ~ # nvme ns-descs /dev/nvme3n1
NVME Namespace Identification Descriptors NS 1:
uuid    : 00000000-0000-0000-0000-000000000000
csi     : 0

Huh, the UUIDs seem to be all zeroes now....

But for whatever reason this boots fine now on linux next:

# On the host:
make linux # build and install linux-next next-20220301

ssh kdevops uname -r # Verify everything went fine
5.17.0-rc6-next-20220301

Now ssh to kdevops and git fetch the latest linux-next, then make,
install and reboot, and monitor on the console how it goes.

ssh kdevops
cd /data/linux-next
git fetch origin
git branch -D master
git checkout -b master origin/master
make oldconfig
make -j 8
sudo make modules_install install
sudo reboot

On the host, monitor progress on reboot in case of
failure:

sudo virsh list
sudo virsh console domain_name_or_id

However I see things moving fine now. It is booting fine with
next-20220408. The only thing I changed was probably the version of
qemu-system-x86_64 for the *initial* bringup. I gather that an older
version of qemu was generating a uuid of 00000001-0000-0000-0000-000000000000
and now it's not clear what it is given the above. The other change was
that now /data/ is using btrfs by default in kdevops, but even if I
create an XFS partition on /dev/nvme1n1 and always mount it using a
label in /etc/fstab, things are good.

So... it may be that the old version of qemu which generated that
00000001-0000-0000-0000-000000000000 uuid creates a conflict
with "nvme: check that EUI/GUID/UUID are globally unique", but it's
not clear why the above newer qemu-system-x86_64 v6.2.0 doesn't,
given what is observed as well.

So a few question remain:

1) Shouldn't this kernel commit still refuse to allow these nvme drives to be
used, given the above, even for qemu-system-x86_64 v6.2.0?

2) What version of qemu was issuing UUIDs that look like
00000001-0000-0000-0000-000000000000, which *does*
cause an issue with this new kernel commit?

[0] https://github.com/mcgrof/kdevops

  Luis



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-08 17:46           ` Luis Chamberlain
@ 2022-04-11  5:05             ` Christoph Hellwig
  2022-04-11  5:54               ` Christoph Hellwig
  0 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-04-11  5:05 UTC (permalink / raw)
  To: Luis Chamberlain
  Cc: Christoph Hellwig, Klaus Jensen, Keith Busch, Sagi Grimberg, linux-nvme

On Fri, Apr 08, 2022 at 10:46:59AM -0700, Luis Chamberlain wrote:
> The above note from Klaus seems to indicate this is not yet fixed in
> qemu for eui64. Please correct me if I'm wrong, Klaus.
> 
> However, what I'm seeing seems to show that the uuid is the same uuid as
> well when not using -device nvme-ns but just -device nvme (this is
> called legacy now it seems?): without the uuid set you end up in the
> situation I described. I just destroyed my guests and started a setup
> from scratch using qemu-system-x86_64 v6.2.0 on debian-testing, and
> ended up in a different situation, but it is still a bit perplexing.

With my usual qemu test setup (built from git a few weeks ago), no
uuid shows up unless explicitly set.



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-11  5:05             ` Christoph Hellwig
@ 2022-04-11  5:54               ` Christoph Hellwig
  2022-04-11  6:01                 ` Klaus Jensen
                                   ` (2 more replies)
  0 siblings, 3 replies; 51+ messages in thread
From: Christoph Hellwig @ 2022-04-11  5:54 UTC (permalink / raw)
  To: Luis Chamberlain
  Cc: Christoph Hellwig, Klaus Jensen, Keith Busch, Sagi Grimberg, linux-nvme

On Mon, Apr 11, 2022 at 07:05:33AM +0200, Christoph Hellwig wrote:
> > However, what I'm seeing seems to show that the uuid is the same uuid as
> > well when not using -device nvme-ns but just -device nvme (this is
> > called legacy now it seems?): without the uuid set you end up in the
> > situation I described. I just destroyed my guests and started a setup
> > from scratch using qemu-system-x86_64 v6.2.0 on debian-testing, and
> > ended up in a different situation, but it is still a bit perplexing.
> 
> With my usual qemu test setup (built from git a few weeks ago), no
> uuid shows up unless explicitly set.

Digging a bit deeper this was "fixed" by:

5f4884c4412318a1adc105dea9cc28f7625ce730
Author: Klaus Jensen <k.jensen@samsung.com>
Date:   Mon Aug 9 12:34:40 2021 +0200

    hw/nvme: fix missing variable initializers

    Coverity found that 'uuid', 'csi' and 'eui64' are uninitialized.
    While we set most of the fields, we do not explicitly set the rsvd2
    field in the NvmeIdNsDescr header.
		         
    Fix this by explicitly zero-initializing the variables.

Note that even with the fix the uuid field is always reported, even
when it shouldn't be - it's just that Linux handles a 0 UUID gracefully.

I can also not find any code that would assign a different uuid
when using a different command line syntax, but given how unusable
the new syntax is I've not actually been able to try it.

So I think for now we'll just need to disable identifiers on qemu.

It would be great if qemu could switch to a new PCI ID after this is
fixed as that simplifies the quirking.
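
For reference, such quirks are keyed off the PCI ID table in
drivers/nvme/host/pci.c.  One plausible shape, with a hypothetical flag
name (a sketch of the idea against QEMU's 1b36:0010 ID, not a committed
fix):

static const struct pci_device_id qemu_quirk_sketch[] = {
	/* QEMU emulated controller: reported EUI/NGUID/UUID are unreliable */
	{ PCI_DEVICE(0x1b36, 0x0010),
		.driver_data = NVME_QUIRK_BOGUS_NID, },
};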



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-11  5:54               ` Christoph Hellwig
@ 2022-04-11  6:01                 ` Klaus Jensen
  2022-04-11  6:09                   ` Christoph Hellwig
  2022-04-12 18:46                 ` Luis Chamberlain
  2022-04-18 23:30                 ` Alan Adamson
  2 siblings, 1 reply; 51+ messages in thread
From: Klaus Jensen @ 2022-04-11  6:01 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Luis Chamberlain, Keith Busch, Sagi Grimberg, linux-nvme


On Apr 11 07:54, Christoph Hellwig wrote:
> On Mon, Apr 11, 2022 at 07:05:33AM +0200, Christoph Hellwig wrote:
> > > However, what I'm seeing seems to show that the uuid is the same uuid as
> > > well when not using -device nvme-ns but just -device nvme (this is
> > > called legacy now it seems?): without the uuid set you end up in the
> > > situation I described. I just destroyed my guests and started a setup
> > > from scratch using qemu-system-x86_64 v6.2.0 on debian-testing, and
> > > ended up in a different situation, but it is still a bit perplexing.
> > 
> > With my usual qemu test setup (built from git a few weeks ago), no
> > uuid shows up unless explicitly set.
> 
> Digging a bit deeper this was "fixed" by:
> 
> 5f4884c4412318a1adc105dea9cc28f7625ce730
> Author: Klaus Jensen <k.jensen@samsung.com>
> Date:   Mon Aug 9 12:34:40 2021 +0200
> 
>     hw/nvme: fix missing variable initializers
> 
>     Coverity found that 'uuid', 'csi' and 'eui64' are uninitialized.
>     While we set most of the fields, we do not explicitly set the rsvd2
>     field in the NvmeIdNsDescr header.
> 		         
>     Fix this by explicitly zero-initializing the variables.
> 
> Note that even with the fix the uuid field is always reported, even
> when it shouldn't be - it's just that Linux handles a 0 UUID gracefully.
> 

Right.

> I can also not find any code that would assign a different uuid
> when using a different command line syntax, but given how unusable
> the new syntax is I've not actually been able to try it.
> 

Are you referring to -device nvme-ns "syntax"? Using -device nvme, you
cannot set uuid.

> So I think for now we'll just need to disable identifiers on qemu.
> 
> It would be great if qemu could switch to a new PCI ID after this is
> fixed as that simplifies the quirking.

Luckily we can do that easier now since we moved away from the Intel id
(which got rid of a bunch of quirks at that time).

I'll see what we can come up with to fix this properly in QEMU.

Thanks for looking into it.



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-11  6:01                 ` Klaus Jensen
@ 2022-04-11  6:09                   ` Christoph Hellwig
  2022-04-11  6:11                     ` Klaus Jensen
  0 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-04-11  6:09 UTC (permalink / raw)
  To: Klaus Jensen
  Cc: Christoph Hellwig, Luis Chamberlain, Keith Busch, Sagi Grimberg,
	linux-nvme

On Mon, Apr 11, 2022 at 08:01:51AM +0200, Klaus Jensen wrote:
> 
> > I can also not find any code that would assign a different uuid
> > when using a different command line syntax, but given how unusable
> > the new syntax is I've not actually been able to try it.
> > 
> 
> Are you referring to -device nvme-ns "syntax"?

Yes.

> Luckily we can do that easier now since we moved away from the Intel id
> (which got rid of a bunch of quirks at that time).
> 
> I'll see what we can come up with to fix this properly in QEMU.

Thanks.  Please Cc me on the patches.  My qemu-foo is getting a little
rusty, but I should still be able to help with the spec side.



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-11  6:09                   ` Christoph Hellwig
@ 2022-04-11  6:11                     ` Klaus Jensen
  0 siblings, 0 replies; 51+ messages in thread
From: Klaus Jensen @ 2022-04-11  6:11 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Luis Chamberlain, Keith Busch, Sagi Grimberg, linux-nvme


On Apr 11 08:09, Christoph Hellwig wrote:
> On Mon, Apr 11, 2022 at 08:01:51AM +0200, Klaus Jensen wrote:
> > 
> > > I can also not find any code that would assign a different uuid
> > > when using a different command line syntax, but given how unusable
> > > the new syntax is I've not actually been able to try it.
> > > 
> > 
> > Are you referring to -device nvme-ns "syntax"?
> 
> Yes.
> 

Point taken.

> > Luckily we can do that easier now since we moved away from the Intel id
> > (which got rid of a bunch of quirks at that time).
> > 
> > I'll see what we can come up with to fix this properly in QEMU.
> 
> Thanks.  Please Cc me on the patches.  My qemu-foo is getting a little
> rusty, but I should still be able to help with the spec side.

Will do. Thanks.



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-11  5:54               ` Christoph Hellwig
  2022-04-11  6:01                 ` Klaus Jensen
@ 2022-04-12 18:46                 ` Luis Chamberlain
  2022-04-18 23:30                 ` Alan Adamson
  2 siblings, 0 replies; 51+ messages in thread
From: Luis Chamberlain @ 2022-04-12 18:46 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Klaus Jensen, Keith Busch, Sagi Grimberg, linux-nvme

On Mon, Apr 11, 2022 at 07:54:55AM +0200, Christoph Hellwig wrote:
> It would be great if qemu could switch to a new PCI ID after this is
> fixed as that simplifies the quirking.

Thanks.  I think it's worth reiterating why:

The most directly visible problem is that this breaks the
/dev/disk/by-id/ links - all devices would have to point to the same
one, and which one actually shows up is somewhat random.  This basically
means random corruption if people actually use the links or the uuid
functionality in any other way.

  Luis



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-11  5:54               ` Christoph Hellwig
  2022-04-11  6:01                 ` Klaus Jensen
  2022-04-12 18:46                 ` Luis Chamberlain
@ 2022-04-18 23:30                 ` Alan Adamson
  2022-04-20  7:36                   ` Christoph Hellwig
  2 siblings, 1 reply; 51+ messages in thread
From: Alan Adamson @ 2022-04-18 23:30 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Luis Chamberlain, Klaus Jensen, Keith Busch, Sagi Grimberg, linux-nvme

I ran into an issue with blktests and my qemu setup when passthru is enabled.  The passthru tests
do not complete.  This was because the UUID for the passthru device is coming from a device
on the same system since the fabric was set up as a loop and nvme_global_check_duplicate_ids() fails.

This is probably not a valid real life configuration, but since blktests try to test fabrics on a single
system, we have this issue.

I’ve hacked together a fix to manipulate Namespace Identifiers to get the test to work.  Is there a
way for blktests to hardcode the IDs for the passthru devices?

Alan

diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..e3db53b232e6 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -159,6 +159,40 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 	return status;
 }
 
+static u16 nvmet_passthru_override_ns_id_desc(struct nvmet_req *req)
+{
+	u16 status = NVME_SC_SUCCESS;
+	char id_desc[2];
+	int i = 0;
+
+	status = nvmet_copy_from_sgl(req, i, id_desc, 2);
+	if (status)
+		return status;
+
+	while (id_desc[1]) {
+		int len;
+
+		len = id_desc[1];
+		i += 4;
+
+		if ((id_desc[0] == 1) || (id_desc[0] == 2) || (id_desc[0] == 3)) {
+			status = nvmet_copy_from_sgl(req, i, id_desc, 2);
+			if (status)
+				return status;
+			id_desc[0]++; /* Change 1st character of the NID */
+			status = nvmet_copy_to_sgl(req, i, id_desc, 2);
+			if (status)
+				return status;
+		} 
+		i += len;
+		status = nvmet_copy_from_sgl(req, i, id_desc, 2);
+		if (status)
+			return status;
+	}
+	return status;
+}
+
+
 static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 {
 	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
@@ -176,7 +210,11 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 		case NVME_ID_CNS_NS:
 			nvmet_passthru_override_id_ns(req);
 			break;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			nvmet_passthru_override_ns_id_desc(req);
+			break;
 		}
+
 	} else if (status < 0)
 		status = NVME_SC_INTERNAL;
 
-- 
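
To decode the magic numbers in the hack above: each entry in the Identify
Namespace Identification Descriptor list starts with a 4-byte header (NIDT
type, NIDL length, two reserved bytes) followed by the identifier itself,
and types 1/2/3 are EUI-64, NGUID and UUID.  A self-contained sketch of the
same walk over an in-memory copy of the list (an illustration, not part of
the patch):

#include <stddef.h>
#include <stdint.h>

enum { NIDT_EUI64 = 1, NIDT_NGUID = 2, NIDT_UUID = 3 };

static void perturb_ns_id_descs(uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + 4 <= len && buf[off + 1]) {
		uint8_t nidt = buf[off];	/* descriptor type */
		uint8_t nidl = buf[off + 1];	/* identifier length */

		if (off + 4 + nidl > len)
			break;			/* malformed entry */
		if (nidt == NIDT_EUI64 || nidt == NIDT_NGUID ||
		    nidt == NIDT_UUID)
			buf[off + 4]++;		/* perturb first NID byte */
		off += 4 + nidl;		/* 4-byte header + NIDL bytes */
	}
}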



> On Apr 10, 2022, at 10:54 PM, Christoph Hellwig <hch@lst.de> wrote:
> 
> On Mon, Apr 11, 2022 at 07:05:33AM +0200, Christoph Hellwig wrote:
>>> However, what I'm seeing seems to show that the uuid is the same uuid as
>>> well when not using -device nvme-ns but just -device nvme (this is
>>> called legacy now it seems?): without the uuid set you end up in the
>>> situation I described. I just destroyed my guests and started a setup
>>> from scratch using qemu-system-x86_64 v6.2.0 on debian-testing, and
>>> ended up in a different situation, but it is still a bit perplexing.
>> 
>> With my usual qemu test setup (built from a git a few weeks ago), no
>> uuid shows up unless explicitly set.
> 
> Digging a bit deeper this was "fixed" by:
> 
> 5f4884c4412318a1adc105dea9cc28f7625ce730
> Author: Klaus Jensen <k.jensen@samsung.com>
> Date:   Mon Aug 9 12:34:40 2021 +0200
> 
>    hw/nvme: fix missing variable initializers
> 
>    Coverity found that 'uuid', 'csi' and 'eui64' are uninitialized.
>    While we set most of the fields, we do not explicitly set the rsvd2
>    field in the NvmeIdNsDescr header.
> 		         
>    Fix this by explicitly zero-initializing the variables.
> 
> Note that even with the fix the uuid field is always reported, even
> when it shouldn't be - it's just that Linux handles a 0 UUID gracefully.
> 
> I can also not find any code that would assign a different uuid
> when using a different command line syntax, but given how unusable
> the new syntax is I've not actually been able to try it.
> 
> So I think for now we'll just need to disable identifiers on qemu.
> 
> It would be great if qemu could switch to a new PCI ID after this is
> fixed as that simplifies the quirking.
> 



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-18 23:30                 ` Alan Adamson
@ 2022-04-20  7:36                   ` Christoph Hellwig
  2022-06-06 20:35                     ` Alan Adamson
  0 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-04-20  7:36 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Keith Busch,
	Sagi Grimberg, linux-nvme

On Mon, Apr 18, 2022 at 11:30:33PM +0000, Alan Adamson wrote:
> I ran into an issue with blktests and my qemu setup when passthru is enabled.  The passthru tests
> do not complete.  This was because the UUID for the passthru device is coming from a device
> on the same system since the fabric was set up as a loop and nvme_global_check_duplicate_ids() fails.
> 
> This is probably not a valid real life configuration, but since blktests try to test fabrics on a single
> system, we have this issue.
> 
> I’ve hacked together a fix to manipulate Namespace Identifiers to get the test to work.  Is there a
> way for blktests to hardcode the IDs for the passthru devices?

Hmm.  I suspect the best thing would be to optionally just clear the
IDs entirely.  Optionally as in maybe a fabrics connect argument,
with it defaulting to true only for loop as that is by definition
local.



* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-04-20  7:36                   ` Christoph Hellwig
@ 2022-06-06 20:35                     ` Alan Adamson
  2022-06-06 21:38                       ` Keith Busch
  2022-06-08  7:52                       ` Christoph Hellwig
  0 siblings, 2 replies; 51+ messages in thread
From: Alan Adamson @ 2022-06-06 20:35 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Luis Chamberlain, Klaus Jensen, Keith Busch, Sagi Grimberg, linux-nvme



> On Apr 20, 2022, at 12:36 AM, Christoph Hellwig <hch@lst.de> wrote:
> 
> On Mon, Apr 18, 2022 at 11:30:33PM +0000, Alan Adamson wrote:
>> I ran into an issue with blktests and my qemu setup when passthru is enabled.  The passthru tests
>> do not complete.  This was because the UUID for the passthru device is coming from the a device
>> from the same system since the fabric was setup as a loop and nvme_global_check_duplicate_ids() fails.
>> 
>> This is probably not a valid real life configuration, but since blktests try to test fabrics on a single
>> system, we have this issue.
>> 
>> I’ve hacked together a fix to manipulate Namespace Identifiers to get the test to work.  Is there a
>> way for blktests to hardcode the IDs for the passthru devices?
> 
> Hmm.  I suspect the best thing would be to optionally just clear the
> IDs entirely.  Optionally as in maybe a fabrics connect argument,
> with it defaulting to true only for loop as that is by definition
> local.

Here are the changes to support a clear-ids nvme connect argument.  I want to float these changes prior
to sending a formal patch out.

Changes to the nvme driver, nvme-cli, and blktests are required.

Thanks,

Alan


nvme driver changes:


diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 72f7c955c707..ea1274cdefda 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3852,9 +3852,15 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 
 	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
 	if (ret) {
-		dev_err(ctrl->device,
-			"globally duplicate IDs for nsid %d\n", nsid);
-		return ret;
+		if (ctrl->opts && ctrl->opts->clear_ids) {
+			uuid_copy(&ids->uuid, &uuid_null);
+			memset(&ids->nguid, 0, sizeof(ids->nguid));
+			memset(&ids->eui64, 0, sizeof(ids->eui64));
+		} else {
+			dev_err(ctrl->device,
+				"globally duplicate IDs for nsid %d\n", nsid);
+			return ret;
+		}
 	}
 
 	mutex_lock(&ctrl->subsys->lock);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..0022767a3a37 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
 	{ NVMF_OPT_TOS,			"tos=%d"		},
 	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
 	{ NVMF_OPT_DISCOVERY,		"discovery"		},
+	{ NVMF_OPT_CLEAR_IDS,		"clear_ids"		},
 	{ NVMF_OPT_ERR,			NULL			}
 };
 
@@ -571,6 +572,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	opts->hdr_digest = false;
 	opts->data_digest = false;
 	opts->tos = -1; /* < 0 == use transport default */
+	opts->clear_ids = false;
 
 	options = o = kstrdup(buf, GFP_KERNEL);
 	if (!options)
@@ -593,6 +595,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			}
 			kfree(opts->transport);
 			opts->transport = p;
+			if (!strcmp(p, "loop"))
+				opts->clear_ids = true;
 			break;
 		case NVMF_OPT_NQN:
 			p = match_strdup(args);
@@ -829,6 +833,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 		case NVMF_OPT_DISCOVERY:
 			opts->discovery_nqn = true;
 			break;
+		case NVMF_OPT_CLEAR_IDS:
+			opts->clear_ids = true;
+			break;
 		default:
 			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
 				p);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..ecf7f2e9fb4a 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,7 @@ enum {
 	NVMF_OPT_FAIL_FAST_TMO	= 1 << 20,
 	NVMF_OPT_HOST_IFACE	= 1 << 21,
 	NVMF_OPT_DISCOVERY	= 1 << 22,
+	NVMF_OPT_CLEAR_IDS	= 1 << 23,
 };
 
 /**
@@ -104,6 +105,7 @@ enum {
  * @nr_poll_queues: number of queues for polling I/O
  * @tos: type of service
  * @fast_io_fail_tmo: Fast I/O fail timeout in seconds
+ * @clear_ids: clear ids on connect
  */
 struct nvmf_ctrl_options {
 	unsigned		mask;
@@ -128,6 +130,7 @@ struct nvmf_ctrl_options {
 	unsigned int		nr_poll_queues;
 	int			tos;
 	int			fast_io_fail_tmo;
+	bool			clear_ids;
 };
 
 /*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f2a5e1ea508a..9e376bbf5eef 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2438,7 +2438,7 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
-			  NVMF_OPT_TOS,
+			  NVMF_OPT_TOS | NVMF_OPT_CLEAR_IDS,
 	.create_ctrl	= nvme_rdma_create_ctrl,
 };
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bb67538d241b..7624baa11d19 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2691,7 +2691,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
-			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
+			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE |
+			  NVMF_OPT_CLEAR_IDS,
 	.create_ctrl	= nvme_tcp_create_ctrl,
 };
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..1222f4c88aa4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -697,7 +697,7 @@ static struct nvmf_transport_ops nvme_loop_transport = {
 	.name		= "loop",
 	.module		= THIS_MODULE,
 	.create_ctrl	= nvme_loop_create_ctrl,
-	.allowed_opts	= NVMF_OPT_TRADDR,
+	.allowed_opts	= NVMF_OPT_TRADDR | NVMF_OPT_CLEAR_IDS,
 };
 
 static int __init nvme_loop_init_module(void)



nvme-cli changes:

diff --git a/fabrics.c b/fabrics.c
index b89de252f58d..b347e5f2706f 100644
--- a/fabrics.c
+++ b/fabrics.c
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2016 Intel Corporation. All rights reserved.
  * Copyright (c) 2016 HGST, a Western Digital Company.
@@ -78,6 +77,7 @@ static const char *nvmf_dup_connect   = "allow duplicate connections between same
 static const char *nvmf_disable_sqflow = "disable controller sq flow control (default false)";
 static const char *nvmf_hdr_digest     = "enable transport protocol header digest (TCP transport)";
 static const char *nvmf_data_digest    = "enable transport protocol data digest (TCP transport)";
+static const char *nvmf_clear_ids      = "Clear Namespace Identifiers upon connect";
 static const char *nvmf_config_file    = "Use specified JSON configuration file or 'none' to disable";
 
 #define NVMF_OPTS(c)                                                                   \
@@ -102,7 +102,8 @@ static const char *nvmf_config_file = "Use specified JSON configuration file or
        OPT_FLAG("duplicate-connect", 'D', &c.duplicate_connect,  nvmf_dup_connect),    \
        OPT_FLAG("disable-sqflow",    'd', &c.disable_sqflow,     nvmf_disable_sqflow), \
        OPT_FLAG("hdr-digest",        'g', &c.hdr_digest,         nvmf_hdr_digest),     \
-       OPT_FLAG("data-digest",       'G', &c.data_digest,        nvmf_data_digest)     \
+       OPT_FLAG("data-digest",       'G', &c.data_digest,        nvmf_data_digest),    \
+       OPT_FLAG("clear-ids",         'e', &c.clear_ids,          nvmf_clear_ids)       \
 
 struct tr_config {
        char *subsysnqn;

blktests changes (phase 1):

Since it will take a while until a new clear-ids enabled version of nvme-cli is out, we can just limit
the passthru tests to run only for loop.


diff --git a/tests/nvme/033 b/tests/nvme/033
index c6a3f7feb50e..5d6dc1fc2676 100755
--- a/tests/nvme/033
+++ b/tests/nvme/033
@@ -11,6 +11,7 @@ QUICK=1
 requires() {
        _nvme_requires
        _have_kernel_option NVME_TARGET_PASSTHRU
+       _require_nvme_trtype_is_loop
 }
 
 nvme_info() {
diff --git a/tests/nvme/034 b/tests/nvme/034
index f92e5e20865b..3fa194466ad8 100755
--- a/tests/nvme/034
+++ b/tests/nvme/034
@@ -12,6 +12,7 @@ requires() {
        _nvme_requires
        _have_kernel_option NVME_TARGET_PASSTHRU
        _have_fio
+       _require_nvme_trtype_is_loop
 }
 
 test_device() {
diff --git a/tests/nvme/035 b/tests/nvme/035
index ee78a7586f35..160d39db11b7 100755
--- a/tests/nvme/035
+++ b/tests/nvme/035
@@ -14,6 +14,7 @@ requires() {
        _have_kernel_option NVME_TARGET_PASSTHRU
        _have_xfs
        _have_fio
+       _require_nvme_trtype_is_loop
 }
 
 test_device() {
diff --git a/tests/nvme/036 b/tests/nvme/036
index 8218c6538dfd..4afb5684f2cf 100755
--- a/tests/nvme/036
+++ b/tests/nvme/036
@@ -11,6 +11,7 @@ QUICK=1
 requires() {
        _nvme_requires
        _have_kernel_option NVME_TARGET_PASSTHRU
+       _require_nvme_trtype_is_loop
 }
 
 test_device() {
diff --git a/tests/nvme/037 b/tests/nvme/037
index fc6c21343652..2e70ad19c0e0 100755
--- a/tests/nvme/037
+++ b/tests/nvme/037
@@ -10,6 +10,7 @@ DESCRIPTION="test deletion of NVMeOF passthru controllers immediately after setu
 requires() {
        _nvme_requires
        _have_kernel_option NVME_TARGET_PASSTHRU
+       _require_nvme_trtype_is_loop
 }
 


blktests changes (phase 2):

Once a clear-ids enabled version of nvme-cli is available, the above
changes can be reverted and the below changes will work for all
trtypes.


diff --git a/tests/nvme/rc b/tests/nvme/rc
index ccdccf9cbf9a..3271e0e22c5b 100644
--- a/tests/nvme/rc
+++ b/tests/nvme/rc
@@ -172,7 +172,7 @@ _nvme_connect_subsys() {
        local traddr="${3:-$def_traddr}"
        local trsvcid="${4:-$def_trsvcid}"
 
-       ARGS=(-t "${trtype}" -n "${subsysnqn}")
+       ARGS=(-t "${trtype}" -n "${subsysnqn}" "${5}")
        if [[ "${trtype}" != "loop" ]]; then
                ARGS+=(-a "${traddr}" -s "${trsvcid}")
        fi
@@ -345,7 +345,9 @@ _nvmet_passthru_target_connect() {
        local trtype=$1
        local subsys_name=$2
 
-       _nvme_connect_subsys "${trtype}" "${subsys_name}" || return
+       # XXX - We will need to check nvme-cli version before using clear-ids
+       _nvme_connect_subsys "${trtype}" "${subsys_name}" "${def_traddr}" \
+           "${def_trsvcid}" "--clear-ids" || return
        nsdev=$(_find_nvme_passthru_loop_dev "${subsys_name}")
 
        # The following tests can race with the creation






^ permalink raw reply related	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 20:35                     ` Alan Adamson
@ 2022-06-06 21:38                       ` Keith Busch
  2022-06-06 21:51                         ` Alan Adamson
  2022-06-08  7:52                       ` Christoph Hellwig
  1 sibling, 1 reply; 51+ messages in thread
From: Keith Busch @ 2022-06-06 21:38 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme

On Mon, Jun 06, 2022 at 08:35:39PM +0000, Alan Adamson wrote:
> 
> 
> > On Apr 20, 2022, at 12:36 AM, Christoph Hellwig <hch@lst.de> wrote:
> > 
> > On Mon, Apr 18, 2022 at 11:30:33PM +0000, Alan Adamson wrote:
> >> I ran into an issue with blktests and my qemu setup when passthru is enabled.  The passthru tests
> >> do not complete.  This was because the UUID for the passthru device is coming from a device
> >> from the same system since the fabric was set up as a loop and nvme_global_check_duplicate_ids() fails.
> >> 
> >> This is probably not a valid real life configuration, but since blktests try to test fabrics on a single
> >> system, we have this issue.
> >> 
> >> I’ve hacked together a fix to manipulate Namespace Identifiers to get the test to work.  Is there a
> >> way for blktests to hardcode the IDs for the passthru devices?
> > 
> > Hmm.  I suspect the best thing would be to optionally just clear the
> > IDs entirely.  Optionally as in maybe a fabrics connect argument,
> > with it defaulting to true only for loop as that is per definition
> > local.
> 
> Here are the changes to support a clear-ids nvme connect argument.  Want to float these changes prior
> to sending a formal patch out.

Shouldn't the nvme-passthrough target just change the cntlid so it looks like
two controllers in the same subsystem? That would just need to stop overriding
the subsysnqn, which is what currently happens.


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 21:38                       ` Keith Busch
@ 2022-06-06 21:51                         ` Alan Adamson
  2022-06-06 21:58                           ` Keith Busch
  0 siblings, 1 reply; 51+ messages in thread
From: Alan Adamson @ 2022-06-06 21:51 UTC (permalink / raw)
  To: Keith Busch
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme



> On Jun 6, 2022, at 2:38 PM, Keith Busch <kbusch@kernel.org> wrote:
> 
> On Mon, Jun 06, 2022 at 08:35:39PM +0000, Alan Adamson wrote:
>> 
>> 
>>> On Apr 20, 2022, at 12:36 AM, Christoph Hellwig <hch@lst.de> wrote:
>>> 
>>> On Mon, Apr 18, 2022 at 11:30:33PM +0000, Alan Adamson wrote:
>>>> I ran into an issue with blktests and my qemu setup when passthru is enabled.  The passthru tests
> >>>> do not complete.  This was because the UUID for the passthru device is coming from a device
> >>>> from the same system since the fabric was set up as a loop and nvme_global_check_duplicate_ids() fails.
>>>> 
>>>> This is probably not a valid real life configuration, but since blktests try to test fabrics on a single
>>>> system, we have this issue.
>>>> 
> >>>> I’ve hacked together a fix to manipulate Namespace Identifiers to get the test to work.  Is there a
> >>>> way for blktests to hardcode the IDs for the passthru devices?
>>> 
>>> Hmm.  I suspect the best thing would be to optionally just clear the
> >>> IDs entirely.  Optionally as in maybe a fabrics connect argument,
>>> with it defaulting to true only for loop as that is per definition
>>> local.
>> 
>> Here are the changes to support a clear-ids nvme connect argument.  Want to float these changes prior
>> to sending a formal patch out.
> 
> Shouldn't the nvme-passthrough target just change the cntlid so it looks like
> two controllers in the same subsystem? That would just need to stop overriding
> the subsysnqn, which is what currently happens.

That was my original proposal a while ago, though I wasn’t certain what to change. Should the UUID/GUID just be zeroed out?  Is it ok to do that for all passthru targets?

Alan


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 21:51                         ` Alan Adamson
@ 2022-06-06 21:58                           ` Keith Busch
  2022-06-06 23:11                             ` Alan Adamson
  0 siblings, 1 reply; 51+ messages in thread
From: Keith Busch @ 2022-06-06 21:58 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme

On Mon, Jun 06, 2022 at 09:51:55PM +0000, Alan Adamson wrote:
> > On Jun 6, 2022, at 2:38 PM, Keith Busch <kbusch@kernel.org> wrote:
> > 
> > Shouldn't the nvme-passthrough target just change the cntlid so it looks like
> > two controllers in the same subsystem? That would just need to stop overriding
> > the subsysnqn, which is what currently happens.
> 
> That was my original proposal a while ago, though I wasn’t certain what to change. Should the UUID/GUID just be zeroed out?  Is it ok to do that for all passthru targets?

Can't the namespace IDs be left unmodified? The loop and pcie targets
would just look like two paths to the same namespace, which is essentially what
they are.


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 21:58                           ` Keith Busch
@ 2022-06-06 23:11                             ` Alan Adamson
  2022-06-07 19:01                               ` Keith Busch
  2022-06-08  7:48                               ` Christoph Hellwig
  0 siblings, 2 replies; 51+ messages in thread
From: Alan Adamson @ 2022-06-06 23:11 UTC (permalink / raw)
  To: Keith Busch
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme



> On Jun 6, 2022, at 2:58 PM, Keith Busch <kbusch@kernel.org> wrote:
> 
> On Mon, Jun 06, 2022 at 09:51:55PM +0000, Alan Adamson wrote:
>>> On Jun 6, 2022, at 2:38 PM, Keith Busch <kbusch@kernel.org> wrote:
>>> 
>>> Shouldn't the nvme-passthrough target just change the cntlid so it looks like
>>> two controllers in the same subsystem? That would just need to stop overriding
>>> the subsysnqn, which is what currently happens.
>> 
>> That was my original proposal a while ago, though I wasn’t certain what to change. Should the UUID/GUID just be zeroed out?  Is it ok to do that for all passthru targets?
> 
> Can't the namespace IDs be left unmodified? The loop and pcie targets
> would just look like two paths to the same namespace, which is essentially what
> they are.

nvme_global_check_duplicate_ids() fails when the IDs are the same.  They are kinda
shared namespaces and we can set nmic on the loop target, but the pcie target is
unaware of it being shared.

Alan

^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 23:11                             ` Alan Adamson
@ 2022-06-07 19:01                               ` Keith Busch
  2022-06-08  7:48                               ` Christoph Hellwig
  1 sibling, 0 replies; 51+ messages in thread
From: Keith Busch @ 2022-06-07 19:01 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme

On Mon, Jun 06, 2022 at 11:11:22PM +0000, Alan Adamson wrote:
> 
> 
> > On Jun 6, 2022, at 2:58 PM, Keith Busch <kbusch@kernel.org> wrote:
> > 
> > On Mon, Jun 06, 2022 at 09:51:55PM +0000, Alan Adamson wrote:
> >>> On Jun 6, 2022, at 2:38 PM, Keith Busch <kbusch@kernel.org> wrote:
> >>> 
> >>> Shouldn't the nvme-passthrough target just change the cntlid so it looks like
> >>> two controllers in the same subsystem? That would just need to stop overriding
> >>> the subsysnqn, which is what currently happens.
> >> 
> >> That was my original proposal a while ago, though I wasn’t certain what to change. Should the UUID/GUID just be zeroed out?  Is it ok to do that for all passthru targets?
> > 
> > Can't the namespace IDs be left unmodified? The loop and pcie targets
> > would just look like two paths to the same namespace, which is essentially what
> > they are.
> 
> nvme_global_check_duplicate_ids() fails when the IDs are the same.  They are kinda
> shared namespaces and we can set nmic on the loop target, but the pcie target is
> unaware of it being shared.

Gotcha, the pcie target potentially lacking cmic/nmic awareness does appear to
make my idea unreasonable to implement. Thanks for the follow up info.


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 23:11                             ` Alan Adamson
  2022-06-07 19:01                               ` Keith Busch
@ 2022-06-08  7:48                               ` Christoph Hellwig
  1 sibling, 0 replies; 51+ messages in thread
From: Christoph Hellwig @ 2022-06-08  7:48 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Keith Busch, Christoph Hellwig, Luis Chamberlain, Klaus Jensen,
	Sagi Grimberg, linux-nvme

On Mon, Jun 06, 2022 at 11:11:22PM +0000, Alan Adamson wrote:
> nvme_global_check_duplicate_ids() fails when the IDs are the same.  They are kinda
> shared namespaces and we can set nmic on the loop target, but the pcie target is
> unaware of it being shared.

Yeah, we'd have to change cmic and nmic reporting for the underlying
device (which doesn't have to be PCIe) exported through the target when
the loop driver is used.
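
Roughly something like this (untested sketch, not from any posted patch;
the helper name is invented, the flag names are the cmic/nmic bits from
include/linux/nvme.h, and it would be called from the existing
nvmet_passthru_override_id_ctrl/_id_ns overrides):

static void nvmet_passthru_override_shared(struct nvme_id_ctrl *id_ctrl,
		struct nvme_id_ns *id_ns)
{
	/*
	 * Pretend the subsystem has multiple controllers and that the
	 * namespace is shared, so the host would treat the loop target
	 * and the underlying device as two paths to the same namespace.
	 */
	if (id_ctrl)
		id_ctrl->cmic |= NVME_CTRL_CMIC_MULTI_CTRL;
	if (id_ns)
		id_ns->nmic |= NVME_NS_NMIC_SHARED;
}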


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 20:35                     ` Alan Adamson
  2022-06-06 21:38                       ` Keith Busch
@ 2022-06-08  7:52                       ` Christoph Hellwig
  2022-06-08 18:11                         ` Alan Adamson
  1 sibling, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-06-08  7:52 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Keith Busch,
	Sagi Grimberg, linux-nvme

On Mon, Jun 06, 2022 at 08:35:39PM +0000, Alan Adamson wrote:
> nvme driver changes:

Thanks! 

>  
>  	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
>  	if (ret) {
> -		dev_err(ctrl->device,
> -			"globally duplicate IDs for nsid %d\n", nsid);
> -		return ret;
> +		if (ctrl->opts && ctrl->opts->clear_ids) {
> +			uuid_copy(&ids->uuid, &uuid_null);
> +			memset(&ids->nguid, 0, sizeof(ids->nguid));
> +			memset(&ids->eui64, 0, sizeof(ids->eui64));
> +		} else {
> +			dev_err(ctrl->device,
> +				"globally duplicate IDs for nsid %d\n", nsid);
> +			return ret;
> +		}

I don't think this is the right place to clear the reported IDs.
The proper place would be in the target code in
nvmet_passthru_override_id_ns and a new
nvmet_passthru_override_id_ns_desc like it.  Otherwise we only catch
uses by the kernel driver and not other users of the IDs.

> @@ -593,6 +595,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
>  			}
>  			kfree(opts->transport);
>  			opts->transport = p;
> +			if (!strcmp(p, "loop"))
> +				opts->clear_ids = true;

And maybe add a comment here.



^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-08  7:52                       ` Christoph Hellwig
@ 2022-06-08 18:11                         ` Alan Adamson
  2022-06-08 19:04                           ` Keith Busch
  0 siblings, 1 reply; 51+ messages in thread
From: Alan Adamson @ 2022-06-08 18:11 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Luis Chamberlain, Klaus Jensen, Keith Busch, Sagi Grimberg, linux-nvme



> On Jun 8, 2022, at 12:52 AM, Christoph Hellwig <hch@lst.de> wrote:
> 
> On Mon, Jun 06, 2022 at 08:35:39PM +0000, Alan Adamson wrote:
>> nvme driver changes:
> 
> Thanks! 
> 
>> 
>> 	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
>> 	if (ret) {
>> -		dev_err(ctrl->device,
>> -			"globally duplicate IDs for nsid %d\n", nsid);
>> -		return ret;
>> +		if (ctrl->opts && ctrl->opts->clear_ids) {
>> +			uuid_copy(&ids->uuid, &uuid_null);
>> +			memset(&ids->nguid, 0, sizeof(ids->nguid));
>> +			memset(&ids->eui64, 0, sizeof(ids->eui64));
>> +		} else {
>> +			dev_err(ctrl->device,
>> +				"globally duplicate IDs for nsid %d\n", nsid);
>> +			return ret;
>> +		}
> 
> I don't think this is the right place to clear the reported IDs.
> The proper place would be in the target code in
> nvmet_passthru_override_id_ns and a new
> nvmet_passthru_override_id_ns_desc like it.  Otherwise we only catch
> uses by the kernel driver and not other users of the IDs.

How do we get the clear_ids setting from the connect to the target?

Thanks,

Alan

> 
>> @@ -593,6 +595,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
>> 			}
>> 			kfree(opts->transport);
>> 			opts->transport = p;
>> +			if (!strcmp(p, "loop"))
>> +				opts->clear_ids = true;
> 
> And maybe add a comment here.
> 



^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-08 18:11                         ` Alan Adamson
@ 2022-06-08 19:04                           ` Keith Busch
  2022-06-09  0:30                             ` Chaitanya Kulkarni
  2022-06-09  3:53                             ` Christoph Hellwig
  0 siblings, 2 replies; 51+ messages in thread
From: Keith Busch @ 2022-06-08 19:04 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme

On Wed, Jun 08, 2022 at 06:11:04PM +0000, Alan Adamson wrote:
> > On Jun 8, 2022, at 12:52 AM, Christoph Hellwig <hch@lst.de> wrote:
> 
> How do we get the clear_ids setting from the connect to the target?

I'm assuming something like this (untested):

---
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..3eaf463d0567 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -687,6 +687,7 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
 static const struct nvmet_fabrics_ops nvme_loop_ops = {
 	.owner		= THIS_MODULE,
 	.type		= NVMF_TRTYPE_LOOP,
+	.flags		= NVMF_CLEAR_NS_DESCS,
 	.add_port	= nvme_loop_add_port,
 	.remove_port	= nvme_loop_remove_port,
 	.queue_response = nvme_loop_queue_response,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..03ce045af54e 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -300,6 +300,7 @@ struct nvmet_fabrics_ops {
 	unsigned int flags;
 #define NVMF_KEYED_SGLS			(1 << 0)
 #define NVMF_METADATA_SUPPORTED		(1 << 1)
+#define NVMF_CLEAR_NS_DESCS		(1 << 3)
 	void (*queue_response)(struct nvmet_req *req);
 	int (*add_port)(struct nvmet_port *port);
 	void (*remove_port)(struct nvmet_port *port);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b1f7efab3918..9fa5bc0e26d0 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,19 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
 		ctrl->cap &= ~(1ULL << 43);
 }
 
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	u16 ret = NVME_SC_SUCCESS;
+
+	if (ctrl->ops->flags & NVMF_CLEAR_NS_DESCS) {
+		ret = NVME_SC_INVALID_FIELD;
+		nvmet_zero_sgl(req, 0, 0x1000);
+	}
+
+	return ret;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -176,6 +189,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 		case NVME_ID_CNS_NS:
 			nvmet_passthru_override_id_ns(req);
 			break;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			nvmet_passthru_override_id_descs(req);
+			break;
 		}
 	} else if (status < 0)
 		status = NVME_SC_INTERNAL;
--


^ permalink raw reply related	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-08 19:04                           ` Keith Busch
@ 2022-06-09  0:30                             ` Chaitanya Kulkarni
  2022-06-09 15:11                               ` Alan Adamson
  2022-06-09  3:53                             ` Christoph Hellwig
  1 sibling, 1 reply; 51+ messages in thread
From: Chaitanya Kulkarni @ 2022-06-09  0:30 UTC (permalink / raw)
  To: Keith Busch, Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme

On 6/8/22 12:04, Keith Busch wrote:
> On Wed, Jun 08, 2022 at 06:11:04PM +0000, Alan Adamson wrote:
>>> On Jun 8, 2022, at 12:52 AM, Christoph Hellwig <hch@lst.de> wrote:
>>
>> How do we get the clear_ids setting from the connect to the target?
> 
> I'm assuming something like this (untested):
> 
> ---
> diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
> index 59024af2da2e..3eaf463d0567 100644
> --- a/drivers/nvme/target/loop.c
> +++ b/drivers/nvme/target/loop.c
> @@ -687,6 +687,7 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
>   static const struct nvmet_fabrics_ops nvme_loop_ops = {
>   	.owner		= THIS_MODULE,
>   	.type		= NVMF_TRTYPE_LOOP,
> +	.flags		= NVMF_CLEAR_NS_DESCS,
>   	.add_port	= nvme_loop_add_port,
>   	.remove_port	= nvme_loop_remove_port,
>   	.queue_response = nvme_loop_queue_response,
> diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
> index 69818752a33a..03ce045af54e 100644
> --- a/drivers/nvme/target/nvmet.h
> +++ b/drivers/nvme/target/nvmet.h
> @@ -300,6 +300,7 @@ struct nvmet_fabrics_ops {
>   	unsigned int flags;
>   #define NVMF_KEYED_SGLS			(1 << 0)
>   #define NVMF_METADATA_SUPPORTED		(1 << 1)
> +#define NVMF_CLEAR_NS_DESCS		(1 << 3)
>   	void (*queue_response)(struct nvmet_req *req);
>   	int (*add_port)(struct nvmet_port *port);
>   	void (*remove_port)(struct nvmet_port *port);
> diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
> index b1f7efab3918..9fa5bc0e26d0 100644
> --- a/drivers/nvme/target/passthru.c
> +++ b/drivers/nvme/target/passthru.c
> @@ -30,6 +30,19 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
>   		ctrl->cap &= ~(1ULL << 43);
>   }
>   
> +static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
> +{
> +	struct nvmet_ctrl *ctrl = req->sq->ctrl;
> +	u16 ret = NVME_SC_SUCCESS;
> +
> +	if (ctrl->ops->flags & NVMF_CLEAR_NS_DESCS) {
> +		ret = NVME_SC_INVALID_FIELD;
> +		nvmet_zero_sgl(req, 0, 0x1000);
> +	}
> +
> +	return ret;
> +}
> +
>   static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
>   {
>   	struct nvmet_ctrl *ctrl = req->sq->ctrl;
> @@ -176,6 +189,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
>   		case NVME_ID_CNS_NS:
>   			nvmet_passthru_override_id_ns(req);
>   			break;
> +		case NVME_ID_CNS_NS_DESC_LIST:
> +			nvmet_passthru_override_id_descs(req);
> +			break;
>   		}
>   	} else if (status < 0)
>   		status = NVME_SC_INTERNAL;
> --
> 

this problem has existed from the early days of passthru;
I think something like this will definitely be useful.

Alan, can you give it a test?

-ck



^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-08 19:04                           ` Keith Busch
  2022-06-09  0:30                             ` Chaitanya Kulkarni
@ 2022-06-09  3:53                             ` Christoph Hellwig
  2022-06-10  0:27                               ` Alan Adamson
  1 sibling, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-06-09  3:53 UTC (permalink / raw)
  To: Keith Busch
  Cc: Alan Adamson, Christoph Hellwig, Luis Chamberlain, Klaus Jensen,
	Sagi Grimberg, linux-nvme

On Wed, Jun 08, 2022 at 01:04:48PM -0600, Keith Busch wrote:
> On Wed, Jun 08, 2022 at 06:11:04PM +0000, Alan Adamson wrote:
> > > On Jun 8, 2022, at 12:52 AM, Christoph Hellwig <hch@lst.de> wrote:
> > 
> > How do we get the clear_ids setting from the connect to the target?
> 
> I'm assuming something like this (untested):

This is a good start.  I think we still want to allow setting it
in the fabrics opts so it can be cleared for, say, a local passthrough
tcp connection.

And of course clear the identifiers in Identify Namespace as well.


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-09  0:30                             ` Chaitanya Kulkarni
@ 2022-06-09 15:11                               ` Alan Adamson
  0 siblings, 0 replies; 51+ messages in thread
From: Alan Adamson @ 2022-06-09 15:11 UTC (permalink / raw)
  To: Chaitanya Kulkarni
  Cc: Keith Busch, Christoph Hellwig, Luis Chamberlain, Klaus Jensen,
	Sagi Grimberg, linux-nvme



> On Jun 8, 2022, at 5:30 PM, Chaitanya Kulkarni <chaitanyak@nvidia.com> wrote:
> 
> On 6/8/22 12:04, Keith Busch wrote:
>> On Wed, Jun 08, 2022 at 06:11:04PM +0000, Alan Adamson wrote:
>>>> On Jun 8, 2022, at 12:52 AM, Christoph Hellwig <hch@lst.de> wrote:
>>> 
>>> How do we get the clear_ids setting from the connect to the target?
>> 
>> I'm assuming something like this (untested):
>> 
>> ---
>> diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
>> index 59024af2da2e..3eaf463d0567 100644
>> --- a/drivers/nvme/target/loop.c
>> +++ b/drivers/nvme/target/loop.c
>> @@ -687,6 +687,7 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
>>  static const struct nvmet_fabrics_ops nvme_loop_ops = {
>>  	.owner		= THIS_MODULE,
>>  	.type		= NVMF_TRTYPE_LOOP,
>> +	.flags		= NVMF_CLEAR_NS_DESCS,
>>  	.add_port	= nvme_loop_add_port,
>>  	.remove_port	= nvme_loop_remove_port,
>>  	.queue_response = nvme_loop_queue_response,
>> diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
>> index 69818752a33a..03ce045af54e 100644
>> --- a/drivers/nvme/target/nvmet.h
>> +++ b/drivers/nvme/target/nvmet.h
>> @@ -300,6 +300,7 @@ struct nvmet_fabrics_ops {
>>  	unsigned int flags;
>>  #define NVMF_KEYED_SGLS			(1 << 0)
>>  #define NVMF_METADATA_SUPPORTED		(1 << 1)
>> +#define NVMF_CLEAR_NS_DESCS		(1 << 3)
>>  	void (*queue_response)(struct nvmet_req *req);
>>  	int (*add_port)(struct nvmet_port *port);
>>  	void (*remove_port)(struct nvmet_port *port);
>> diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
>> index b1f7efab3918..9fa5bc0e26d0 100644
>> --- a/drivers/nvme/target/passthru.c
>> +++ b/drivers/nvme/target/passthru.c
>> @@ -30,6 +30,19 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
>>  		ctrl->cap &= ~(1ULL << 43);
>>  }
>> 
>> +static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
>> +{
>> +	struct nvmet_ctrl *ctrl = req->sq->ctrl;
>> +	u16 ret = NVME_SC_SUCCESS;
>> +
>> +	if (ctrl->ops->flags & NVMF_CLEAR_NS_DESCS) {
>> +		ret = NVME_SC_INVALID_FIELD;
>> +		nvmet_zero_sgl(req, 0, 0x1000);
>> +	}
>> +
>> +	return ret;
>> +}
>> +
>>  static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
>>  {
>>  	struct nvmet_ctrl *ctrl = req->sq->ctrl;
>> @@ -176,6 +189,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
>>  		case NVME_ID_CNS_NS:
>>  			nvmet_passthru_override_id_ns(req);
>>  			break;
>> +		case NVME_ID_CNS_NS_DESC_LIST:
>> +			nvmet_passthru_override_id_descs(req);
>> +			break;
>>  		}
>>  	} else if (status < 0)
>>  		status = NVME_SC_INTERNAL;
>> --
>> 
> 
> this problem has existed from the early days of passthru;
> I think something like this will definitely be useful.
> 
> Alan, can you give it a test?

Yes, found an issue.  Working on a fix.

Alan

> 
> -ck
> 
> 



^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-09  3:53                             ` Christoph Hellwig
@ 2022-06-10  0:27                               ` Alan Adamson
  2022-06-10 14:12                                 ` Keith Busch
  0 siblings, 1 reply; 51+ messages in thread
From: Alan Adamson @ 2022-06-10  0:27 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Keith Busch, Luis Chamberlain, Klaus Jensen, Sagi Grimberg, linux-nvme



> On Jun 8, 2022, at 8:53 PM, Christoph Hellwig <hch@lst.de> wrote:
> 
> On Wed, Jun 08, 2022 at 01:04:48PM -0600, Keith Busch wrote:
>> On Wed, Jun 08, 2022 at 06:11:04PM +0000, Alan Adamson wrote:
>>>> On Jun 8, 2022, at 12:52 AM, Christoph Hellwig <hch@lst.de> wrote:
>>> 
>>> How do we get the clear_ids setting from the connect to the target?
>> 
>> I'm assuming something like this (untested):
> 
> This is a good start.  I think we still want to allow setting it
> in the fabrics opts so it can be cleared for, say, a local passthrough
> tcp connection.
> 
> And of course clear the identifiers in Identify Namespace as well.


This code works for nvme_trtype=loop.  Like Chris said, we should still have an nvme-cli
option to allow tcp to work.

Alan

diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..c7448c11c87f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,7 @@ enum {
        NVMF_OPT_FAIL_FAST_TMO  = 1 << 20,
        NVMF_OPT_HOST_IFACE     = 1 << 21,
        NVMF_OPT_DISCOVERY      = 1 << 22,
+       NVMF_OPT_CLEAR_IDS      = 1 << 23,
 };
 
 /**
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..6ba9e4bb011c 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -687,6 +687,7 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
 static const struct nvmet_fabrics_ops nvme_loop_ops = {
        .owner          = THIS_MODULE,
        .type           = NVMF_TRTYPE_LOOP,
+       .flags          = NVMF_CLEAR_NS_DESCS,
        .add_port       = nvme_loop_add_port,
        .remove_port    = nvme_loop_remove_port,
        .queue_response = nvme_loop_queue_response,
@@ -697,7 +698,7 @@ static struct nvmf_transport_ops nvme_loop_transport = {
        .name           = "loop",
        .module         = THIS_MODULE,
        .create_ctrl    = nvme_loop_create_ctrl,
-       .allowed_opts   = NVMF_OPT_TRADDR,
+       .allowed_opts   = NVMF_OPT_TRADDR | NVMF_OPT_CLEAR_IDS,
 };
 
 static int __init nvme_loop_init_module(void)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..facd9706d67c 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -300,6 +300,7 @@ struct nvmet_fabrics_ops {
        unsigned int flags;
 #define NVMF_KEYED_SGLS                        (1 << 0)
 #define NVMF_METADATA_SUPPORTED                (1 << 1)
+#define        NVMF_CLEAR_NS_DESCS             (1 << 2)
        void (*queue_response)(struct nvmet_req *req);
        int (*add_port)(struct nvmet_port *port);
        void (*remove_port)(struct nvmet_port *port);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..2f182c7f35f5 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,37 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
                ctrl->cap &= ~(1ULL << 43);
 }
 
+
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+       struct nvme_ns_id_desc *data, *cur;
+       u16 status = NVME_SC_SUCCESS;
+
+       if (!(ctrl->ops->flags & NVMF_CLEAR_NS_DESCS))
+               return status;
+
+       data = kzalloc(0x1000, GFP_KERNEL);
+       if (!data)
+               return NVME_SC_INTERNAL;
+
+       status = nvmet_copy_from_sgl(req, 0, data, 0x1000);
+       if (status)
+               goto out_free;
+
+       cur = data;
+       cur->nidt = NVME_NIDT_CSI;
+       cur->nidl = NVME_NIDT_CSI_LEN;
+       cur++;
+       cur->nidt = 0;
+
+       status = nvmet_copy_to_sgl(req, 0, data, 0x1000);
+
+out_free:
+       kfree(data);
+       return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -127,6 +158,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 
 static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 {
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_SUCCESS;
        struct nvme_id_ns *id;
        int i;
@@ -152,6 +184,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
         */
        id->mc = 0;
 
+       if (ctrl->ops->flags & NVMF_CLEAR_NS_DESCS) {
+               memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+               memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+       }
+
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +213,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
                case NVME_ID_CNS_NS:
                        nvmet_passthru_override_id_ns(req);
                        break;
+               case NVME_ID_CNS_NS_DESC_LIST:
+                       nvmet_passthru_override_id_descs(req);
+                       break;
                }
        } else if (status < 0)
                status = NVME_SC_INTERNAL;




^ permalink raw reply related	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-10  0:27                               ` Alan Adamson
@ 2022-06-10 14:12                                 ` Keith Busch
  2022-06-15 20:15                                   ` Alan Adamson
  0 siblings, 1 reply; 51+ messages in thread
From: Keith Busch @ 2022-06-10 14:12 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme

On Fri, Jun 10, 2022 at 12:27:24AM +0000, Alan Adamson wrote:
> +static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
> +{
> +       struct nvmet_ctrl *ctrl = req->sq->ctrl;
> +       struct nvme_ns_id_desc *data, *cur;
> +       u16 status = NVME_SC_SUCCESS;
> +
> +       if (!(ctrl->ops->flags & NVMF_CLEAR_NS_DESCS))
> +               return status;
> +
> +       data = kzalloc(0x1000, GFP_KERNEL);
> +       if (!data)
> +               return NVME_SC_INTERNAL;
> +
> +       status = nvmet_copy_from_sgl(req, 0, data, 0x1000);
> +       if (status)
> +               goto out_free;
> +
> +       cur = data;
> +       cur->nidt = NVME_NIDT_CSI;
> +       cur->nidl = NVME_NIDT_CSI_LEN;
> +       cur++;
> +       cur->nidt = 0;

I don't think the above is correct without setting the CSI value. It's just
going to get whatever the controller happened to return at this offset, which
may be a completely different identifier type. I think you'd actually need to
search the descriptor list for the NIDT_CSI field and then copy just that one
into what gets returned.

And the "cur++" is just going to move the pointer past the descriptor header,
but doesn't include the descriptor's total length, so setting cur->nidt is
going to corrupt the actual descriptor. You have to add the previous descriptor's NIDL to
the cur address.

Otherwise, the rest looks fine.
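
For reference, a correct walk would look roughly like this (untested
sketch; it assumes data is a void * and that pos, len, csi and csi_seen
are new locals):

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		cur = data + pos;

		if (cur->nidl == 0)
			break;
		if (cur->nidt == NVME_NIDT_CSI) {
			/* payload starts right after the 4-byte header */
			memcpy(&csi, data + pos + sizeof(*cur),
			       NVME_NIDT_CSI_LEN);
			csi_seen = true;
			break;
		}
		/* header plus payload gets us to the next descriptor */
		len = sizeof(*cur) + cur->nidl;
	}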


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-10 14:12                                 ` Keith Busch
@ 2022-06-15 20:15                                   ` Alan Adamson
  2022-06-17  9:01                                     ` Christoph Hellwig
  0 siblings, 1 reply; 51+ messages in thread
From: Alan Adamson @ 2022-06-15 20:15 UTC (permalink / raw)
  To: Keith Busch
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme

Here are my latest changes.  They include an interface from nvme-cli to specify
that the IDs should be cleared upon connect.

Wasn’t sure of the proper way to send a clear_ids argument to the target, so I followed
what was done for kato.  Not sure if that is appropriate for this case.

This will require an nvme-cli change and blktests changes.  I tested with both loop and tcp.

Alan



diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..773da1fae961 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -376,6 +376,8 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	 */
 	cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
 
+	cmd.connect.clear_ids = ctrl->opts->clear_ids;
+
 	if (ctrl->opts->disable_sqflow)
 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
 
@@ -548,6 +550,7 @@ static const match_table_t opt_tokens = {
 	{ NVMF_OPT_TOS,			"tos=%d"		},
 	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
 	{ NVMF_OPT_DISCOVERY,		"discovery"		},
+	{ NVMF_OPT_CLEAR_IDS,		"clear_ids"		},
 	{ NVMF_OPT_ERR,			NULL			}
 };
 
@@ -571,6 +574,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	opts->hdr_digest = false;
 	opts->data_digest = false;
 	opts->tos = -1; /* < 0 == use transport default */
+	opts->clear_ids = false;
 
 	options = o = kstrdup(buf, GFP_KERNEL);
 	if (!options)
@@ -593,6 +597,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			}
 			kfree(opts->transport);
 			opts->transport = p;
+			/* By default, clear the ids for loop passthru */
+			if (!strcmp(p, "loop"))
+				opts->clear_ids = true;
 			break;
 		case NVMF_OPT_NQN:
 			p = match_strdup(args);
@@ -829,6 +836,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 		case NVMF_OPT_DISCOVERY:
 			opts->discovery_nqn = true;
 			break;
+		case NVMF_OPT_CLEAR_IDS:
+			opts->clear_ids = true;
+			break;
 		default:
 			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
 				p);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..0fc08465e8a3 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,7 @@ enum {
 	NVMF_OPT_FAIL_FAST_TMO	= 1 << 20,
 	NVMF_OPT_HOST_IFACE	= 1 << 21,
 	NVMF_OPT_DISCOVERY	= 1 << 22,
+	NVMF_OPT_CLEAR_IDS	= 1 << 23,
 };
 
 /**
@@ -128,6 +129,7 @@ struct nvmf_ctrl_options {
 	unsigned int		nr_poll_queues;
 	int			tos;
 	int			fast_io_fail_tmo;
+	bool			clear_ids;
 };
 
 /*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f2a5e1ea508a..9e376bbf5eef 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2438,7 +2438,7 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
-			  NVMF_OPT_TOS,
+			  NVMF_OPT_TOS | NVMF_OPT_CLEAR_IDS,
 	.create_ctrl	= nvme_rdma_create_ctrl,
 };
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bb67538d241b..7624baa11d19 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2691,7 +2691,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
-			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
+			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE |
+			  NVMF_OPT_CLEAR_IDS,
 	.create_ctrl	= nvme_tcp_create_ctrl,
 };
 
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 70fb587e9413..b10c915829b8 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -212,6 +212,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		goto out;
 
 	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
+	ctrl->clear_ids = c->clear_ids;
 
 	uuid_copy(&ctrl->hostid, &d->hostid);
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..1222f4c88aa4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -697,7 +697,7 @@ static struct nvmf_transport_ops nvme_loop_transport = {
 	.name		= "loop",
 	.module		= THIS_MODULE,
 	.create_ctrl	= nvme_loop_create_ctrl,
-	.allowed_opts	= NVMF_OPT_TRADDR,
+	.allowed_opts	= NVMF_OPT_TRADDR | NVMF_OPT_CLEAR_IDS,
 };
 
 static int __init nvme_loop_init_module(void)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..3f5b5a9b2e54 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -209,6 +209,7 @@ struct nvmet_ctrl {
 	u64			err_counter;
 	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
 	bool			pi_support;
+	bool			clear_ids;
 };
 
 struct nvmet_subsys {
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..9498ba417a9e 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,56 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
 		ctrl->cap &= ~(1ULL << 43);
 }
 
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	void *data;
+	struct nvme_ns_id_desc *cur;
+	u16 status = NVME_SC_SUCCESS;
+	u8 csi;
+	int pos, len;
+	bool csi_seen = false;
+
+	if (!ctrl->clear_ids)
+		return status;
+
+	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+	if (!data)
+		return NVME_SC_INTERNAL;
+
+	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+	if (status)
+		goto out_free;
+
+	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+		cur = data + pos;
+
+		if (cur->nidl == 0)
+			break;
+		len = cur->nidl;
+		if (cur->nidt == NVME_NIDT_CSI) {
+			memcpy(&csi, data + pos + sizeof(struct nvme_ns_id_desc), NVME_NIDT_CSI_LEN);
+			csi_seen = 1;
+			break;
+		}
+		len += sizeof(struct nvme_ns_id_desc);
+	}
+	if (csi_seen) {
+		cur = data;
+		cur->nidt = NVME_NIDT_CSI;
+		cur->nidl = NVME_NIDT_CSI_LEN;
+		memcpy(data + sizeof(struct nvme_ns_id_desc), &csi,  NVME_NIDT_CSI_LEN);
+
+		cur = data + sizeof(struct nvme_ns_id_desc) + NVME_NIDT_CSI_LEN;
+		cur->nidt = 0;
+		cur->nidl = 0;
+		status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+	}
+out_free:
+	kfree(data);
+	return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -127,6 +177,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 
 static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 {
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	u16 status = NVME_SC_SUCCESS;
 	struct nvme_id_ns *id;
 	int i;
@@ -152,6 +203,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 	 */
 	id->mc = 0;
 
+	if (ctrl->clear_ids) {
+		memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+		memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+	}
+
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +232,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 		case NVME_ID_CNS_NS:
 			nvmet_passthru_override_id_ns(req);
 			break;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			nvmet_passthru_override_id_descs(req);
+			break;
 		}
 	} else if (status < 0)
 		status = NVME_SC_INTERNAL;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 29ec3e3481ff..f45e96413ffb 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1467,7 +1467,8 @@ struct nvmf_connect_command {
 	__u8		cattr;
 	__u8		resv3;
 	__le32		kato;
-	__u8		resv4[12];
+	__u8		clear_ids;
+	__u8		resv4[11];
 };
 
 struct nvmf_connect_data {

> On Jun 10, 2022, at 7:12 AM, Keith Busch <kbusch@kernel.org> wrote:
> 
> On Fri, Jun 10, 2022 at 12:27:24AM +0000, Alan Adamson wrote:
>> +static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
>> +{
>> +       struct nvmet_ctrl *ctrl = req->sq->ctrl;
>> +       struct nvme_ns_id_desc *data, *cur;
>> +       u16 status = NVME_SC_SUCCESS;
>> +
>> +       if (!(ctrl->ops->flags & NVMF_CLEAR_NS_DESCS))
>> +               return status;
>> +
>> +       data = kzalloc(0x1000, GFP_KERNEL);
>> +       if (!data)
>> +               return NVME_SC_INTERNAL;
>> +
>> +       status = nvmet_copy_from_sgl(req, 0, data, 0x1000);
>> +       if (status)
>> +               goto out_free;
>> +
>> +       cur = data;
>> +       cur->nidt = NVME_NIDT_CSI;
>> +       cur->nidl = NVME_NIDT_CSI_LEN;
>> +       cur++;
>> +       cur->nidt = 0;
> 
> I don't think the above is correct without setting the CSI value. It's just
> going to get whatever the controller happened to return at this offset, which
> may be a completely different identifier type. I think you'd actually need to
> search the descriptor list for the NIDT_CSI field and then copy just that one
> into what gets returned.
> 
> And the "cur++" is just going to move the pointer past the descriptor header,
> but doesn't include the descriptor's total length, so setting cur->nidt is
> going to corrupt the actual descriptor. You have to add the previous descriptor's NIDL to
> the cur address.
> 
> Otherwise, the rest looks fine.


^ permalink raw reply related	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-15 20:15                                   ` Alan Adamson
@ 2022-06-17  9:01                                     ` Christoph Hellwig
  2022-06-21 18:40                                       ` Alan Adamson
  0 siblings, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-06-17  9:01 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Keith Busch, Christoph Hellwig, Luis Chamberlain, Klaus Jensen,
	Sagi Grimberg, linux-nvme

On Wed, Jun 15, 2022 at 08:15:18PM +0000, Alan Adamson wrote:
> Here are my latest changes.  They include an interface from nvme-cli to specify
> that the IDs should be cleared upon connect.
> 
> Wasn’t sure of the proper way to send a clear_ids argument to the target, so I followed
> what was done for kato.  Not sure if that is appropriate for this case.
> 
> This will require an nvme-cli change and blktests changes.  I tested with both loop and tcp.

Well, it changes the nvme over-the-wire protocol, we can't just do
that.

And the option really is a target side one, that is we want to configure
it using configfs on the target, not the fabrics command line options.

That is, add a new clear_ids attribute to nvmet_passthru_attrs, save
the flag in struct nvmet_subsys, and default to true for loop controllers.


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-17  9:01                                     ` Christoph Hellwig
@ 2022-06-21 18:40                                       ` Alan Adamson
  2022-06-21 19:11                                         ` Keith Busch
  0 siblings, 1 reply; 51+ messages in thread
From: Alan Adamson @ 2022-06-21 18:40 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Keith Busch, Luis Chamberlain, Klaus Jensen, Sagi Grimberg, linux-nvme



> On Jun 17, 2022, at 2:01 AM, Christoph Hellwig <hch@lst.de> wrote:
> 
> On Wed, Jun 15, 2022 at 08:15:18PM +0000, Alan Adamson wrote:
>> Here are my latest changes.  They include an interface from nvme-cli to specify
>> that the IDs should be cleared upon connect.
>> 
>> Wasn’t sure of the proper way to send a clear_ids argument to the target, so I followed
>> what was done for kato.  Not sure if that is appropriate for this case.
>> 
>> This will require an nvme-cli change and blktests changes.  I tested with both loop and tcp.
> 
> Well, it changes the nvme over-the-wire protocol, we can't just do
> that.
> 
> And the option really is a target side one, that is we want to configure
> it using configfs on the target, not the fabrics command line options.
> 
> That is, add a new clear_ids attribute to nvmet_passthru_attrs, save
> the flag in struct nvmet_subsys, and default to true for loop controllers.

Latest changes.  I like the idea setting clear_ids using configfs.

Alan


diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index e44b2988759e..ff77c3d2354f 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
 }
 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
 
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+	unsigned int clear_ids;
+
+	if (kstrtouint(page, 0, &clear_ids))
+		return -EINVAL;
+	subsys->clear_ids = clear_ids;
+	return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
 static struct configfs_attribute *nvmet_passthru_attrs[] = {
 	&nvmet_passthru_attr_device_path,
 	&nvmet_passthru_attr_enable,
 	&nvmet_passthru_attr_admin_timeout,
 	&nvmet_passthru_attr_io_timeout,
+	&nvmet_passthru_attr_clear_ids,
 	NULL,
 };
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 90e75324dae0..d4203d582343 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1374,6 +1374,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	ctrl->port = req->port;
 	ctrl->ops = req->ops;
 
+	/* By default, set loop targets to clear IDs */
+	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+		subsys->clear_ids = 1;
+
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..2b3e5719f24e 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -249,6 +249,7 @@ struct nvmet_subsys {
 	struct config_group	passthru_group;
 	unsigned int		admin_timeout;
 	unsigned int		io_timeout;
+	unsigned int		clear_ids;
 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
 
 #ifdef CONFIG_BLK_DEV_ZONED
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b1f7efab3918..d0587b10895c 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,56 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
 		ctrl->cap &= ~(1ULL << 43);
 }
 
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	void *data;
+	struct nvme_ns_id_desc *cur;
+	u16 status = NVME_SC_SUCCESS;
+	u8 csi;
+	int pos, len;
+	bool csi_seen = false;
+
+	if (!ctrl->subsys->clear_ids)
+		return status;
+
+	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+	if (!data)
+		return NVME_SC_INTERNAL;
+
+	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+	if (status)
+		goto out_free;
+
+	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+		cur = data + pos;
+
+		if (cur->nidl == 0)
+			break;
+		len = cur->nidl;
+		if (cur->nidt == NVME_NIDT_CSI) {
+			memcpy(&csi, data + pos + sizeof(struct nvme_ns_id_desc), NVME_NIDT_CSI_LEN);
+			csi_seen = 1;
+			break;
+		}
+		len += sizeof(struct nvme_ns_id_desc);
+	}
+	if (csi_seen) {
+		cur = data;
+		cur->nidt = NVME_NIDT_CSI;
+		cur->nidl = NVME_NIDT_CSI_LEN;
+		memcpy(data + sizeof(struct nvme_ns_id_desc), &csi,  NVME_NIDT_CSI_LEN);
+
+		cur = data + sizeof(struct nvme_ns_id_desc) + NVME_NIDT_CSI_LEN;
+		cur->nidt = 0;
+		cur->nidl = 0;
+		status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+	}
+out_free:
+	kfree(data);
+	return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -127,6 +177,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 
 static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 {
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	u16 status = NVME_SC_SUCCESS;
 	struct nvme_id_ns *id;
 	int i;
@@ -152,6 +203,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 	 */
 	id->mc = 0;
 
+	if (ctrl->subsys->clear_ids) {
+		memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+		memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+	}
+
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +232,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 		case NVME_ID_CNS_NS:
 			nvmet_passthru_override_id_ns(req);
 			break;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			nvmet_passthru_override_id_descs(req);
+			break;
 		}
 	} else if (status < 0)
 		status = NVME_SC_INTERNAL;

^ permalink raw reply related	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-21 18:40                                       ` Alan Adamson
@ 2022-06-21 19:11                                         ` Keith Busch
  2022-06-21 20:39                                           ` Alan Adamson
  2022-06-22 11:00                                           ` Christoph Hellwig
  0 siblings, 2 replies; 51+ messages in thread
From: Keith Busch @ 2022-06-21 19:11 UTC (permalink / raw)
  To: Alan Adamson
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme

On Tue, Jun 21, 2022 at 06:40:49PM +0000, Alan Adamson wrote:
> +static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
> +{
> +	struct nvmet_ctrl *ctrl = req->sq->ctrl;
> +	void *data;
> +	struct nvme_ns_id_desc *cur;
> +	u16 status = NVME_SC_SUCCESS;
> +	u8 csi;
> +	int pos, len;
> +	bool csi_seen = false;
> +
> +	if (!ctrl->subsys->clear_ids)
> +		return status;
> +
> +	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
> +	if (!data)
> +		return NVME_SC_INTERNAL;
> +
> +	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
> +	if (status)
> +		goto out_free;
> +
> +	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
> +		cur = data + pos;
> +
> +		if (cur->nidl == 0)
> +			break;
> +		len = cur->nidl;
> +		if (cur->nidt == NVME_NIDT_CSI) {
> +			memcpy(&csi, data + pos + sizeof(struct nvme_ns_id_desc), NVME_NIDT_CSI_LEN);
> +			csi_seen = 1;
> +			break;
> +		}
> +		len += sizeof(struct nvme_ns_id_desc);
> +	}
> +	if (csi_seen) {
> +		cur = data;
> +		cur->nidt = NVME_NIDT_CSI;
> +		cur->nidl = NVME_NIDT_CSI_LEN;
> +		memcpy(data + sizeof(struct nvme_ns_id_desc), &csi,  NVME_NIDT_CSI_LEN);
> +
> +		cur = data + sizeof(struct nvme_ns_id_desc) + NVME_NIDT_CSI_LEN;
> +		cur->nidt = 0;
> +		cur->nidl = 0;
> +		status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
> +	}

This is clearing the other descriptors only if the controller also reports a
CSI field. I think just do something like the following on top of your patch,
and it should be good to go.

---
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index f863cd459652..f9599b0cd129 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -54,17 +54,15 @@ static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
 		}
 		len += sizeof(struct nvme_ns_id_desc);
 	}
+
+	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
 	if (csi_seen) {
 		cur = data;
 		cur->nidt = NVME_NIDT_CSI;
 		cur->nidl = NVME_NIDT_CSI_LEN;
 		memcpy(data + sizeof(struct nvme_ns_id_desc), &csi,  NVME_NIDT_CSI_LEN);
-
-		cur = data + sizeof(struct nvme_ns_id_desc) + NVME_NIDT_CSI_LEN;
-		cur->nidt = 0;
-		cur->nidl = 0;
-		status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
 	}
+	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
 out_free:
 	kfree(data);
 	return status;
--


^ permalink raw reply related	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-21 19:11                                         ` Keith Busch
@ 2022-06-21 20:39                                           ` Alan Adamson
  2022-06-22 11:00                                           ` Christoph Hellwig
  1 sibling, 0 replies; 51+ messages in thread
From: Alan Adamson @ 2022-06-21 20:39 UTC (permalink / raw)
  To: Keith Busch
  Cc: Christoph Hellwig, Luis Chamberlain, Klaus Jensen, Sagi Grimberg,
	linux-nvme



> On Jun 21, 2022, at 12:11 PM, Keith Busch <kbusch@kernel.org> wrote:
> 
> On Tue, Jun 21, 2022 at 06:40:49PM +0000, Alan Adamson wrote:
>> +static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
>> +{
>> +	struct nvmet_ctrl *ctrl = req->sq->ctrl;
>> +	void *data;
>> +	struct nvme_ns_id_desc *cur;
>> +	u16 status = NVME_SC_SUCCESS;
>> +	u8 csi;
>> +	int pos, len;
>> +	bool csi_seen = false;
>> +
>> +	if (!ctrl->subsys->clear_ids)
>> +		return status;
>> +
>> +	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
>> +	if (!data)
>> +		return NVME_SC_INTERNAL;
>> +
>> +	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
>> +	if (status)
>> +		goto out_free;
>> +
>> +	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
>> +		cur = data + pos;
>> +
>> +		if (cur->nidl == 0)
>> +			break;
>> +		len = cur->nidl;
>> +		if (cur->nidt == NVME_NIDT_CSI) {
>> +			memcpy(&csi, data + pos + sizeof(struct nvme_ns_id_desc), NVME_NIDT_CSI_LEN);
>> +			csi_seen = 1;
>> +			break;
>> +		}
>> +		len += sizeof(struct nvme_ns_id_desc);
>> +	}
>> +	if (csi_seen) {
>> +		cur = data;
>> +		cur->nidt = NVME_NIDT_CSI;
>> +		cur->nidl = NVME_NIDT_CSI_LEN;
>> +		memcpy(data + sizeof(struct nvme_ns_id_desc), &csi,  NVME_NIDT_CSI_LEN);
>> +
>> +		cur = data + sizeof(struct nvme_ns_id_desc) + NVME_NIDT_CSI_LEN;
>> +		cur->nidt = 0;
>> +		cur->nidl = 0;
>> +		status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
>> +	}
> 
> This is clearing the other descriptors only if the controller also reports a
> CSI field. I think just do something like the following on top of your patch,
> and should be good to go.
> 
> ---
> diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
> index f863cd459652..f9599b0cd129 100644
> --- a/drivers/nvme/target/passthru.c
> +++ b/drivers/nvme/target/passthru.c
> @@ -54,17 +54,15 @@ static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
> 		}
> 		len += sizeof(struct nvme_ns_id_desc);
> 	}
> +
> +	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
> 	if (csi_seen) {
> 		cur = data;
> 		cur->nidt = NVME_NIDT_CSI;
> 		cur->nidl = NVME_NIDT_CSI_LEN;
> 		memcpy(data + sizeof(struct nvme_ns_id_desc), &csi,  NVME_NIDT_CSI_LEN);
> -
> -		cur = data + sizeof(struct nvme_ns_id_desc) + NVME_NIDT_CSI_LEN;
> -		cur->nidt = 0;
> -		cur->nidl = 0;
> -		status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
> 	}
> +	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
> out_free:
> 	kfree(data);
> 	return status;
> --

Yes, I can do that.  Originally I was doing that, but I wanted to avoid the 4K copy of zeros.

Thanks,

Alan



^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-21 19:11                                         ` Keith Busch
  2022-06-21 20:39                                           ` Alan Adamson
@ 2022-06-22 11:00                                           ` Christoph Hellwig
  2022-06-22 15:45                                             ` Alan Adamson
  1 sibling, 1 reply; 51+ messages in thread
From: Christoph Hellwig @ 2022-06-22 11:00 UTC (permalink / raw)
  To: Keith Busch
  Cc: Alan Adamson, Christoph Hellwig, Luis Chamberlain, Klaus Jensen,
	Sagi Grimberg, linux-nvme

On Tue, Jun 21, 2022 at 01:11:33PM -0600, Keith Busch wrote:
> This is clearing the other descriptors only if the controller also reports a
> CSI field. I think just do something like the following on top of your patch,
> and you should be good to go.

Agreed.


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-22 11:00                                           ` Christoph Hellwig
@ 2022-06-22 15:45                                             ` Alan Adamson
  0 siblings, 0 replies; 51+ messages in thread
From: Alan Adamson @ 2022-06-22 15:45 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Keith Busch, Luis Chamberlain, Klaus Jensen, Sagi Grimberg, linux-nvme



> On Jun 22, 2022, at 4:00 AM, Christoph Hellwig <hch@lst.de> wrote:
> 
> On Tue, Jun 21, 2022 at 01:11:33PM -0600, Keith Busch wrote:
>> This is clearing the other descriptors only if the controller also reports a
>> CSI field. I think just do something like the following on top of your patch,
>> and you should be good to go.
> 
> Agreed.

I’ll send out a real patch request in a bit, but here is the latest, along with the blktests changes.

Alan


diff --git a/tests/nvme/rc b/tests/nvme/rc
index 998b181..ddfa78a 100644
--- a/tests/nvme/rc
+++ b/tests/nvme/rc
@@ -277,6 +277,7 @@ _create_nvmet_passthru() {
 
        _test_dev_nvme_ctrl > "${passthru_path}/device_path"
        echo 1 > "${passthru_path}/enable"
+       echo 1 > "${passthru_path}/clear_ids"
 }
 
 _remove_nvmet_passhtru() {







diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index e44b2988759e..ff77c3d2354f 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
 }
 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
 
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+	unsigned int clear_ids;
+
+	if (kstrtouint(page, 0, &clear_ids))
+		return -EINVAL;
+	subsys->clear_ids = clear_ids;
+	return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
 static struct configfs_attribute *nvmet_passthru_attrs[] = {
 	&nvmet_passthru_attr_device_path,
 	&nvmet_passthru_attr_enable,
 	&nvmet_passthru_attr_admin_timeout,
 	&nvmet_passthru_attr_io_timeout,
+	&nvmet_passthru_attr_clear_ids,
 	NULL,
 };
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 90e75324dae0..d4203d582343 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1374,6 +1374,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	ctrl->port = req->port;
 	ctrl->ops = req->ops;
 
+	/* Set loop targets to clear IDs by default */
+	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+		subsys->clear_ids = 1;
+
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..2b3e5719f24e 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -249,6 +249,7 @@ struct nvmet_subsys {
 	struct config_group	passthru_group;
 	unsigned int		admin_timeout;
 	unsigned int		io_timeout;
+	unsigned int		clear_ids;
 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
 
 #ifdef CONFIG_BLK_DEV_ZONED
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b1f7efab3918..e8c033fbdb5c 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,55 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
 		ctrl->cap &= ~(1ULL << 43);
 }
 
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	void *data;
+	struct nvme_ns_id_desc *cur;
+	u16 status = NVME_SC_SUCCESS;
+	u8 csi;
+	int pos, len;
+	bool csi_seen = false;
+
+	if (!ctrl->subsys->clear_ids)
+		return status;
+
+	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+	if (!data)
+		return NVME_SC_INTERNAL;
+
+	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+	if (status)
+		goto out_free;
+
+	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+		cur = data + pos;
+
+		if (cur->nidl == 0)
+			break;
+		len = cur->nidl;
+		if (cur->nidt == NVME_NIDT_CSI) {
+			memcpy(&csi, data + pos + sizeof(struct nvme_ns_id_desc),
+			       NVME_NIDT_CSI_LEN);
+			csi_seen = true;
+			break;
+		}
+		len += sizeof(struct nvme_ns_id_desc);
+	}
+
+	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
+	if (csi_seen) {
+		cur = data;
+		cur->nidt = NVME_NIDT_CSI;
+		cur->nidl = NVME_NIDT_CSI_LEN;
+		memcpy(data + sizeof(struct nvme_ns_id_desc), &csi,  NVME_NIDT_CSI_LEN);
+	}
+	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+out_free:
+	kfree(data);
+	return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -127,6 +176,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 
 static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 {
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	u16 status = NVME_SC_SUCCESS;
 	struct nvme_id_ns *id;
 	int i;
@@ -152,6 +202,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 	 */
 	id->mc = 0;
 
+	if (ctrl->subsys->clear_ids) {
+		memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+		memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+	}
+
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +231,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 		case NVME_ID_CNS_NS:
 			nvmet_passthru_override_id_ns(req);
 			break;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			nvmet_passthru_override_id_descs(req);
+			break;
 		}
 	} else if (status < 0)
 		status = NVME_SC_INTERNAL;



^ permalink raw reply related	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 14:51   ` Keith Busch
@ 2022-06-06 15:06     ` Christoph Hellwig
  0 siblings, 0 replies; 51+ messages in thread
From: Christoph Hellwig @ 2022-06-06 15:06 UTC (permalink / raw)
  To: Keith Busch; +Cc: Christoph Hellwig, Stefan, Sagi Grimberg, linux-nvme

On Mon, Jun 06, 2022 at 08:51:34AM -0600, Keith Busch wrote:
> But just fyi, this sounds like the same issue as the phison controller from a
> different brand reported below, so it sounds more widespread than just this
> particular adata model.
> 
>   https://bugzilla.kernel.org/show_bug.cgi?id=216049
> 
> The reporter didn't provide the device ID, though, so we don't have a viable
> patch for that product just yet. Perhaps we could add the vid:did to some of
> the messages to help shorten the report-quirk cycle.

Yes, that's probably useful.  I've also been wondering if we need a
kernel command line interface to enable quirks for given IDs, similar
to what SCSI has.  With the amount of amazingly crappy consumer devices,
our number of quirks will keep growing exponentially unless we can finally
get the NVMe interop group to actually test for these kinds of real-life
bugs instead of silly crap like status code precedence.
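
(No such interface exists in the nvme driver at this point; purely as a
hypothetical sketch of what it could look like, loosely modeled on
SCSI's scsi_dev_flags= parameter, with every name and format below made
up for illustration:)

/* Hypothetical: let a "quirk_override=vvvv:dddd:flags" module parameter
 * force extra quirks for one vendor/device pair.  Nothing here is in
 * the tree; the parameter name and format are assumptions. */
static char *quirk_override;
module_param(quirk_override, charp, 0444);
MODULE_PARM_DESC(quirk_override,
	"force extra quirks, format vendor:device:quirk_flags (hex)");

static unsigned long nvme_quirk_override(u16 vendor, u16 device)
{
	unsigned int v, d;
	unsigned long flags;

	if (!quirk_override ||
	    sscanf(quirk_override, "%x:%x:%lx", &v, &d, &flags) != 3)
		return 0;
	return (v == vendor && d == device) ? flags : 0;
}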


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06 12:42   ` Stefan
@ 2022-06-06 15:03     ` Christoph Hellwig
  0 siblings, 0 replies; 51+ messages in thread
From: Christoph Hellwig @ 2022-06-06 15:03 UTC (permalink / raw)
  To: Stefan; +Cc: Christoph Hellwig, Keith Busch, Sagi Grimberg, linux-nvme

On Mon, Jun 06, 2022 at 12:42:41PM +0000, Stefan wrote:
> Will do, but just to clarify: Is this a problem with my specific drive or
> with the controller? FWICT my drives use a Phison PS5016-E16, which seems
> quite common in consumer PCIe 4.0 SSDs.

In general, ID reporting like this is a firmware issue, so it is not
directly related to the controller hardware.  However, for cheap
consumer drives most vendors simply take the default reference firmware
and ship it with the minimum required changes.  So if Phison fucked this
up, chances are it will be in most of these controllers.

> If I add the quirk for 1cc1:5350 it would only override it for this
> specific ADATA/XPG model, correct?

Yes.  We have no way to find out what actual controller hardware or
reference firmware a given device is based on.


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06  6:40 ` Christoph Hellwig
  2022-06-06 12:42   ` Stefan
@ 2022-06-06 14:51   ` Keith Busch
  2022-06-06 15:06     ` Christoph Hellwig
  1 sibling, 1 reply; 51+ messages in thread
From: Keith Busch @ 2022-06-06 14:51 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Stefan, Sagi Grimberg, linux-nvme

On Mon, Jun 06, 2022 at 08:40:55AM +0200, Christoph Hellwig wrote:
> On Sun, Jun 05, 2022 at 01:58:07AM +0000, Stefan wrote:
> > Ran into a boot failure on my workstation today and I traced it to this
> > commit; reverting it on v5.18.1 makes it work. Quad-nvme boot raid.
> > 
> > I saw some follow-ups adding quirks, so I added NVME_QUIRK_BOGUS_NID to my
> > drives (1cc1:5350 for reference) and that worked too - if this is ok for an
> > actual patch let me know.
> 
> Yes, please send the patch, the controller reports a non-unique EUI64,
> the rest seems to be not reported at all and thus actually ok.

I am unable to reply to Stefan due to some bizarre gpg issues...

But just fyi, this sounds like the same issue as the phison controller from a
different brand reported below, so it sounds more widespread than just this
particular adata model.

  https://bugzilla.kernel.org/show_bug.cgi?id=216049

The reporter didn't provide the device ID, though, so we don't have a viable
patch for that product just yet. Perhaps we could add the vid:did to some of
the messages to help shorten the report-quirk cycle.


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-06  6:40 ` Christoph Hellwig
@ 2022-06-06 12:42   ` Stefan
  2022-06-06 15:03     ` Christoph Hellwig
  2022-06-06 14:51   ` Keith Busch
  1 sibling, 1 reply; 51+ messages in thread
From: Stefan @ 2022-06-06 12:42 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Keith Busch, Sagi Grimberg, linux-nvme

> On Sun, Jun 05, 2022 at 01:58:07AM +0000, Stefan wrote:
>
> > Ran into a boot failure on my workstation today and I traced it to this
> > commit; reverting it on v5.18.1 makes it work. Quad-nvme boot raid.
> >
> > I saw some follow-ups adding quirks, so I added NVME_QUIRK_BOGUS_NID to my
> > drives (1cc1:5350 for reference) and that worked too - if this is ok for an
> > actual patch let me know.
>
>
> Yes, please send the patch, the controller reports a non-unique EUI64,
> the rest seems to be not reported at all and thus actually ok.

Will do, but just to clarify: Is this a problem with my specific drive or
with the controller? FWICT my drives use a Phison PS5016-E16, which seems
quite common in consumer PCIe 4.0 SSDs.

If I add the quirk for 1cc1:5350 it would only override it for this
specific ADATA/XPG model, correct?


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
  2022-06-05  1:58 [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique Stefan
@ 2022-06-06  6:40 ` Christoph Hellwig
  2022-06-06 12:42   ` Stefan
  2022-06-06 14:51   ` Keith Busch
  0 siblings, 2 replies; 51+ messages in thread
From: Christoph Hellwig @ 2022-06-06  6:40 UTC (permalink / raw)
  To: Stefan; +Cc: Christoph Hellwig, Keith Busch, Sagi Grimberg, linux-nvme

On Sun, Jun 05, 2022 at 01:58:07AM +0000, Stefan wrote:
> Ran into a boot failure on my workstation today and I traced it to this
> commit; reverting it on v5.18.1 makes it work. Quad-nvme boot raid.
> 
> I saw some follow-ups adding quirks, so I added NVME_QUIRK_BOGUS_NID to my
> drives (1cc1:5350 for reference) and that worked too - if this is ok for an
> actual patch let me know.

Yes, please send the patch, the controller reports a non-unique EUI64,
the rest seems to be not reported at all and thus actually ok.
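
(For reference, the kind of entry such a patch adds to the PCI ID table
in drivers/nvme/host/pci.c looks roughly like the sketch below; this is
not the actual submitted patch, and the device comment is an
assumption:)

	/* sketch: slots into nvme_id_table in drivers/nvme/host/pci.c */
	{ PCI_DEVICE(0x1cc1, 0x5350),	/* ADATA XPG GAMMIX S50 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },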


^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
@ 2022-06-05  1:58 Stefan
  2022-06-06  6:40 ` Christoph Hellwig
  0 siblings, 1 reply; 51+ messages in thread
From: Stefan @ 2022-06-05  1:58 UTC (permalink / raw)
  To: Christoph Hellwig, Keith Busch, Sagi Grimberg; +Cc: linux-nvme

On 2/24/22 20:28, Christoph Hellwig wrote:
> Add a check to verify that the unique identifiers are unique globally
> in addition to the existing check that verifies that they are unique
> inside a single subsystem.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---

Hi list!

(not following nvme-dev, sorry if reported before)

Ran into a boot failure on my workstation today and I traced it to this
commit; reverting it on v5.18.1 makes it work. Quad-nvme boot raid.

I saw some follow-ups adding quirks, so I added NVME_QUIRK_BOGUS_NID to my
drives (1cc1:5350 for reference) and that worked too - if this is ok for an
actual patch let me know.

The kernel prints the error "globally duplicate IDs for nsid 1". The symptom
is one drive (appears to be random every boot?) not showing up in
/dev/disk/by-id; the other 3 are there and accessible. Happens 100% of the
time, so there is no chance of mapping my raid.

System info:

# nvme list

Node          Generic     SN            Model           Namespace Usage                     Format        FW Rev
------------- ----------- ------------- --------------- --------- ------------------------- ------------- --------
/dev/nvme3n1  /dev/ng3n1  PJ5220001302  XPG GAMMIX S50  1           1.00  TB /   1.00  TB   4 KiB +  0 B  EGFM11.2
/dev/nvme2n1  /dev/ng2n1  PJ5220001308  XPG GAMMIX S50  1           1.00  TB /   1.00  TB   4 KiB +  0 B  EGFM11.2
/dev/nvme1n1  /dev/ng1n1  PJ3820001439  XPG GAMMIX S50  1           1.00  TB /   1.00  TB   4 KiB +  0 B  EGFM11.2
/dev/nvme0n1  /dev/ng0n1  PJ3820001437  XPG GAMMIX S50  1           1.00  TB /   1.00  TB   4 KiB +  0 B  EGFM11.2

# cat /sys/class/nvme/nvmeX/nvmeXn1/nsid
1

Out of curiosity, I tried printk'ing the values being compared here (a sketch
of that kind of debug print follows the list below), and they're the same for
all drives:

   uuid: 00000000-0000-0000-0000-000000000000
  nguid: empty?
  eui64: random (?), but always same on all drives
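
(A minimal sketch of the kind of one-off debug print behind the values
above; the helper name is made up and this is not the exact code used:)

/* hypothetical debug aid: dump the three identifiers being compared */
static void nvme_dump_ns_ids(struct device *dev, struct nvme_ns_ids *ids)
{
	dev_info(dev, "uuid: %pUb\n", &ids->uuid);
	dev_info(dev, "nguid: %16phN\n", ids->nguid);
	dev_info(dev, "eui64: %8phN\n", ids->eui64);
}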


Open to provide more info,
Thanks,

~ Stefan



^ permalink raw reply	[flat|nested] 51+ messages in thread

end of thread

Thread overview: 51+ messages
2022-02-24 19:28 properly validate the nvme uniqueue identifiers are unique v2 Christoph Hellwig
2022-02-24 19:28 ` [PATCH 1/4] nvme: cleanup __nvme_check_ids Christoph Hellwig
2022-02-24 22:50   ` Chaitanya Kulkarni
2022-02-24 19:28 ` [PATCH 2/4] nvme: fix the check for duplicate unique identifiers Christoph Hellwig
2022-02-24 22:51   ` Chaitanya Kulkarni
2022-02-24 19:28 ` [PATCH 3/4] nvme: check for duplicate identifiers earlier Christoph Hellwig
2022-02-24 22:52   ` Chaitanya Kulkarni
2022-02-24 19:28 ` [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique Christoph Hellwig
2022-02-24 22:54   ` Chaitanya Kulkarni
2022-04-08  1:04   ` Luis Chamberlain
2022-04-08  5:29     ` Christoph Hellwig
2022-04-08  7:19       ` Klaus Jensen
2022-04-08 16:10         ` Christoph Hellwig
2022-04-08 17:46           ` Luis Chamberlain
2022-04-11  5:05             ` Christoph Hellwig
2022-04-11  5:54               ` Christoph Hellwig
2022-04-11  6:01                 ` Klaus Jensen
2022-04-11  6:09                   ` Christoph Hellwig
2022-04-11  6:11                     ` Klaus Jensen
2022-04-12 18:46                 ` Luis Chamberlain
2022-04-18 23:30                 ` Alan Adamson
2022-04-20  7:36                   ` Christoph Hellwig
2022-06-06 20:35                     ` Alan Adamson
2022-06-06 21:38                       ` Keith Busch
2022-06-06 21:51                         ` Alan Adamson
2022-06-06 21:58                           ` Keith Busch
2022-06-06 23:11                             ` Alan Adamson
2022-06-07 19:01                               ` Keith Busch
2022-06-08  7:48                               ` Christoph Hellwig
2022-06-08  7:52                       ` Christoph Hellwig
2022-06-08 18:11                         ` Alan Adamson
2022-06-08 19:04                           ` Keith Busch
2022-06-09  0:30                             ` Chaitanya Kulkarni
2022-06-09 15:11                               ` Alan Adamson
2022-06-09  3:53                             ` Christoph Hellwig
2022-06-10  0:27                               ` Alan Adamson
2022-06-10 14:12                                 ` Keith Busch
2022-06-15 20:15                                   ` Alan Adamson
2022-06-17  9:01                                     ` Christoph Hellwig
2022-06-21 18:40                                       ` Alan Adamson
2022-06-21 19:11                                         ` Keith Busch
2022-06-21 20:39                                           ` Alan Adamson
2022-06-22 11:00                                           ` Christoph Hellwig
2022-06-22 15:45                                             ` Alan Adamson
2022-02-24 19:38 ` properly validate the nvme uniqueue identifiers are unique v2 Keith Busch
2022-06-05  1:58 [PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique Stefan
2022-06-06  6:40 ` Christoph Hellwig
2022-06-06 12:42   ` Stefan
2022-06-06 15:03     ` Christoph Hellwig
2022-06-06 14:51   ` Keith Busch
2022-06-06 15:06     ` Christoph Hellwig
