* [PATCH] nvmet: per-host namespaces masking
From: Hannes Reinecke @ 2021-02-05  9:29 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Chaitanya Kulkarni, linux-nvme, Sagi Grimberg, Keith Busch,
	Hannes Reinecke

Implement per-host namespaces masking, allowing different namespaces
to be exposed to different hosts connecting to the same subsystem.
The existing method of adding host NQNs to the subsystem's
allowed_hosts takes precedence over the per-host namespace masking.
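
For illustration (hypothetical NQNs; assuming the standard nvmet
configfs layout under /sys/kernel/config/nvmet), a namespace is
restricted to a given host by linking that host's entry into the new
per-namespace allowed_hosts directory:

  # example only; both NQNs below are placeholders
  ln -s /sys/kernel/config/nvmet/hosts/nqn.2014-08.org.example:host1 \
    /sys/kernel/config/nvmet/subsystems/nqn.2014-08.org.example:subsys1/namespaces/1/allowed_hosts/host1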

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/nvme/target/admin-cmd.c | 16 +++++++
 drivers/nvme/target/configfs.c  | 82 +++++++++++++++++++++++++++++++++
 drivers/nvme/target/core.c      | 21 ++++++++-
 drivers/nvme/target/nvmet.h     |  4 ++
 4 files changed, 122 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 613a4d8feac1..d63fd33200db 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -489,6 +489,13 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 		goto done;
 	}
 
+	if (!nvmet_ns_host_allowed(req->ns, ctrl->hostnqn)) {
+		status = NVME_SC_INVALID_NS;
+		nvmet_put_namespace(req->ns);
+		req->ns = NULL;
+		goto out;
+	}
+
 	nvmet_ns_revalidate(req->ns);
 
 	/*
@@ -566,6 +573,8 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
 	}
 
 	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+		if (!nvmet_ns_host_allowed(ns, ctrl->hostnqn))
+			continue;
 		if (ns->nsid <= min_nsid)
 			continue;
 		list[i++] = cpu_to_le32(ns->nsid);
@@ -613,6 +622,13 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
 		goto out;
 	}
+	if (!nvmet_ns_host_allowed(req->ns, req->sq->ctrl->hostnqn)) {
+		nvmet_put_namespace(req->ns);
+		req->ns = NULL;
+		req->error_loc = offsetof(struct nvme_identify, nsid);
+		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+		goto out;
+	}
 
 	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
 		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 635a7cb45d0b..5c85291eb16a 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -607,6 +607,85 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
 	NULL,
 };
 
+static int nvmet_ns_allowed_hosts_allow_link(struct config_item *parent,
+		struct config_item *target)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(parent->ci_parent);
+	struct nvmet_host *host;
+	struct nvmet_host_link *link, *p;
+	int ret;
+
+	if (target->ci_type != &nvmet_host_type) {
+		pr_err("can only link hosts into the allowed_hosts directory!\n");
+		return -EINVAL;
+	}
+
+	host = to_host(target);
+	link = kmalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+	link->host = host;
+
+	down_write(&nvmet_config_sem);
+	ret = -EINVAL;
+	if (ns->subsys->allow_any_host) {
+		pr_err("can't add hosts when allow_any_host is set!\n");
+		goto out_free_link;
+	}
+
+	ret = -EEXIST;
+	list_for_each_entry(p, &ns->subsys->hosts, entry) {
+		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+			goto out_free_link;
+	}
+	list_for_each_entry(p, &ns->hosts, entry) {
+		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+			goto out_free_link;
+	}
+	list_add_tail(&link->entry, &ns->hosts);
+	nvmet_subsys_disc_changed(ns->subsys, host);
+
+	up_write(&nvmet_config_sem);
+	return 0;
+out_free_link:
+	up_write(&nvmet_config_sem);
+	kfree(link);
+	return ret;
+}
+
+static void nvmet_ns_allowed_hosts_drop_link(struct config_item *parent,
+		struct config_item *target)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(parent->ci_parent);
+	struct nvmet_host *host = to_host(target);
+	struct nvmet_host_link *p;
+
+	down_write(&nvmet_config_sem);
+	list_for_each_entry(p, &ns->hosts, entry) {
+		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+			goto found;
+	}
+	up_write(&nvmet_config_sem);
+	return;
+
+found:
+	list_del(&p->entry);
+	nvmet_subsys_disc_changed(ns->subsys, host);
+
+	up_write(&nvmet_config_sem);
+	kfree(p);
+}
+
+static struct configfs_item_operations nvmet_ns_allowed_hosts_item_ops = {
+	.allow_link		= nvmet_ns_allowed_hosts_allow_link,
+	.drop_link		= nvmet_ns_allowed_hosts_drop_link,
+};
+
+static const struct config_item_type nvmet_ns_allowed_hosts_type = {
+	.ct_item_ops		= &nvmet_ns_allowed_hosts_item_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
 static void nvmet_ns_release(struct config_item *item)
 {
 	struct nvmet_ns *ns = to_nvmet_ns(item);
@@ -647,6 +726,9 @@ static struct config_group *nvmet_ns_make(struct config_group *group,
 	if (!ns)
 		goto out;
 	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
+	config_group_init_type_name(&ns->allowed_hosts_group,
+				    "allowed_hosts", &nvmet_ns_allowed_hosts_type);
+	configfs_add_default_group(&ns->allowed_hosts_group, &ns->group);
 
 	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8ce4d59cc9e7..d62a49f8a6e0 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -673,6 +673,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 
 	ns->nsid = nsid;
 	ns->subsys = subsys;
+	INIT_LIST_HEAD(&ns->hosts);
 
 	down_write(&nvmet_ana_sem);
 	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
@@ -1225,9 +1226,24 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
 	return 0;
 }
 
+bool nvmet_ns_host_allowed(struct nvmet_ns *ns, const char *hostnqn)
+{
+	struct nvmet_host_link *p;
+
+	lockdep_assert_held(&nvmet_config_sem);
+
+	list_for_each_entry(p, &ns->hosts, entry) {
+		if (!strcmp(nvmet_host_name(p->host), hostnqn))
+			return true;
+	}
+	return false;
+}
+
 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
 {
 	struct nvmet_host_link *p;
+	struct nvmet_ns *ns;
+	unsigned long idx;
 
 	lockdep_assert_held(&nvmet_config_sem);
 
@@ -1241,7 +1257,10 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
 			return true;
 	}
-
+	xa_for_each(&subsys->namespaces, idx, ns) {
+		if (nvmet_ns_host_allowed(ns, hostnqn))
+			return true;
+	}
 	return false;
 }
 
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 8776dd1a0490..7d2e5814cfa3 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -70,6 +70,9 @@ struct nvmet_ns {
 	struct nvmet_subsys	*subsys;
 	const char		*device_path;
 
+	struct list_head	hosts;
+
+	struct config_group	allowed_hosts_group;
 	struct config_group	device_group;
 	struct config_group	group;
 
@@ -518,6 +521,7 @@ extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
 extern u64 nvmet_ana_chgcnt;
 extern struct rw_semaphore nvmet_ana_sem;
 
+bool nvmet_ns_host_allowed(struct nvmet_ns *ns, const char *hostnqn);
 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);
 
 int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
-- 
2.29.2


* Re: [PATCH] nvmet: per-host namespaces masking
From: Christoph Hellwig @ 2021-02-05  9:32 UTC (permalink / raw)
  To: Hannes Reinecke
  Cc: Chaitanya Kulkarni, linux-nvme, Christoph Hellwig, Keith Busch,
	Sagi Grimberg

On Fri, Feb 05, 2021 at 10:29:26AM +0100, Hannes Reinecke wrote:
> Implement per-host namespaces masking, allowing different namespaces
> to be exposed to different hosts connecting to the same subsystem.
> The existing method of adding host NQNs to the subsystem's
> allowed_hosts takes precedence over the per-host namespace masking.

Why do you want to expose a single subsystem to multiple hosts?  This
creates all kinds of covert channels better avoided.

* Re: [PATCH] nvmet: per-host namespaces masking
From: Hannes Reinecke @ 2021-02-05  9:38 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Chaitanya Kulkarni, linux-nvme, Sagi Grimberg, Keith Busch

On 2/5/21 10:32 AM, Christoph Hellwig wrote:
> On Fri, Feb 05, 2021 at 10:29:26AM +0100, Hannes Reinecke wrote:
>> Implement per-host namespaces masking, allowing different namespaces
>> to be exposed to different hosts connecting to the same subsystem.
>> The existing method of adding host NQNs to the subsystem's
>> allowed_hosts takes precedence over the per-host namespace masking.
> 
> Why do you want to expose a single subsystem to multiple hosts?  This
> creates all kinds of covert channels better avoided.
> 
But the alternative is a subsystem per host.
Which leads to a proliferation of subsystems on the target.
And I wasn't quite keen on that.

Cheers,

Hannes
-- 
Dr. Hannes Reinecke                Kernel Storage Architect
hare@suse.de                              +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Felix Imendörffer

* Re: [PATCH] nvmet: per-host namespaces masking
From: Christoph Hellwig @ 2021-02-05  9:40 UTC (permalink / raw)
  To: Hannes Reinecke
  Cc: Chaitanya Kulkarni, linux-nvme, Christoph Hellwig, Keith Busch,
	Sagi Grimberg

On Fri, Feb 05, 2021 at 10:38:41AM +0100, Hannes Reinecke wrote:
> On 2/5/21 10:32 AM, Christoph Hellwig wrote:
>> On Fri, Feb 05, 2021 at 10:29:26AM +0100, Hannes Reinecke wrote:
>>> Implement per-host namespaces masking, allowing different namespaces
>>> to be exposed to different hosts connecting to the same subsystem.
>>> The existing method of adding host NQNs to the subsystem's
>>> allowed_hosts takes precedence over the per-host namespace masking.
>>
>> Why do you want to expose a single subsystem to multiple hosts?  This
>> creates all kinds of covert channels better avoided.
>>
> But the alternative is a subsystem per host.
> Which leads to a proliferation of subsystems on the target.

Yes, that was the choice we had in mind when writing the code.  The
subsystems are very cheap as they don't require any resources except
for some fairly small memory allocations.

* Re: [PATCH] nvmet: per-host namespaces masking
From: Sagi Grimberg @ 2021-02-05 18:42 UTC (permalink / raw)
  To: Hannes Reinecke, Christoph Hellwig
  Cc: linux-nvme, Chaitanya Kulkarni, Keith Busch


> Implement per-host namespaces masking, allowing different namespaces
> to be exposed to different hosts connecting to the same subsystem.
> The existing method of adding host NQNs to the subsystem's
> allowed_hosts takes precedence over the per-host namespace masking.

What is preventing a host from going ahead and issuing an I/O command
to a ns that exists in the subsystem but is not exposed to
it in the nslist?

I don't think this can even be considered without protection
against it. And given that you don't want to take the global
semaphore on each and every I/O, I'd say it's a bit more involved
than what you have here...
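
For illustration only, a minimal sketch of the kind of lock-free lookup
the I/O path would need (hypothetical helper, not part of this patch;
it assumes ns->hosts were converted to an RCU-protected list, with
list_add_tail_rcu()/list_del_rcu() and kfree_rcu() on the configfs
side):

	/* Hypothetical: per-ns host check without taking nvmet_config_sem */
	static bool nvmet_ns_host_allowed_rcu(struct nvmet_ns *ns,
					      const char *hostnqn)
	{
		struct nvmet_host_link *p;
		bool allowed = false;

		rcu_read_lock();
		list_for_each_entry_rcu(p, &ns->hosts, entry) {
			if (!strcmp(nvmet_host_name(p->host), hostnqn)) {
				allowed = true;
				break;
			}
		}
		rcu_read_unlock();
		return allowed;
	}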
