* [RFC PATCH 0/6] sysfs support for LightNVM
From: Matias Bjørling @ 2016-06-06 14:45 UTC (permalink / raw)
  To: linux-block, linux-kernel; +Cc: Matias Bjørling

The common way to expose a storage device is to export it through the
gendisk structure, which handles the device's visibility lifetime
towards user-space and carries the device-driver- and
block-layer-specific sysfs entries.

For LightNVM, no gendisk is exposed, which hides these entries.

This patchset enables LightNVM to expose a subset of the gendisk
entries by implementing a new non-gendisk device that exposes only the
sysfs entries relevant to the device driver and LightNVM, for example
the /mq and NVMe-specific entries.

The new non-gendisk device takes the place of the original gendisk.
Thus, a device previously exposed through /sys/class/nvme/nvme0n1 is
still exposed through the same directory, although only with the /mq,
lightnvm, and nvme-specific entries.

The LightNVM storage device can be found through sysfs by searching
for devices that have the "lightnvm" device type.
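
For illustration, a minimal user-space sketch of that lookup, assuming
the DEVTYPE key in the device's uevent file is populated from the
device type as usual; the path and helper are only an example:

#include <stdio.h>
#include <string.h>

/* Return 1 if the given sysfs device directory carries the "lightnvm"
 * device type, by scanning its uevent file for DEVTYPE=lightnvm. */
static int is_lightnvm_dev(const char *sysfs_dir)
{
	char path[256], line[128];
	FILE *f;
	int found = 0;

	snprintf(path, sizeof(path), "%s/uevent", sysfs_dir);
	f = fopen(path, "r");
	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "DEVTYPE=lightnvm", 16))
			found = 1;
	fclose(f);
	return found;
}

int main(void)
{
	const char *dir = "/sys/class/nvme/nvme0n1";

	printf("%s: %s\n", dir, is_lightnvm_dev(dir) ?
	       "lightnvm device" : "not a lightnvm device");
	return 0;
}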

Any feedback is appreciated.

Thank you,
Matias

Matias Bjørling (4):
  nvme: refactor namespaces to support non-gendisk devices
  null_blk: refactor to support non-gendisk devices
  blk-mq: register device instead of disk
  lightnvm: let drivers control the lifetime of nvm_dev

Simon Lund (2):
  lightnvm: expose device geometry through sysfs
  lightnvm: expose gennvm target type through sysfs

 block/blk-mq-sysfs.c         |  13 ++-
 block/blk-sysfs.c            |   4 +-
 drivers/block/null_blk.c     | 129 +++++++++++++++++-----------
 drivers/lightnvm/Makefile    |   2 +-
 drivers/lightnvm/core.c      |  52 +++++------
 drivers/lightnvm/gennvm.c    | 109 +++++++++++++++++++----
 drivers/lightnvm/lightnvm.h  |  35 ++++++++
 drivers/lightnvm/sysfs.c     | 200 +++++++++++++++++++++++++++++++++++++++++++
 drivers/md/dm.c              |   2 +-
 drivers/nvme/host/core.c     | 165 ++++++++++++++++++++---------------
 drivers/nvme/host/lightnvm.c |  29 ++++++-
 drivers/nvme/host/nvme.h     |  26 ++++--
 include/linux/blk-mq.h       |   4 +-
 include/linux/lightnvm.h     |  20 +++--
 14 files changed, 593 insertions(+), 197 deletions(-)
 create mode 100644 drivers/lightnvm/lightnvm.h
 create mode 100644 drivers/lightnvm/sysfs.c

-- 
2.1.4



* [RFC PATCH 1/6] nvme: refactor namespaces to support non-gendisk devices
From: Matias Bjørling @ 2016-06-06 14:45 UTC (permalink / raw)
  To: linux-block, linux-kernel; +Cc: Matias Bjørling

With LightNVM-enabled namespaces, the gendisk structure is not exposed
to the user. This prevents LightNVM users from accessing the NVMe
driver-specific sysfs entries and the LightNVM namespace geometry.

Refactor the revalidation process so that a namespace, instead of a
gendisk, is revalidated. This allows later patches to wire the sysfs
entries up to a non-gendisk namespace.

Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/nvme/host/core.c | 134 +++++++++++++++++++++++++++++------------------
 drivers/nvme/host/nvme.h |  16 +++++-
 2 files changed, 96 insertions(+), 54 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 643f457..f488729 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -601,42 +601,33 @@ static void nvme_config_discard(struct nvme_ns *ns)
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
 }
 
-static int nvme_revalidate_disk(struct gendisk *disk)
+static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 {
-	struct nvme_ns *ns = disk->private_data;
-	struct nvme_id_ns *id;
-	u8 lbaf, pi_type;
-	u16 old_ms;
-	unsigned short bs;
-
-	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
-		set_capacity(disk, 0);
-		return -ENODEV;
-	}
-	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
+	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
 		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
 				__func__);
 		return -ENODEV;
 	}
-	if (id->ncap == 0) {
-		kfree(id);
-		return -ENODEV;
-	}
 
-	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
-		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
-			dev_warn(disk_to_dev(ns->disk),
-				"%s: LightNVM init failure\n", __func__);
-			kfree(id);
-			return -ENODEV;
-		}
-		ns->type = NVME_NS_LIGHTNVM;
+	if ((*id)->ncap == 0) {
+		kfree(*id);
+		return -ENODEV;
 	}
 
 	if (ns->ctrl->vs >= NVME_VS(1, 1))
-		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
+		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
 	if (ns->ctrl->vs >= NVME_VS(1, 2))
-		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));
+		memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
+
+	return 0;
+}
+
+static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+{
+	struct nvme_ns *ns = disk->private_data;
+	u8 lbaf, pi_type;
+	u16 old_ms;
+	unsigned short bs;
 
 	old_ms = ns->ms;
 	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
@@ -675,8 +666,26 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
 		nvme_config_discard(ns);
 	blk_mq_unfreeze_queue(disk->queue);
+}
 
+static int nvme_revalidate_disk(struct gendisk *disk)
+{
+	struct nvme_ns *ns = disk->private_data;
+	struct nvme_id_ns *id = NULL;
+	int ret;
+
+	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
+		set_capacity(disk, 0);
+		return -ENODEV;
+	}
+
+	ret = nvme_revalidate_ns(ns, &id);
+	if (ret)
+		return ret;
+
+	__nvme_revalidate_disk(disk, id);
 	kfree(id);
+
 	return 0;
 }
 
@@ -1228,6 +1237,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
 	struct nvme_ns *ns;
 	struct gendisk *disk;
+	struct nvme_id_ns *id;
+	char disk_name[DISK_NAME_LEN];
 	int node = dev_to_node(ctrl->dev);
 
 	lockdep_assert_held(&ctrl->namespaces_mutex);
@@ -1247,44 +1258,63 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	ns->queue->queuedata = ns;
 	ns->ctrl = ctrl;
 
-	disk = alloc_disk_node(0, node);
-	if (!disk)
-		goto out_free_queue;
-
 	kref_init(&ns->kref);
 	ns->ns_id = nsid;
-	ns->disk = disk;
 	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
 
-
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	nvme_set_queue_limits(ctrl, ns->queue);
 
-	disk->major = nvme_major;
-	disk->first_minor = 0;
-	disk->fops = &nvme_fops;
-	disk->private_data = ns;
-	disk->queue = ns->queue;
-	disk->driverfs_dev = ctrl->device;
-	disk->flags = GENHD_FL_EXT_DEVT;
-	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
-
-	if (nvme_revalidate_disk(ns->disk))
-		goto out_free_disk;
+	if (nvme_revalidate_ns(ns, &id))
+		goto out_free_queue;
+
+	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
+
+	if (nvme_nvm_ns_supported(ns, id)) {
+		if (nvme_nvm_register(ns->queue, disk_name)) {
+			dev_warn(ctrl->dev,
+				"%s: LightNVM init failure\n", __func__);
+			goto out_free_id;
+		}
+
+		disk = alloc_disk_node(0, node);
+		if (!disk)
+			goto out_free_id;
+		memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
+		ns->disk = disk;
+		ns->type = NVME_NS_LIGHTNVM;
+	} else {
+		disk = alloc_disk_node(0, node);
+		if (!disk)
+			goto out_free_id;
+
+		disk->major = nvme_major;
+		disk->first_minor = 0;
+		disk->fops = &nvme_fops;
+		disk->private_data = ns;
+		disk->queue = ns->queue;
+		disk->driverfs_dev = ctrl->device;
+		disk->flags = GENHD_FL_EXT_DEVT;
+		memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
+		ns->disk = disk;
+
+		__nvme_revalidate_disk(disk, id);
+
+		add_disk(ns->disk);
+
+		if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
+							&nvme_ns_attr_group))
+			pr_warn("%s: failed to create sysfs group for identification\n",
+				ns->disk->disk_name);
+	}
 
 	list_add_tail(&ns->list, &ctrl->namespaces);
 	kref_get(&ctrl->kref);
-	if (ns->type == NVME_NS_LIGHTNVM)
-		return;
 
-	add_disk(ns->disk);
-	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
-					&nvme_ns_attr_group))
-		pr_warn("%s: failed to create sysfs group for identification\n",
-			ns->disk->disk_name);
+	kfree(id);
 	return;
- out_free_disk:
-	kfree(disk);
+ out_free_id:
+	kfree(id);
  out_free_queue:
 	blk_cleanup_queue(ns->queue);
  out_release_instance:
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f846da4..0d0c81d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -291,8 +291,16 @@ int nvme_sg_get_version_num(int __user *ip);
 
 #ifdef CONFIG_NVM
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
-int nvme_nvm_register(struct request_queue *q, char *disk_name);
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
+void nvme_nvm_unregister(struct nvme_ns *ns);
+
+static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
+{
+	if (dev->type->devnode)
+		return dev_to_disk(dev)->private_data;
+
+	return (container_of(dev, struct nvm_dev, dev))->private_data;
+}
 #else
 static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
 {
@@ -305,6 +313,10 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i
 {
 	return 0;
 }
+static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
+{
+	return dev_to_disk(dev)->private_data;
+}
 #endif /* CONFIG_NVM */
 
 int __init nvme_core_init(void);
-- 
2.1.4



* [RFC PATCH 2/6] null_blk: refactor to support non-gendisk devices
From: Matias Bjørling @ 2016-06-06 14:45 UTC (permalink / raw)
  To: linux-block, linux-kernel; +Cc: Matias Bjørling

With LightNVM-enabled devices, the gendisk structure is not exposed
to the user. This hides the device-driver-specific sysfs entries and
prevents binding of LightNVM geometry information to the device.

Refactor the device registration process so that gendisk and
non-gendisk devices are easily managed.

Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/block/null_blk.c | 111 ++++++++++++++++++++++++++---------------------
 1 file changed, 62 insertions(+), 49 deletions(-)

diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index cab9759..1ffbf4d 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -414,23 +414,6 @@ static void cleanup_queues(struct nullb *nullb)
 	kfree(nullb->queues);
 }
 
-static void null_del_dev(struct nullb *nullb)
-{
-	list_del_init(&nullb->list);
-
-	if (use_lightnvm)
-		nvm_unregister(nullb->disk_name);
-	else
-		del_gendisk(nullb->disk);
-	blk_cleanup_queue(nullb->q);
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
-	if (!use_lightnvm)
-		put_disk(nullb->disk);
-	cleanup_queues(nullb);
-	kfree(nullb);
-}
-
 #ifdef CONFIG_NVM
 
 static void null_lnvm_end_io(struct request *rq, int error)
@@ -564,10 +547,41 @@ static struct nvm_dev_ops null_lnvm_dev_ops = {
 	/* Simulate nvme protocol restriction */
 	.max_phys_sect		= 64,
 };
+
+static int null_nvm_register(struct nullb *nullb)
+{
+	return nvm_register(nullb->q, nullb->disk_name, &null_lnvm_dev_ops);
+}
+
+static void null_nvm_unregister(struct nullb *nullb)
+{
+	nvm_unregister(nullb->disk_name);
+}
 #else
-static struct nvm_dev_ops null_lnvm_dev_ops;
+static int null_nvm_register(struct nullb *nullb)
+{
+	return -EINVAL;
+}
+static void null_nvm_unregister(struct nullb *nullb) {}
 #endif /* CONFIG_NVM */
 
+static void null_del_dev(struct nullb *nullb)
+{
+	list_del_init(&nullb->list);
+
+	if (use_lightnvm)
+		null_nvm_unregister(nullb);
+	else
+		del_gendisk(nullb->disk);
+	blk_cleanup_queue(nullb->q);
+	if (queue_mode == NULL_Q_MQ)
+		blk_mq_free_tag_set(&nullb->tag_set);
+	if (!use_lightnvm)
+		put_disk(nullb->disk);
+	cleanup_queues(nullb);
+	kfree(nullb);
+}
+
 static int null_open(struct block_device *bdev, fmode_t mode)
 {
 	return 0;
@@ -640,11 +654,33 @@ static int init_driver_queues(struct nullb *nullb)
 	return 0;
 }
 
-static int null_add_dev(void)
+static int null_gendisk_register(struct nullb *nullb)
 {
 	struct gendisk *disk;
-	struct nullb *nullb;
 	sector_t size;
+
+	disk = nullb->disk = alloc_disk_node(1, home_node);
+	if (!disk) {
+		return -ENOMEM;
+	}
+	size = gb * 1024 * 1024 * 1024ULL;
+	set_capacity(disk, size >> 9);
+
+	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
+	disk->major		= null_major;
+	disk->first_minor	= nullb->index;
+	disk->fops		= &null_fops;
+	disk->private_data	= nullb;
+	disk->queue		= nullb->q;
+	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
+	add_disk(disk);
+	return 0;
+}
+
+static int null_add_dev(void)
+{
+	struct nullb *nullb;
 	int rv;
 
 	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
@@ -716,42 +752,19 @@ static int null_add_dev(void)
 
 	sprintf(nullb->disk_name, "nullb%d", nullb->index);
 
-	if (use_lightnvm) {
-		rv = nvm_register(nullb->q, nullb->disk_name,
-							&null_lnvm_dev_ops);
-		if (rv)
-			goto out_cleanup_blk_queue;
-		goto done;
-	}
+	if (use_lightnvm)
+		rv = null_nvm_register(nullb);
+	else
+		rv = null_gendisk_register(nullb);
 
-	disk = nullb->disk = alloc_disk_node(1, home_node);
-	if (!disk) {
-		rv = -ENOMEM;
-		goto out_cleanup_lightnvm;
-	}
-	size = gb * 1024 * 1024 * 1024ULL;
-	set_capacity(disk, size >> 9);
+	if (rv)
+		goto out_cleanup_blk_queue;
 
-	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
-	disk->major		= null_major;
-	disk->first_minor	= nullb->index;
-	disk->fops		= &null_fops;
-	disk->private_data	= nullb;
-	disk->queue		= nullb->q;
-	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
-
-	add_disk(disk);
-
-done:
 	mutex_lock(&lock);
 	list_add_tail(&nullb->list, &nullb_list);
 	mutex_unlock(&lock);
 
 	return 0;
-
-out_cleanup_lightnvm:
-	if (use_lightnvm)
-		nvm_unregister(nullb->disk_name);
 out_cleanup_blk_queue:
 	blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
-- 
2.1.4



* [RFC PATCH 3/6] blk-mq: register device instead of disk
From: Matias Bjørling @ 2016-06-06 14:46 UTC (permalink / raw)
  To: linux-block, linux-kernel; +Cc: Matias Bjørling

Enable devices without a gendisk instance to register themselves with
blk-mq and expose the associated multi-queue sysfs entries.
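
As a minimal usage sketch, a hypothetical driver that owns a struct
device and a blk-mq request_queue but no gendisk could hook up the mq
sysfs entries as below (struct foo_dev and its helpers are invented
for illustration):

#include <linux/blk-mq.h>
#include <linux/device.h>

/* Hypothetical driver state: any struct device paired with a blk-mq
 * request_queue can now carry the mq sysfs entries, gendisk or not. */
struct foo_dev {
	struct device dev;
	struct request_queue *queue;
};

static int foo_register_mq_sysfs(struct foo_dev *foo)
{
	return blk_mq_register_dev(&foo->dev, foo->queue);
}

static void foo_unregister_mq_sysfs(struct foo_dev *foo)
{
	blk_mq_unregister_dev(&foo->dev, foo->queue);
}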

Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 block/blk-mq-sysfs.c   | 13 +++++--------
 block/blk-sysfs.c      |  4 ++--
 drivers/md/dm.c        |  2 +-
 include/linux/blk-mq.h |  4 ++--
 4 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 4ea4dd8..f67b5c1 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -380,9 +380,8 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 	return ret;
 }
 
-void blk_mq_unregister_disk(struct gendisk *disk)
+void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 {
-	struct request_queue *q = disk->queue;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	int i, j;
@@ -402,7 +401,7 @@ void blk_mq_unregister_disk(struct gendisk *disk)
 	kobject_del(&q->mq_kobj);
 	kobject_put(&q->mq_kobj);
 
-	kobject_put(&disk_to_dev(disk)->kobj);
+	kobject_put(&dev->kobj);
 
 	q->mq_sysfs_init_done = false;
 	blk_mq_enable_hotplug();
@@ -426,10 +425,8 @@ static void blk_mq_sysfs_init(struct request_queue *q)
 	}
 }
 
-int blk_mq_register_disk(struct gendisk *disk)
+int blk_mq_register_dev(struct device *dev, struct request_queue *q)
 {
-	struct device *dev = disk_to_dev(disk);
-	struct request_queue *q = disk->queue;
 	struct blk_mq_hw_ctx *hctx;
 	int ret, i;
 
@@ -450,7 +447,7 @@ int blk_mq_register_disk(struct gendisk *disk)
 	}
 
 	if (ret)
-		blk_mq_unregister_disk(disk);
+		blk_mq_unregister_dev(dev, q);
 	else
 		q->mq_sysfs_init_done = true;
 out:
@@ -458,7 +455,7 @@ out:
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(blk_mq_register_disk);
+EXPORT_SYMBOL_GPL(blk_mq_register_dev);
 
 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 995b58d..a998c54 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -654,7 +654,7 @@ int blk_register_queue(struct gendisk *disk)
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
 	if (q->mq_ops)
-		blk_mq_register_disk(disk);
+		blk_mq_register_dev(disk_to_dev(disk), q);
 
 	if (!q->request_fn)
 		return 0;
@@ -679,7 +679,7 @@ void blk_unregister_queue(struct gendisk *disk)
 		return;
 
 	if (q->mq_ops)
-		blk_mq_unregister_disk(disk);
+		blk_mq_unregister_dev(disk_to_dev(disk), q);
 
 	if (q->request_fn)
 		elv_unregister_queue(q);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3d3ac13..6a4b596 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2797,7 +2797,7 @@ static int dm_mq_init_request_queue(struct mapped_device *md,
 	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
-	blk_mq_register_disk(md->disk);
+	blk_mq_register_dev(disk_to_dev(md->disk), q);
 
 	return 0;
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9ac9799..9a37515 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -176,8 +176,8 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 						  struct request_queue *q);
-int blk_mq_register_disk(struct gendisk *);
-void blk_mq_unregister_disk(struct gendisk *);
+int blk_mq_register_dev(struct device *, struct request_queue *);
+void blk_mq_unregister_dev(struct device *, struct request_queue *);
 
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
 void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
-- 
2.1.4



* [RFC PATCH 4/6] lightnvm: let drivers control the lifetime of nvm_dev
From: Matias Bjørling @ 2016-06-06 14:46 UTC (permalink / raw)
  To: linux-block, linux-kernel; +Cc: Matias Bjørling

LightNVM does not currently expose the device-driver-specific sysfs
entries to user-space, as the device driver skips the gendisk
initialization.

To expose the device driver's sysfs entries, we need a struct device
to attach them to. To allow both the device driver and LightNVM to
access the same struct device, we need the device driver to track the
lifetime of the nvm_dev structure.

This patch refactors the two users of LightNVM (NVMe and null_blk) to
allocate and free nvm_dev themselves, and finally removes the gendisk
usage when a LightNVM device is used.
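
For reference, the resulting driver-side contract, condensed from the
null_blk and NVMe changes below; the "mydrv" names are placeholders
for the driver's own state:

#include <linux/genhd.h>
#include <linux/lightnvm.h>
#include <linux/slab.h>
#include <linux/string.h>

struct mydrv {
	struct request_queue *queue;
	struct nvm_dev *ndev;
	char disk_name[DISK_NAME_LEN];
	int node;
};

static struct nvm_dev_ops mydrv_lnvm_dev_ops; /* ops filled in elsewhere */

/* The driver allocates the nvm_dev, fills it in and keeps the pointer
 * for the whole device lifetime. */
static int mydrv_nvm_register(struct mydrv *drv)
{
	struct nvm_dev *dev;
	int ret;

	dev = nvm_alloc_dev(drv->node);
	if (!dev)
		return -ENOMEM;

	dev->q = drv->queue;
	memcpy(dev->name, drv->disk_name, DISK_NAME_LEN);
	dev->ops = &mydrv_lnvm_dev_ops;

	ret = nvm_register(dev);
	if (ret) {
		kfree(dev);
		return ret;
	}

	drv->ndev = dev;
	return 0;
}

/* On teardown, the driver unregisters and frees what it allocated. */
static void mydrv_nvm_unregister(struct mydrv *drv)
{
	nvm_unregister(drv->ndev);
	kfree(drv->ndev);
}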

Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/block/null_blk.c     | 22 ++++++++++++++++++++--
 drivers/lightnvm/core.c      | 34 +++++++---------------------------
 drivers/nvme/host/core.c     | 34 ++++++++++++++--------------------
 drivers/nvme/host/lightnvm.c | 21 +++++++++++++++++----
 drivers/nvme/host/nvme.h     |  8 +++++---
 include/linux/lightnvm.h     | 15 +++++++++------
 6 files changed, 72 insertions(+), 62 deletions(-)

diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 1ffbf4d..55b52d8 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -34,6 +34,7 @@ struct nullb {
 	unsigned int index;
 	struct request_queue *q;
 	struct gendisk *disk;
+	struct nvm_dev *ndev;
 	struct blk_mq_tag_set tag_set;
 	struct hrtimer timer;
 	unsigned int queue_depth;
@@ -550,12 +551,29 @@ static struct nvm_dev_ops null_lnvm_dev_ops = {
 
 static int null_nvm_register(struct nullb *nullb)
 {
-	return nvm_register(nullb->q, nullb->disk_name, &null_lnvm_dev_ops);
+	struct nvm_dev *dev;
+	int rv;
+
+	dev = nvm_alloc_dev(0);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->q = nullb->q;
+	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
+	dev->ops = &null_lnvm_dev_ops;
+
+	rv = nvm_register(dev);
+	if (rv) {
+		kfree(dev);
+		return rv;
+	}
+	nullb->ndev = dev;
+	return 0;
 }
 
 static void null_nvm_unregister(struct nullb *nullb)
 {
-	nvm_unregister(nullb->disk_name);
+	nvm_unregister(nullb->ndev);
 }
 #else
 static int null_nvm_register(struct nullb *nullb)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 8a5c2b2..5736c59 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -655,23 +655,15 @@ static void nvm_exit(struct nvm_dev *dev)
 	pr_info("nvm: successfully unloaded\n");
 }
 
-int nvm_register(struct request_queue *q, char *disk_name,
-							struct nvm_dev_ops *ops)
+struct nvm_dev *nvm_alloc_dev(int node)
+{
+	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+}
+
+int nvm_register(struct nvm_dev *dev)
 {
-	struct nvm_dev *dev;
 	int ret;
 
-	if (!ops->identity)
-		return -EINVAL;
-
-	dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
-	if (!dev)
-		return -ENOMEM;
-
-	dev->q = q;
-	dev->ops = ops;
-	strncpy(dev->name, disk_name, DISK_NAME_LEN);
-
 	ret = nvm_init(dev);
 	if (ret)
 		goto err_init;
@@ -709,29 +701,17 @@ int nvm_register(struct request_queue *q, char *disk_name,
 	return 0;
 err_init:
 	kfree(dev->lun_map);
-	kfree(dev);
 	return ret;
 }
 EXPORT_SYMBOL(nvm_register);
 
-void nvm_unregister(char *disk_name)
+void nvm_unregister(struct nvm_dev *dev)
 {
-	struct nvm_dev *dev;
-
 	down_write(&nvm_lock);
-	dev = nvm_find_nvm_dev(disk_name);
-	if (!dev) {
-		pr_err("nvm: could not find device %s to unregister\n",
-								disk_name);
-		up_write(&nvm_lock);
-		return;
-	}
-
 	list_del(&dev->devices);
 	up_write(&nvm_lock);
 
 	nvm_exit(dev);
-	kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f488729..ff20f1c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -62,12 +62,14 @@ static void nvme_free_ns(struct kref *kref)
 {
 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
 
-	if (ns->type == NVME_NS_LIGHTNVM)
-		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
+	if (ns->ndev)
+		nvme_nvm_unregister(ns);
 
-	spin_lock(&dev_list_lock);
-	ns->disk->private_data = NULL;
-	spin_unlock(&dev_list_lock);
+	if (ns->disk) {
+		spin_lock(&dev_list_lock);
+		ns->disk->private_data = NULL;
+		spin_unlock(&dev_list_lock);
+	}
 
 	put_disk(ns->disk);
 	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
@@ -604,8 +606,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
 static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 {
 	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
-		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
-				__func__);
+		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
 		return -ENODEV;
 	}
 
@@ -1271,18 +1272,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
 
 	if (nvme_nvm_ns_supported(ns, id)) {
-		if (nvme_nvm_register(ns->queue, disk_name)) {
-			dev_warn(ctrl->dev,
-				"%s: LightNVM init failure\n", __func__);
+		if (nvme_nvm_register(ns, disk_name, node)) {
+			dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
+								__func__);
 			goto out_free_id;
 		}
-
-		disk = alloc_disk_node(0, node);
-		if (!disk)
-			goto out_free_id;
-		memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
-		ns->disk = disk;
-		ns->type = NVME_NS_LIGHTNVM;
 	} else {
 		disk = alloc_disk_node(0, node);
 		if (!disk)
@@ -1328,7 +1322,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
 
-	if (ns->disk->flags & GENHD_FL_UP) {
+	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
 		if (blk_get_integrity(ns->disk))
 			blk_integrity_unregister(ns->disk);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
@@ -1349,7 +1343,7 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	ns = nvme_find_ns(ctrl, nsid);
 	if (ns) {
-		if (revalidate_disk(ns->disk))
+		if (ns->disk && revalidate_disk(ns->disk))
 			nvme_ns_remove(ns);
 	} else
 		nvme_alloc_ns(ctrl, nsid);
@@ -1559,7 +1553,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 		 * Revalidating a dead namespace sets capacity to 0. This will
 		 * end buffered writers dirtying pages that can't be synced.
 		 */
-		if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+		if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags))
 			revalidate_disk(ns->disk);
 
 		blk_set_queue_dying(ns->queue);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index a0af055..cd406c5 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -592,14 +592,27 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.max_phys_sect		= 64,
 };
 
-int nvme_nvm_register(struct request_queue *q, char *disk_name)
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
 {
-	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
+	struct request_queue *q = ns->queue;
+	struct nvm_dev *dev;
+
+	dev = nvm_alloc_dev(node);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->q = q;
+	memcpy(dev->name, disk_name, DISK_NAME_LEN);
+	dev->ops = &nvme_nvm_dev_ops;
+	ns->ndev = dev;
+
+	return nvm_register(dev);
 }
 
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
+void nvme_nvm_unregister(struct nvme_ns *ns)
 {
-	nvm_unregister(disk_name);
+	nvm_unregister(ns->ndev);
+	kfree(ns->ndev);
 }
 
 /* move to shared place when used in multiple places. */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0d0c81d..d62c6fd 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
+#include <linux/lightnvm.h>
 
 enum {
 	/*
@@ -110,6 +111,7 @@ struct nvme_ns {
 	struct nvme_ctrl *ctrl;
 	struct request_queue *queue;
 	struct gendisk *disk;
+	struct nvm_dev *ndev;
 	struct kref kref;
 	int instance;
 
@@ -121,7 +123,6 @@ struct nvme_ns {
 	u16 ms;
 	bool ext;
 	u8 pi_type;
-	int type;
 	unsigned long flags;
 
 #define NVME_NS_REMOVING 0
@@ -302,12 +303,13 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 	return (container_of(dev, struct nvm_dev, dev))->private_data;
 }
 #else
-static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
+static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
+								int node)
 {
 	return 0;
 }
 
-static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
+static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
 
 static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ba78b83..5afc263 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -524,9 +524,9 @@ extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
 								unsigned long);
 extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
 
-extern int nvm_register(struct request_queue *, char *,
-						struct nvm_dev_ops *);
-extern void nvm_unregister(char *);
+extern struct nvm_dev *nvm_alloc_dev(int);
+extern int nvm_register(struct nvm_dev *);
+extern void nvm_unregister(struct nvm_dev *);
 
 void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
 
@@ -575,11 +575,14 @@ extern int nvm_dev_factory(struct nvm_dev *, int flags);
 #else /* CONFIG_NVM */
 struct nvm_dev_ops;
 
-static inline int nvm_register(struct request_queue *q, char *disk_name,
-							struct nvm_dev_ops *ops)
+static inline struct nvm_dev *nvm_alloc_dev(int node)
+{
+	return ERR_PTR(-EINVAL);
+}
+static inline int nvm_register(struct nvm_dev *dev)
 {
 	return -EINVAL;
 }
-static inline void nvm_unregister(char *disk_name) {}
+static inline void nvm_unregister(struct nvm_dev *dev) {}
 #endif /* CONFIG_NVM */
 #endif /* LIGHTNVM.H */
-- 
2.1.4



* [RFC PATCH 5/6] lightnvm: expose device geometry through sysfs
From: Matias Bjørling @ 2016-06-06 14:46 UTC (permalink / raw)
  To: linux-block, linux-kernel; +Cc: Simon Lund, Matias Bjørling

From: Simon Lund <slund@cnexlabs.com>

For a host to access an Open-Channel SSD, it has to know its geometry,
so that it reads and writes within the appropriate device bounds.

The geometry information is currently kept within the kernel and not
exported to user-space. This patch exposes the configuration through
sysfs and enables user-space libraries, such as liblightnvm, to use it
to obtain the geometry of an Open-Channel SSD.

The sysfs entries are stored within the device hierarchy, and can be
found using the "lightnvm" device type.

An example configuration looks like this:

/sys/class/nvme/
└── nvme0n1
   └── lightnvm
      ├── capabilities: 3
      ├── device_mode: 1
      ├── channel_parallelism: 0
      ├── erase_max: 1000000
      ├── erase_typ: 1000000
      ├── flash_media_type: 0
      ├── media_capabilities: 0x00000001
      ├── media_type: 0
      ├── multiplane_modes: 0x00010101
      ├── num_blocks: 1022
      ├── num_channels: 1
      ├── num_luns: 4
      ├── num_pages: 64
      ├── num_planes: 1
      ├── page_size: 4096
      ├── prog_max: 100000
      ├── prog_typ: 100000
      ├── read_max: 10000
      ├── read_typ: 10000
      ├── oob_sector_size: 0
      ├── hw_sector_size: 4096
      ├── media_manager: gennvm
      ├── num_groups: 1
      ├── ppa_format: 0x380830082808001010102008
      ├── vendor_opcode: 0
      └── version: 1
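
To illustrate how a library such as liblightnvm could consume these
entries, a small read-only sketch follows; the paths mirror the layout
above and the helper is only an example:

#include <stdio.h>

/* Read a single geometry attribute as an unsigned integer. The device
 * name and sysfs layout follow the example above. */
static unsigned int nvm_geo_attr(const char *dev, const char *name)
{
	char path[256];
	unsigned int val = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/nvme/%s/lightnvm/%s", dev, name);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%u", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	printf("num_channels=%u num_luns=%u num_blocks=%u\n",
	       nvm_geo_attr("nvme0n1", "num_channels"),
	       nvm_geo_attr("nvme0n1", "num_luns"),
	       nvm_geo_attr("nvme0n1", "num_blocks"));
	return 0;
}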

Signed-off-by: Simon Lund <slund@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/lightnvm/Makefile    |   2 +-
 drivers/lightnvm/core.c      |  20 +++--
 drivers/lightnvm/lightnvm.h  |  35 ++++++++
 drivers/lightnvm/sysfs.c     | 200 +++++++++++++++++++++++++++++++++++++++++++
 drivers/nvme/host/core.c     |  13 +--
 drivers/nvme/host/lightnvm.c |  12 ++-
 drivers/nvme/host/nvme.h     |   6 +-
 include/linux/lightnvm.h     |   3 +
 8 files changed, 274 insertions(+), 17 deletions(-)
 create mode 100644 drivers/lightnvm/lightnvm.h
 create mode 100644 drivers/lightnvm/sysfs.c

diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index a7a0a22..1f6b652 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
 # Makefile for Open-Channel SSDs.
 #
 
-obj-$(CONFIG_NVM)		:= core.o sysblk.o
+obj-$(CONFIG_NVM)		:= core.o sysblk.o sysfs.o
 obj-$(CONFIG_NVM_GENNVM) 	+= gennvm.o
 obj-$(CONFIG_NVM_RRPC)		+= rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 5736c59..f71fbb9 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -27,6 +27,8 @@
 #include <linux/lightnvm.h>
 #include <linux/sched/sysctl.h>
 
+#include "lightnvm.h"
+
 static LIST_HEAD(nvm_tgt_types);
 static DECLARE_RWSEM(nvm_tgtt_lock);
 static LIST_HEAD(nvm_mgrs);
@@ -593,15 +595,19 @@ static void nvm_free_mgr(struct nvm_dev *dev)
 	dev->mt = NULL;
 }
 
-static void nvm_free(struct nvm_dev *dev)
+void nvm_free(struct nvm_dev *dev)
 {
 	if (!dev)
 		return;
 
 	nvm_free_mgr(dev);
 
+	if (dev->dma_pool)
+		dev->ops->destroy_dma_pool(dev->dma_pool);
+
 	kfree(dev->lptbl);
 	kfree(dev->lun_map);
+	kfree(dev);
 }
 
 static int nvm_init(struct nvm_dev *dev)
@@ -648,11 +654,7 @@ err:
 
 static void nvm_exit(struct nvm_dev *dev)
 {
-	if (dev->dma_pool)
-		dev->ops->destroy_dma_pool(dev->dma_pool);
-	nvm_free(dev);
-
-	pr_info("nvm: successfully unloaded\n");
+	nvm_sysfs_unregister_dev(dev);
 }
 
 struct nvm_dev *nvm_alloc_dev(int node)
@@ -683,6 +685,10 @@ int nvm_register(struct nvm_dev *dev)
 		}
 	}
 
+	ret = nvm_sysfs_register_dev(dev);
+	if (ret)
+		goto err_ppalist;
+
 	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
 		ret = nvm_get_sysblock(dev, &dev->sb);
 		if (!ret)
@@ -699,6 +705,8 @@ int nvm_register(struct nvm_dev *dev)
 	up_write(&nvm_lock);
 
 	return 0;
+err_ppalist:
+	dev->ops->destroy_dma_pool(dev->dma_pool);
 err_init:
 	kfree(dev->lun_map);
 	return ret;
diff --git a/drivers/lightnvm/lightnvm.h b/drivers/lightnvm/lightnvm.h
new file mode 100644
index 0000000..93f1aac
--- /dev/null
+++ b/drivers/lightnvm/lightnvm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 CNEX Labs. All rights reserved.
+ * Initial release: Matias Bjorling <matias@cnexlabs.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#ifndef LIGHTNVM_H
+#define LIGHTNVM_H
+
+#include <linux/lightnvm.h>
+
+/* core -> sysfs.c */
+int nvm_sysfs_register_dev(struct nvm_dev *);
+void nvm_sysfs_unregister_dev(struct nvm_dev *);
+int nvm_sysfs_register(void);
+void nvm_sysfs_unregister(void);
+
+/* sysfs > core */
+void nvm_free(struct nvm_dev *);
+
+#endif
diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c
new file mode 100644
index 0000000..82d340a
--- /dev/null
+++ b/drivers/lightnvm/sysfs.c
@@ -0,0 +1,200 @@
+#include <linux/kernel.h>
+#include <linux/lightnvm.h>
+#include <linux/miscdevice.h>
+#include <linux/kobject.h>
+#include <linux/blk-mq.h>
+
+#include "lightnvm.h"
+
+static ssize_t nvm_dev_attr_show(struct device *dev,
+				 struct device_attribute *dattr, char *page)
+{
+	struct nvm_dev *ndev = container_of(dev, struct nvm_dev, dev);
+	struct nvm_id *id = &ndev->identity;
+	struct nvm_id_group *grp = &id->groups[0];
+	struct attribute *attr = &dattr->attr;
+
+	if (strcmp(attr->name, "version") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
+	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
+	} else if (strcmp(attr->name, "num_groups") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->cgrps);
+	} else if (strcmp(attr->name, "capabilities") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
+	} else if (strcmp(attr->name, "device_mode") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
+	} else if (strcmp(attr->name, "media_manager") == 0) {
+		if (!ndev->mt)
+			return scnprintf(page, PAGE_SIZE, "%s\n", "none");
+		return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
+	} else if (strcmp(attr->name, "ppa_format") == 0) {
+		return scnprintf(page, PAGE_SIZE,
+			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			id->ppaf.ch_offset, id->ppaf.ch_len,
+			id->ppaf.lun_offset, id->ppaf.lun_len,
+			id->ppaf.pln_offset, id->ppaf.pln_len,
+			id->ppaf.blk_offset, id->ppaf.blk_len,
+			id->ppaf.pg_offset, id->ppaf.pg_len,
+			id->ppaf.sect_offset, id->ppaf.sect_len);
+	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
+	} else if (strcmp(attr->name, "flash_media_type") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
+	} else if (strcmp(attr->name, "num_channels") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
+	} else if (strcmp(attr->name, "num_luns") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
+	} else if (strcmp(attr->name, "num_planes") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
+	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+	} else if (strcmp(attr->name, "num_pages") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
+	} else if (strcmp(attr->name, "page_size") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
+	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
+	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
+	} else if (strcmp(attr->name, "read_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
+	} else if (strcmp(attr->name, "read_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
+	} else if (strcmp(attr->name, "prog_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
+	} else if (strcmp(attr->name, "prog_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
+	} else if (strcmp(attr->name, "erase_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
+	} else if (strcmp(attr->name, "erase_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
+	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
+		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
+	} else if (strcmp(attr->name, "media_capabilities") == 0) {
+		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
+	} else if (strcmp(attr->name, "channel_parallelism") == 0) {/* u16 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->cpar);
+	} else {
+		return scnprintf(page,
+				 PAGE_SIZE,
+				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+				 attr->name);
+	}
+}
+
+#define NVM_DEV_ATTR_RO(_name) 					\
+static DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
+
+NVM_DEV_ATTR_RO(version);
+NVM_DEV_ATTR_RO(vendor_opcode);
+NVM_DEV_ATTR_RO(num_groups);
+NVM_DEV_ATTR_RO(capabilities);
+NVM_DEV_ATTR_RO(device_mode);
+NVM_DEV_ATTR_RO(ppa_format);
+NVM_DEV_ATTR_RO(media_manager);
+
+NVM_DEV_ATTR_RO(media_type);
+NVM_DEV_ATTR_RO(flash_media_type);
+NVM_DEV_ATTR_RO(num_channels);
+NVM_DEV_ATTR_RO(num_luns);
+NVM_DEV_ATTR_RO(num_planes);
+NVM_DEV_ATTR_RO(num_blocks);
+NVM_DEV_ATTR_RO(num_pages);
+NVM_DEV_ATTR_RO(page_size);
+NVM_DEV_ATTR_RO(hw_sector_size);
+NVM_DEV_ATTR_RO(oob_sector_size);
+NVM_DEV_ATTR_RO(read_typ);
+NVM_DEV_ATTR_RO(read_max);
+NVM_DEV_ATTR_RO(prog_typ);
+NVM_DEV_ATTR_RO(prog_max);
+NVM_DEV_ATTR_RO(erase_typ);
+NVM_DEV_ATTR_RO(erase_max);
+NVM_DEV_ATTR_RO(multiplane_modes);
+NVM_DEV_ATTR_RO(media_capabilities);
+NVM_DEV_ATTR_RO(channel_parallelism);
+
+#define NVM_DEV_ATTR(_name) (dev_attr_##_name)
+
+static struct attribute *nvm_dev_attrs[] = {
+	&dev_attr_version.attr,
+	&dev_attr_vendor_opcode.attr,
+	&dev_attr_num_groups.attr,
+	&dev_attr_capabilities.attr,
+	&dev_attr_device_mode.attr,
+	&dev_attr_media_manager.attr,
+
+	&dev_attr_ppa_format.attr,
+	&dev_attr_media_type.attr,
+	&dev_attr_flash_media_type.attr,
+	&dev_attr_num_channels.attr,
+	&dev_attr_num_luns.attr,
+	&dev_attr_num_planes.attr,
+	&dev_attr_num_blocks.attr,
+	&dev_attr_num_pages.attr,
+	&dev_attr_page_size.attr,
+	&dev_attr_hw_sector_size.attr,
+	&dev_attr_oob_sector_size.attr,
+	&dev_attr_read_typ.attr,
+	&dev_attr_read_max.attr,
+	&dev_attr_prog_typ.attr,
+	&dev_attr_prog_max.attr,
+	&dev_attr_erase_typ.attr,
+	&dev_attr_erase_max.attr,
+	&dev_attr_multiplane_modes.attr,
+	&dev_attr_media_capabilities.attr,
+	&dev_attr_channel_parallelism.attr,
+	NULL,
+};
+
+static struct attribute_group nvm_dev_attr_group = {
+	.name = "lightnvm",
+	.attrs = nvm_dev_attrs,
+};
+
+
+static const struct attribute_group *nvm_dev_attr_groups[] = {
+	&nvm_dev_attr_group,
+	NULL,
+};
+
+static void nvm_dev_release(struct device *device)
+{
+	struct nvm_dev *dev = container_of(device, struct nvm_dev, dev);
+	struct request_queue *q = dev->q;
+
+	pr_debug("nvm/sysfs: `nvm_dev_release`\n");
+
+	blk_mq_unregister_dev(device, q);
+
+	nvm_free(dev);
+}
+
+static struct device_type nvm_type = {
+	.name		= "lightnvm",
+	.groups		= nvm_dev_attr_groups,
+	.release	= nvm_dev_release,
+};
+
+int nvm_sysfs_register_dev(struct nvm_dev *dev)
+{
+	if (!dev->parent_dev)
+		return 0;
+
+	dev->dev.parent = dev->parent_dev;
+	dev_set_name(&dev->dev, "%s", dev->name);
+	dev->dev.class = dev->parent_dev->class;
+	dev->dev.type = &nvm_type;
+	device_initialize(&dev->dev);
+	device_add(&dev->dev);
+
+	blk_mq_register_dev(&dev->dev, dev->q);
+
+	return 0;
+}
+
+void nvm_sysfs_unregister_dev(struct nvm_dev *dev)
+{
+	if (dev && dev->parent_dev)
+		kobject_put(&dev->dev.kobj);
+}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ff20f1c..0e63546 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1095,7 +1095,7 @@ static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 								char *buf)
 {
-	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	int serial_len = sizeof(ctrl->serial);
 	int model_len = sizeof(ctrl->model);
@@ -1119,7 +1119,7 @@ static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
 								char *buf)
 {
-	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 	return sprintf(buf, "%pU\n", ns->uuid);
 }
 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
@@ -1127,7 +1127,7 @@ static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
 								char *buf)
 {
-	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 	return sprintf(buf, "%8phd\n", ns->eui);
 }
 static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
@@ -1135,7 +1135,7 @@ static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
 								char *buf)
 {
-	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 	return sprintf(buf, "%d\n", ns->ns_id);
 }
 static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
@@ -1152,7 +1152,7 @@ static umode_t nvme_attrs_are_visible(struct kobject *kobj,
 		struct attribute *a, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
 	if (a == &dev_attr_uuid.attr) {
 		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
@@ -1272,7 +1272,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
 
 	if (nvme_nvm_ns_supported(ns, id)) {
-		if (nvme_nvm_register(ns, disk_name, node)) {
+		if (nvme_nvm_register(ns, disk_name, node,
+							&nvme_ns_attr_group)) {
 			dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
 								__func__);
 			goto out_free_id;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index cd406c5..c9c7189 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -592,10 +592,12 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.max_phys_sect		= 64,
 };
 
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
+		      const struct attribute_group *attrs)
 {
 	struct request_queue *q = ns->queue;
 	struct nvm_dev *dev;
+	int ret;
 
 	dev = nvm_alloc_dev(node);
 	if (!dev)
@@ -604,9 +606,15 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
 	dev->q = q;
 	memcpy(dev->name, disk_name, DISK_NAME_LEN);
 	dev->ops = &nvme_nvm_dev_ops;
+	dev->parent_dev = ns->ctrl->device;
+	dev->private_data = ns;
 	ns->ndev = dev;
 
-	return nvm_register(dev);
+	ret = nvm_register(dev);
+	if (sysfs_create_group(&dev->dev.kobj, attrs))
+		pr_warn("%s: failed to create sysfs group for identification\n",
+			disk_name);
+	return ret;
 }
 
 void nvme_nvm_unregister(struct nvme_ns *ns)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d62c6fd..d7f1afc 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -292,7 +292,8 @@ int nvme_sg_get_version_num(int __user *ip);
 
 #ifdef CONFIG_NVM
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
+		      const struct attribute_group *attrs);
 void nvme_nvm_unregister(struct nvme_ns *ns);
 
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
@@ -304,7 +305,8 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 }
 #else
 static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
-								int node)
+				    int node,
+				    const struct attribute_group *attrs)
 {
 	return 0;
 }
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 5afc263..d190786 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -352,7 +352,10 @@ struct nvm_dev {
 
 	/* Backend device */
 	struct request_queue *q;
+	struct device dev;
+	struct device *parent_dev;
 	char name[DISK_NAME_LEN];
+	void *private_data;
 
 	struct mutex mlock;
 	spinlock_t lock;
-- 
2.1.4



* [RFC PATCH 6/6] lightnvm: expose gennvm target type through sysfs
From: Matias Bjørling @ 2016-06-06 14:46 UTC (permalink / raw)
  To: linux-block, linux-kernel; +Cc: Simon Lund, Matias Bjørling

From: Simon Lund <slund@cnexlabs.com>

Create a sysfs directory for targets to expose their internal statistics
and knobs. The entries are exposed through the
/sys/block/<disk>/lightnvm/ directory.

It currently holds a single sysfs entry "type", which exports the target
type name.
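
As a sketch of how a target would add its own knobs later, using the
macros introduced in this patch (the "write_amplification" attribute
is invented for illustration):

/* A future target statistic would be declared with the same helpers
 * and listed next to "type". */
GEN_TARGET_ATTR_RO(write_amplification);

static struct attribute *gen_target_attrs[] = {
	GEN_TARGET_ATTR_LIST(type),
	GEN_TARGET_ATTR_LIST(write_amplification),
	NULL,
};

/* gen_target_attr_show() would then gain a matching strcmp() branch
 * that prints the value from the target's private data. */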

Signed-off-by: Simon Lund <slund@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/lightnvm/gennvm.c | 109 ++++++++++++++++++++++++++++++++++++++--------
 include/linux/lightnvm.h  |   2 +
 2 files changed, 94 insertions(+), 17 deletions(-)

diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index b74174c..c48bc9c 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,90 @@
 
 #include "gennvm.h"
 
+#define GEN_TARGET_ATTR_RO(_name)					\
+	static struct attribute gen_target_##_name##_attr = {		\
+	.name = __stringify(_name),					\
+	.mode = S_IRUGO							\
+	}
+
+#define GEN_TARGET_ATTR_LIST(_name) (&gen_target_##_name##_attr)
+
+GEN_TARGET_ATTR_RO(type);
+
+static struct attribute *gen_target_attrs[] = {
+	GEN_TARGET_ATTR_LIST(type),
+	NULL,
+};
+
+static ssize_t gen_target_attr_show(struct kobject *kobj,
+				struct attribute *attr,
+				char *page)
+{
+	struct nvm_target *t = container_of(kobj, struct nvm_target, kobj);
+
+	if (strcmp(attr->name, "type") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%s\n", t->type->name);
+	} else {
+		return scnprintf(page, PAGE_SIZE,
+			"Unhandled attr(%s) in `nvm_target_attr_show`\n",
+			attr->name);
+	}
+}
+
+static const struct sysfs_ops target_sysfs_ops = {
+	.show = gen_target_attr_show,
+};
+
+static void gen_target_release(struct kobject *kobj)
+{
+	struct nvm_target *t = container_of(kobj, struct nvm_target, kobj);
+	struct nvm_tgt_type *tt = t->type;
+	struct gendisk *tdisk = t->disk;
+	struct request_queue *q = tdisk->queue;
+
+	pr_debug("gen: `gen_target_release`\n");
+
+	del_gendisk(tdisk);
+	blk_cleanup_queue(q);
+	if (tt->exit)
+		tt->exit(tdisk->private_data);
+
+	put_disk(tdisk);
+
+	kfree(t);
+}
+
+static struct kobj_type nvm_target_type = {
+	.sysfs_ops	= &target_sysfs_ops,
+	.default_attrs	= gen_target_attrs,
+	.release	= gen_target_release
+};
+
+void gen_unregister_target(struct nvm_target *t)
+{
+	kobject_uevent(&t->kobj, KOBJ_REMOVE);
+	kobject_del(&t->kobj);
+	kobject_put(&t->kobj);
+}
+
+int gen_register_target(struct nvm_target *t)
+{
+	struct gendisk *disk = t->disk;
+	struct device *dev = disk_to_dev(disk);
+	int ret;
+
+	ret = kobject_init_and_add(&t->kobj, &nvm_target_type,
+				   kobject_get(&dev->kobj), "%s", "lightnvm");
+	if (ret < 0) {
+		pr_err("gen: `_register_target` failed.\n");
+		kobject_put(&t->kobj);
+		return ret;
+	}
+
+	kobject_uevent(&t->kobj, KOBJ_ADD);
+	return 0;
+}
+
 static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name)
 {
 	struct nvm_target *tgt;
@@ -60,7 +144,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	}
 	mutex_unlock(&gn->lock);
 
-	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
+	t = kzalloc(sizeof(struct nvm_target), GFP_KERNEL);
 	if (!t)
 		return -ENOMEM;
 
@@ -89,13 +173,16 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 
 	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
 
-	set_capacity(tdisk, tt->capacity(targetdata));
-	add_disk(tdisk);
-
 	t->type = tt;
 	t->disk = tdisk;
 	t->dev = dev;
 
+	set_capacity(tdisk, tt->capacity(targetdata));
+	add_disk(tdisk);
+
+	if (gen_register_target(t))
+		goto err_init;
+
 	mutex_lock(&gn->lock);
 	list_add_tail(&t->list, &gn->targets);
 	mutex_unlock(&gn->lock);
@@ -112,20 +199,8 @@ err_t:
 
 static void __gen_remove_target(struct nvm_target *t)
 {
-	struct nvm_tgt_type *tt = t->type;
-	struct gendisk *tdisk = t->disk;
-	struct request_queue *q = tdisk->queue;
-
-	del_gendisk(tdisk);
-	blk_cleanup_queue(q);
-
-	if (tt->exit)
-		tt->exit(tdisk->private_data);
-
-	put_disk(tdisk);
-
 	list_del(&t->list);
-	kfree(t);
+	gen_unregister_target(t);
 }
 
 /**
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d190786..646ca24 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -211,6 +211,8 @@ struct nvm_target {
 	struct nvm_dev *dev;
 	struct nvm_tgt_type *type;
 	struct gendisk *disk;
+
+	struct kobject kobj;
 };
 
 struct nvm_tgt_instance {
-- 
2.1.4

