* [PATCH 0/5] LightNVM fixes for 4.6
From: Matias Bjørling @ 2016-03-03 14:06 UTC
  To: linux-block, linux-kernel, axboe; +Cc: Matias Bjørling

Hi Jens,

A collection of fixes and features destined for 4.6. They contain:

 - Two fixes from Javier regarding logical to physical table loading.

 - Wenwei implemented support for multiple targets and luns in the
   rrpc target.

 - Small patch from me that exposes the completion bits for targets to
   use. Reviewed by Christoph.

Please pick up.

The patches are based on top of your current for-linus branch.

Javier González (2):
  lightnvm: do not reserve lun on l2p loading
  lightnvm: do not load L2P table if not supported

Matias Bjørling (1):
  nvme: lightnvm: return ppa completion status

Wenwei Tao (2):
  lightnvm: specify target's logical address area
  lightnvm: add a bitmap of luns

 drivers/lightnvm/core.c      |   7 +++
 drivers/lightnvm/gennvm.c    |  84 ++++++++++++++++++++++++++++++++-
 drivers/lightnvm/gennvm.h    |   6 +++
 drivers/lightnvm/rrpc.c      | 109 +++++++++++++++++++++++++++++++------------
 drivers/lightnvm/rrpc.h      |   1 +
 drivers/nvme/host/lightnvm.c |  17 ++++++-
 include/linux/lightnvm.h     |  14 ++++++
 7 files changed, 204 insertions(+), 34 deletions(-)

-- 
2.1.4


* [PATCH 1/5] lightnvm: specify target's logical address area
From: Matias Bjørling @ 2016-03-03 14:06 UTC
  To: linux-block, linux-kernel, axboe; +Cc: Wenwei Tao, Matias Bjørling

From: Wenwei Tao <ww.tao0320@gmail.com>

We can create more than one target on a lightnvm
device by specifying its begin lun and end lun.

But specifying the physical address area alone is
not enough; we also need to carve out a
corresponding, non-intersecting logical address
area from the backend device's logical address
space. Otherwise, targets on the device might use
the same logical addresses and leave incorrect
entries in the device's l2p table.

Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/lightnvm/core.c   |  1 +
 drivers/lightnvm/gennvm.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/lightnvm/gennvm.h |  6 +++++
 drivers/lightnvm/rrpc.c   | 35 +++++++++++++++++++++++--
 drivers/lightnvm/rrpc.h   |  1 +
 include/linux/lightnvm.h  |  8 ++++++
 6 files changed, 116 insertions(+), 2 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 773a55d..fdff1bb 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -466,6 +466,7 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
 	INIT_LIST_HEAD(&dev->online_targets);
 	mutex_init(&dev->mlock);
+	spin_lock_init(&dev->lock);
 
 	return 0;
 }
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index d65ec36..d460b37 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,68 @@
 
 #include "gennvm.h"
 
+static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
+{
+	struct gen_nvm *gn = dev->mp;
+	struct gennvm_area *area, *prev, *next;
+	sector_t begin = 0;
+	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+
+	if (len > max_sectors)
+		return -EINVAL;
+
+	area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+	if (!area)
+		return -ENOMEM;
+
+	prev = NULL;
+
+	spin_lock(&dev->lock);
+	list_for_each_entry(next, &gn->area_list, list) {
+		if (begin + len > next->begin) {
+			begin = next->end;
+			prev = next;
+			continue;
+		}
+		break;
+	}
+
+	if ((begin + len) > max_sectors) {
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return -EINVAL;
+	}
+
+	area->begin = *lba = begin;
+	area->end = begin + len;
+
+	if (prev) /* insert into sorted order */
+		list_add(&area->list, &prev->list);
+	else
+		list_add(&area->list, &gn->area_list);
+	spin_unlock(&dev->lock);
+
+	return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+	struct gen_nvm *gn = dev->mp;
+	struct gennvm_area *area;
+
+	spin_lock(&dev->lock);
+	list_for_each_entry(area, &gn->area_list, list) {
+		if (area->begin != begin)
+			continue;
+
+		list_del(&area->list);
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return;
+	}
+	spin_unlock(&dev->lock);
+}
+
 static void gennvm_blocks_free(struct nvm_dev *dev)
 {
 	struct gen_nvm *gn = dev->mp;
@@ -229,6 +291,7 @@ static int gennvm_register(struct nvm_dev *dev)
 
 	gn->dev = dev;
 	gn->nr_luns = dev->nr_luns;
+	INIT_LIST_HEAD(&gn->area_list);
 	dev->mp = gn;
 
 	ret = gennvm_luns_init(dev, gn);
@@ -465,6 +528,10 @@ static struct nvmm_type gennvm = {
 
 	.get_lun		= gennvm_get_lun,
 	.lun_info_print		= gennvm_lun_info_print,
+
+	.get_area		= gennvm_get_area,
+	.put_area		= gennvm_put_area,
+
 };
 
 static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b..04d7c23 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@ struct gen_nvm {
 
 	int nr_luns;
 	struct gen_lun *luns;
+	struct list_head area_list;
 };
 
+struct gennvm_area {
+	struct list_head list;
+	sector_t begin;
+	sector_t end;	/* end is excluded */
+};
 #define gennvm_for_each_lun(bm, lun, i) \
 		for ((i) = 0, lun = &(bm)->luns[0]; \
 			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index f015fdc..fbfda86 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1051,8 +1051,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	sector_t i;
+	u64 slba;
 	int ret;
 
+	slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
+
 	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
 	if (!rrpc->trans_map)
 		return -ENOMEM;
@@ -1074,7 +1077,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
 		return 0;
 
 	/* Bring up the mapping table from device */
-	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, rrpc_l2p_update,
+	ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
 									rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not read L2P table.\n");
@@ -1084,7 +1087,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
 	return 0;
 }
 
-
 /* Minimum pages needed within a lun */
 #define PAGE_POOL_SIZE 16
 #define ADDR_POOL_SIZE 64
@@ -1198,12 +1200,33 @@ err:
 	return -ENOMEM;
 }
 
+/* returns 0 on success and stores the beginning address in *begin */
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvmm_type *mt = dev->mt;
+	sector_t size = rrpc->nr_sects * dev->sec_size;
+
+	size >>= 9;
+
+	return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvmm_type *mt = dev->mt;
+
+	mt->put_area(dev, rrpc->soffset);
+}
+
 static void rrpc_free(struct rrpc *rrpc)
 {
 	rrpc_gc_free(rrpc);
 	rrpc_map_free(rrpc);
 	rrpc_core_free(rrpc);
 	rrpc_luns_free(rrpc);
+	rrpc_area_free(rrpc);
 
 	kfree(rrpc);
 }
@@ -1325,6 +1348,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	struct request_queue *bqueue = dev->q;
 	struct request_queue *tqueue = tdisk->queue;
 	struct rrpc *rrpc;
+	sector_t soffset;
 	int ret;
 
 	if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1350,6 +1374,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	/* simple round-robin strategy */
 	atomic_set(&rrpc->next_lun, -1);
 
+	ret = rrpc_area_init(rrpc, &soffset);
+	if (ret < 0) {
+		pr_err("nvm: rrpc: could not initialize area\n");
+		return ERR_PTR(ret);
+	}
+	rrpc->soffset = soffset;
+
 	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
 	if (ret) {
 		pr_err("nvm: rrpc: could not initialize luns\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 0577c4d..97d3aa1 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@ struct rrpc {
 	struct nvm_dev *dev;
 	struct gendisk *disk;
 
+	sector_t soffset; /* logical sector offset */
 	u64 poffset; /* physical page offset */
 	int lun_offset;
 
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 8f8a743..3c51ffa 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -351,6 +351,7 @@ struct nvm_dev {
 	char name[DISK_NAME_LEN];
 
 	struct mutex mlock;
+	spinlock_t lock;
 };
 
 static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -463,6 +464,9 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
 typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
+typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
+typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+
 struct nvmm_type {
 	const char *name;
 	unsigned int version[3];
@@ -487,6 +491,10 @@ struct nvmm_type {
 
 	/* Statistics */
 	nvmm_lun_info_print_fn *lun_info_print;
+
+	nvmm_get_area_fn *get_area;
+	nvmm_put_area_fn *put_area;
+
 	struct list_head list;
 };
 
-- 
2.1.4


* [PATCH 2/5] lightnvm: add a bitmap of luns
From: Matias Bjørling @ 2016-03-03 14:06 UTC
  To: linux-block, linux-kernel, axboe; +Cc: Wenwei Tao, Matias Bjørling

From: Wenwei Tao <ww.tao0320@gmail.com>

Add a bitmap of luns to indicate each lun's
status: in use or available. When creating
targets, do the necessary checks to avoid
allocating luns that are already in use.
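
Reservation amounts to test_and_set_bit() on the per-device bitmap:
a nonzero return means the lun already belongs to another target. A
minimal userspace sketch of the same semantics (simplified and
non-atomic; the patch itself uses the kernel's atomic bitops on
dev->lun_map):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	 (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Returns the old bit value: nonzero means the lun was taken. */
static int reserve_lun(unsigned long *map, int lunid)
{
	unsigned long mask = 1UL << (lunid % BITS_PER_LONG);
	unsigned long *word = &map[lunid / BITS_PER_LONG];
	int old = !!(*word & mask);

	*word |= mask;
	return old;
}

static void release_lun(unsigned long *map, int lunid)
{
	map[lunid / BITS_PER_LONG] &= ~(1UL << (lunid % BITS_PER_LONG));
}

int main(void)
{
	int nr_luns = 128;
	unsigned long *lun_map = calloc(BITS_TO_LONGS(nr_luns),
					sizeof(unsigned long));

	if (!lun_map)
		return 1;

	printf("reserve lun 3 -> %d\n", reserve_lun(lun_map, 3)); /* 0: ok */
	printf("reserve lun 3 -> %d\n", reserve_lun(lun_map, 3)); /* 1: busy */
	release_lun(lun_map, 3);
	printf("reserve lun 3 -> %d\n", reserve_lun(lun_map, 3)); /* 0: ok */
	free(lun_map);
	return 0;
}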

Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
[Matias: free dev->lun_map if nvm_core_init later fails in the init
process.]
Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/lightnvm/core.c   |  6 ++++
 drivers/lightnvm/gennvm.c | 18 ++++++++++++
 drivers/lightnvm/rrpc.c   | 74 ++++++++++++++++++++++++++++-------------------
 include/linux/lightnvm.h  |  5 ++++
 4 files changed, 74 insertions(+), 29 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index fdff1bb..b74cde8 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -464,6 +464,10 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
 
 	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
+	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+					sizeof(unsigned long), GFP_KERNEL);
+	if (!dev->lun_map)
+		return -ENOMEM;
 	INIT_LIST_HEAD(&dev->online_targets);
 	mutex_init(&dev->mlock);
 	spin_lock_init(&dev->lock);
@@ -584,6 +588,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
 
 	return 0;
 err_init:
+	kfree(dev->lun_map);
 	kfree(dev);
 	return ret;
 }
@@ -606,6 +611,7 @@ void nvm_unregister(char *disk_name)
 	up_write(&nvm_lock);
 
 	nvm_exit(dev);
+	kfree(dev->lun_map);
 	kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index d460b37..b97801c 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -192,6 +192,9 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 		lun_id = div_u64(pba, dev->sec_per_lun);
 		lun = &gn->luns[lun_id];
 
+		if (!test_bit(lun_id, dev->lun_map))
+			__set_bit(lun_id, dev->lun_map);
+
 		/* Calculate block offset into lun */
 		pba = pba - (dev->sec_per_lun * lun_id);
 		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
@@ -482,10 +485,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 	return nvm_erase_ppa(dev, &addr, 1);
 }
 
+static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+{
+	return test_and_set_bit(lunid, dev->lun_map);
+}
+
+static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+{
+	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+}
+
 static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
 {
 	struct gen_nvm *gn = dev->mp;
 
+	if (unlikely(lunid >= dev->nr_luns))
+		return NULL;
+
 	return &gn->luns[lunid].vlun;
 }
 
@@ -527,6 +543,8 @@ static struct nvmm_type gennvm = {
 	.erase_blk		= gennvm_erase_blk,
 
 	.get_lun		= gennvm_get_lun,
+	.reserve_lun		= gennvm_reserve_lun,
+	.release_lun		= gennvm_release_lun,
 	.lun_info_print		= gennvm_lun_info_print,
 
 	.get_area		= gennvm_get_area,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index fbfda86..a9a5fcc 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -963,25 +963,11 @@ static void rrpc_requeue(struct work_struct *work)
 
 static void rrpc_gc_free(struct rrpc *rrpc)
 {
-	struct rrpc_lun *rlun;
-	int i;
-
 	if (rrpc->krqd_wq)
 		destroy_workqueue(rrpc->krqd_wq);
 
 	if (rrpc->kgc_wq)
 		destroy_workqueue(rrpc->kgc_wq);
-
-	if (!rrpc->luns)
-		return;
-
-	for (i = 0; i < rrpc->nr_luns; i++) {
-		rlun = &rrpc->luns[i];
-
-		if (!rlun->blocks)
-			break;
-		vfree(rlun->blocks);
-	}
 }
 
 static int rrpc_gc_init(struct rrpc *rrpc)
@@ -1141,6 +1127,23 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_lun *lun;
+	struct rrpc_lun *rlun;
+	int i;
+
+	if (!rrpc->luns)
+		return;
+
+	for (i = 0; i < rrpc->nr_luns; i++) {
+		rlun = &rrpc->luns[i];
+		lun = rlun->parent;
+		if (!lun)
+			break;
+		dev->mt->release_lun(dev, lun->id);
+		vfree(rlun->blocks);
+	}
+
 	kfree(rrpc->luns);
 }
 
@@ -1148,7 +1151,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	struct rrpc_lun *rlun;
-	int i, j;
+	int i, j, ret = -EINVAL;
 
 	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
 		pr_err("rrpc: number of pages per block too high.");
@@ -1164,25 +1167,26 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
 	/* 1:1 mapping */
 	for (i = 0; i < rrpc->nr_luns; i++) {
-		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+		int lunid = lun_begin + i;
+		struct nvm_lun *lun;
+
+		if (dev->mt->reserve_lun(dev, lunid)) {
+			pr_err("rrpc: lun %u is already allocated\n", lunid);
+			goto err;
+		}
+
+		lun = dev->mt->get_lun(dev, lunid);
+		if (!lun)
+			goto err;
 
 		rlun = &rrpc->luns[i];
-		rlun->rrpc = rrpc;
 		rlun->parent = lun;
-		INIT_LIST_HEAD(&rlun->prio_list);
-		INIT_LIST_HEAD(&rlun->open_list);
-		INIT_LIST_HEAD(&rlun->closed_list);
-
-		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
-		spin_lock_init(&rlun->lock);
-
-		rrpc->total_blocks += dev->blks_per_lun;
-		rrpc->nr_sects += dev->sec_per_lun;
-
 		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
 						rrpc->dev->blks_per_lun);
-		if (!rlun->blocks)
+		if (!rlun->blocks) {
+			ret = -ENOMEM;
 			goto err;
+		}
 
 		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
 			struct rrpc_block *rblk = &rlun->blocks[j];
@@ -1193,11 +1197,23 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 			INIT_LIST_HEAD(&rblk->prio);
 			spin_lock_init(&rblk->lock);
 		}
+
+		rlun->rrpc = rrpc;
+		INIT_LIST_HEAD(&rlun->prio_list);
+		INIT_LIST_HEAD(&rlun->open_list);
+		INIT_LIST_HEAD(&rlun->closed_list);
+
+		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+		spin_lock_init(&rlun->lock);
+
+		rrpc->total_blocks += dev->blks_per_lun;
+		rrpc->nr_sects += dev->sec_per_lun;
+
 	}
 
 	return 0;
 err:
-	return -ENOMEM;
+	return ret;
 }
 
 /* returns 0 on success and stores the beginning address in *begin */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 3c51ffa..e27161f 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -342,6 +342,7 @@ struct nvm_dev {
 	int nr_luns;
 	unsigned max_pages_per_blk;
 
+	unsigned long *lun_map;
 	void *ppalist_pool;
 
 	struct nvm_id identity;
@@ -462,6 +463,8 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
 								unsigned long);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
+typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
+typedef void (nvmm_release_lun)(struct nvm_dev *, int);
 typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
 typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
@@ -488,6 +491,8 @@ struct nvmm_type {
 
 	/* Configuration management */
 	nvmm_get_lun_fn *get_lun;
+	nvmm_reserve_lun *reserve_lun;
+	nvmm_release_lun *release_lun;
 
 	/* Statistics */
 	nvmm_lun_info_print_fn *lun_info_print;
-- 
2.1.4


* [PATCH 3/5] nvme: lightnvm: return ppa completion status
From: Matias Bjørling @ 2016-03-03 14:06 UTC
  To: linux-block, linux-kernel, axboe; +Cc: Matias Bjørling

PPAs sent to the device are acknowledged individually in a 64-bit
status variable. The status is stored in DW0 and DW1 of the completion
queue entry. Store this status inside the nvm_rq for further
processing.

This can later be used to implement retry techniques for failed writes
and reads.
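
For illustration, a userspace sketch of how a target could pick
failed PPAs out of the stored status word. The bit-per-PPA layout in
submission order is an assumption for the sketch (it is not spelled
out in this patch), and the struct is only a stand-in for the nvm_rq
fields used here:

#include <stdint.h>
#include <stdio.h>

struct fake_rq {		/* stand-in for struct nvm_rq */
	uint16_t nr_pages;
	uint64_t ppa_status;	/* assumed: bit i set => ppa i failed */
};

static void scan_ppa_status(const struct fake_rq *rqd)
{
	int i;

	if (!rqd->ppa_status) {
		printf("all ppas completed cleanly\n");
		return;
	}

	for (i = 0; i < rqd->nr_pages; i++)
		if (rqd->ppa_status & (1ULL << i))
			printf("ppa %d failed, candidate for retry\n", i);
}

int main(void)
{
	struct fake_rq rqd = { .nr_pages = 8, .ppa_status = 0x05 };

	scan_ppa_status(&rqd);	/* flags ppas 0 and 2 */
	return 0;
}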

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/nvme/host/lightnvm.c | 17 +++++++++++++++--
 include/linux/lightnvm.h     |  1 +
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 42a01a9..9461dd6 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,6 +146,14 @@ struct nvme_nvm_command {
 	};
 };
 
+struct nvme_nvm_completion {
+	__le64	result;		/* Used by LightNVM to return ppa completions */
+	__le16	sq_head;	/* how much of this queue may be reclaimed */
+	__le16	sq_id;		/* submission queue that generated this entry */
+	__u16	command_id;	/* of the command which completed */
+	__le16	status;		/* did the command fail, and if so, why? */
+};
+
 #define NVME_NVM_LP_MLC_PAIRS 886
 struct nvme_nvm_lp_mlc {
 	__u16			num_pairs;
@@ -507,6 +515,10 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
 static void nvme_nvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
+	struct nvme_nvm_completion *cqe = rq->special;
+
+	if (cqe)
+		rqd->ppa_status = le64_to_cpu(cqe->result);
 
 	nvm_end_io(rqd, error);
 
@@ -526,7 +538,8 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	if (IS_ERR(rq))
 		return -ENOMEM;
 
-	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+	cmd = kzalloc(sizeof(struct nvme_nvm_command) +
+				sizeof(struct nvme_nvm_completion), GFP_KERNEL);
 	if (!cmd) {
 		blk_mq_free_request(rq);
 		return -ENOMEM;
@@ -545,7 +558,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	rq->cmd = (unsigned char *)cmd;
 	rq->cmd_len = sizeof(struct nvme_nvm_command);
-	rq->special = (void *)0;
+	rq->special = cmd + 1;
 
 	rq->end_io_data = rqd;
 
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index e27161f..643bdf4 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -238,6 +238,7 @@ struct nvm_rq {
 	uint16_t nr_pages;
 	uint16_t flags;
 
+	u64 ppa_status; /* ppa media status */
 	int error;
 };
 
-- 
2.1.4


* [PATCH 4/5] lightnvm: do not reserve lun on l2p loading
From: Matias Bjørling @ 2016-03-03 14:06 UTC
  To: linux-block, linux-kernel, axboe
  Cc: Javier González, Javier González, Matias Bjørling

From: Javier González <jg@lightnvm.io>

When the l2p table is loaded, addresses are checked for the lun they
belong to and luns are reserved accordingly. This assumes that metadata
is being stored in the backend device to recover the previous target
configuration. Since this is not yet implemented, this check collides
with some of the core initialization (e.g., sysblock initialization when
a page is formed by several sectors).

We take this check out and, for now, rely on the right target being
created instead. When metadata is stored to recover a target, this
check will come naturally as part of the recovery strategy.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/lightnvm/gennvm.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index b97801c..42c1c2a 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -192,9 +192,6 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 		lun_id = div_u64(pba, dev->sec_per_lun);
 		lun = &gn->luns[lun_id];
 
-		if (!test_bit(lun_id, dev->lun_map))
-			__set_bit(lun_id, dev->lun_map);
-
 		/* Calculate block offset into lun */
 		pba = pba - (dev->sec_per_lun * lun_id);
 		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
-- 
2.1.4


* [PATCH 5/5] lightnvm: do not load L2P table if not supported
From: Matias Bjørling @ 2016-03-03 14:06 UTC
  To: linux-block, linux-kernel, axboe
  Cc: Javier González, Javier González, Matias Bjørling

From: Javier González <jg@lightnvm.io>

An Open-Channel SSD can work in two modes: (i) hybrid mode, where the
L2P table is maintained both by the host and by the device; and (ii)
full host-based mode, where the L2P table is maintained solely by the
host.

In anticipation of a new target implementing the full host-based mode,
do not assume that the L2P table must be loaded by the generic media
manager; check the device properties reported by the identify command
instead.

Signed-off-by: Javier González <javier@cnexlabs.com>
[Matias: folded the check into the existing if statement.]
Signed-off-by: Matias Bjørling <m@bjorling.me>
---
 drivers/lightnvm/gennvm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 42c1c2a..72e124a 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -257,7 +257,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 		}
 	}
 
-	if (dev->ops->get_l2p_tbl) {
+	if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
 		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
 							gennvm_block_map, dev);
 		if (ret) {
-- 
2.1.4


* Re: [PATCH 0/5] LightNVM fixes for 4.6
From: Jens Axboe @ 2016-03-03 15:04 UTC
  To: Matias Bjørling, linux-block, linux-kernel

On 03/03/2016 07:06 AM, Matias Bjørling wrote:
> Hi Jens,
>
> A collection of fixes and features destined for 4.6. They contain:
>
>   - Two fixes from Javier regarding logical to physical table loading.
>
>   - Wenwei implemented support for multiple targets and luns in the
>     rrpc target.
>
>   - Small patch from me that exposes the completion bits for targets to
>     use. Reviewed by Christoph.
>
> Please pick up.
>
> The patches are based on top of your current for-linus branch.

4.6 patches should be based on the for-4.6/drivers branch; for-linus
is for the current series. Hence they don't apply :-)

-- 
Jens Axboe
