linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/7] lightnvm: enable to send hint to erase command
@ 2016-10-27 14:49 Javier González
  2016-10-27 14:49 ` [PATCH 2/7] lightnvm: do not decide on device blocks Javier González
                   ` (5 more replies)
  0 siblings, 6 replies; 11+ messages in thread
From: Javier González @ 2016-10-27 14:49 UTC (permalink / raw)
  To: mb; +Cc: linux-block, linux-kernel, Javier González

Erases might be subject to host hints. An example is multi-plane
programming to erase blocks in parallel. Enable targets to specify these
hints.
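
For example, a target could pass a plane-access hint when erasing. The
sketch below is illustrative only; the flag name is an assumption about
the access-mode constants in <linux/lightnvm.h> and is not part of this
patch:

#include <linux/lightnvm.h>

/* Sketch: request a parallel, multi-plane erase via the new flags
 * argument. NVM_IO_DUAL_ACCESS is assumed to be a defined access-mode
 * flag; substitute whatever the device supports.
 */
static int tgt_erase_blk_parallel(struct nvm_dev *dev, struct nvm_block *blk)
{
	int flags = NVM_IO_DUAL_ACCESS;	/* hint: erase both planes in parallel */

	return nvm_erase_blk(dev, blk, flags);
}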

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 drivers/lightnvm/core.c      | 9 ++++++---
 drivers/lightnvm/gennvm.c    | 5 ++---
 drivers/lightnvm/rrpc.c      | 2 +-
 drivers/lightnvm/sysblk.c    | 4 ++--
 drivers/nvme/host/lightnvm.c | 1 +
 include/linux/lightnvm.h     | 7 +++----
 6 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index a2393e1..f752087 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -204,9 +204,9 @@ int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
-int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
+int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, int flags)
 {
-	return dev->mt->erase_blk(dev, blk, 0);
+	return dev->mt->erase_blk(dev, blk, flags);
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
@@ -287,7 +287,8 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_free_rqd_ppalist);
 
-int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+								int flags)
 {
 	struct nvm_rq rqd;
 	int ret;
@@ -303,6 +304,8 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
 
 	nvm_generic_to_addr_mode(dev, &rqd);
 
+	rqd.flags = flags;
+
 	ret = dev->ops->erase_block(dev, &rqd);
 
 	nvm_free_rqd_ppalist(dev, &rqd);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index b74174c..730d736 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -593,12 +593,11 @@ static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	return dev->ops->submit_io(dev, rqd);
 }
 
-static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
-							unsigned long flags)
+static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, int flags)
 {
 	struct ppa_addr addr = block_to_ppa(dev, blk);
 
-	return nvm_erase_ppa(dev, &addr, 1);
+	return nvm_erase_ppa(dev, &addr, 1, flags);
 }
 
 static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 37fcaad..067e890 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -404,7 +404,7 @@ static void rrpc_block_gc(struct work_struct *work)
 	if (rrpc_move_valid_pages(rrpc, rblk))
 		goto put_back;
 
-	if (nvm_erase_blk(dev, rblk->parent))
+	if (nvm_erase_blk(dev, rblk->parent, 0))
 		goto put_back;
 
 	rrpc_put_blk(rrpc, rblk);
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index a75bd28..d229067 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -379,7 +379,7 @@ static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
 		ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
 		ppa->g.pg = ppa_to_slc(dev, 0);
 
-		ret = nvm_erase_ppa(dev, ppa, 1);
+		ret = nvm_erase_ppa(dev, ppa, 1, 0);
 		if (ret)
 			return ret;
 
@@ -725,7 +725,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
 	/* continue to erase until list of blks until empty */
 	while ((ppa_cnt =
 			nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
-		nvm_erase_ppa(dev, ppas, ppa_cnt);
+		nvm_erase_ppa(dev, ppas, ppa_cnt, 0);
 
 	/* mark host reserved blocks free */
 	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index f5e3011..9470d51 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -543,6 +543,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 	c.erase.nsid = cpu_to_le32(ns->ns_id);
 	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
 	c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
+	c.erase.control = cpu_to_le16(rqd->flags);
 
 	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
 }
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d190786..d7da953 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -472,8 +472,7 @@ typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
 typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
 typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
 typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
-								unsigned long);
+typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, int);
 typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
 typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
@@ -539,8 +538,8 @@ extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
 extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
 					const struct ppa_addr *, int, int);
 extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
-extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
+extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int);
+extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *, int);
 extern void nvm_end_io(struct nvm_rq *, int);
 extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
 								void *, int);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2/7] lightnvm: do not decide on device blocks
  2016-10-27 14:49 [PATCH 1/7] lightnvm: enable to send hint to erase command Javier González
@ 2016-10-27 14:49 ` Javier González
  2016-10-27 14:49 ` [PATCH 3/7] lightnvm: manage block list on LUN owner Javier González
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: Javier González @ 2016-10-27 14:49 UTC (permalink / raw)
  To: mb; +Cc: linux-block, linux-kernel, Javier González

Device blocks should be marked by the device and considered bad blocks
by the media manager. Thus, do not make assumptions about which blocks
are going to be used by the device; doing so might lose valid blocks
from the free list.

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 drivers/lightnvm/gennvm.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 730d736..a7e17fa 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -371,12 +371,6 @@ static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
 			block->lun = &lun->vlun;
 			block->id = cur_block_id++;
 
-			/* First block is reserved for device */
-			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
-				lun->vlun.nr_free_blocks--;
-				continue;
-			}
-
 			list_add_tail(&block->list, &lun->free_list);
 		}
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 3/7] lightnvm: manage block list on LUN owner
  2016-10-27 14:49 [PATCH 1/7] lightnvm: enable to send hint to erase command Javier González
  2016-10-27 14:49 ` [PATCH 2/7] lightnvm: do not decide on device blocks Javier González
@ 2016-10-27 14:49 ` Javier González
  2016-10-27 14:49 ` [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks Javier González
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: Javier González @ 2016-10-27 14:49 UTC (permalink / raw)
  To: mb; +Cc: linux-block, linux-kernel, Javier González

LUNs can be exclusively owned by a target through the media manager's
reserve_lun function. In this case, the target should implement its own
provisioning and manage the free/used/bad block lists internally.

This patch introduces a LUN management structure that can be passed on
to exclusive LUN owners. The media manager is the default owner. On
boot, it populates the lists based on the device's bad block table and
sysblk metadata. Then, if the LUN is owned by a target, the management
structure is passed on to it. From this point on, the target is
responsible for list management. Since LUNs are then managed strictly by
the target, there is no need for the media manager to reserve GC blocks.

As an FTL, rrpc exclusively owns its LUNs. Therefore, adapt rrpc to use
the new management interface.
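
In rough terms (illustrative sketch only, simplified from the rrpc
changes below), an exclusive owner picks up the management structure
from lun->priv and allocates blocks from it directly:

#include <linux/lightnvm.h>

/* Sketch: a target that owns a LUN exclusively takes over the
 * nvm_lun_mgmt lists handed over in lun->priv and allocates blocks
 * without going through the media manager.
 */
static struct nvm_block *tgt_get_blk(struct nvm_lun *lun, spinlock_t *lock)
{
	struct nvm_lun_mgmt *mgmt = lun->priv;	/* set up by the media manager */
	struct nvm_block *blk = NULL;

	spin_lock(lock);
	if (!list_empty(&mgmt->free_list)) {
		blk = list_first_entry(&mgmt->free_list, struct nvm_block, list);
		list_move_tail(&blk->list, &mgmt->used_list);
		blk->state = NVM_BLK_ST_TGT;
		mgmt->nr_free_blocks--;
	}
	spin_unlock(lock);

	return blk;
}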

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 drivers/lightnvm/core.c   |  5 ++--
 drivers/lightnvm/gennvm.c | 76 +++++++++++++++++++++++++++++++----------------
 drivers/lightnvm/gennvm.h | 19 ++++++------
 drivers/lightnvm/rrpc.c   | 74 ++++++++++++++++++++++++++++++++++++++-------
 drivers/lightnvm/rrpc.h   |  4 +++
 include/linux/lightnvm.h  | 21 +++++++++----
 6 files changed, 145 insertions(+), 54 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index f752087..4be3879 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -178,10 +178,9 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 	return NULL;
 }
 
-struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
-							unsigned long flags)
+struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun)
 {
-	return dev->mt->get_blk(dev, lun, flags);
+	return dev->mt->get_blk(dev, lun);
 }
 EXPORT_SYMBOL(nvm_get_blk);
 
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index a7e17fa..8bff725 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -243,6 +243,7 @@ static void gen_luns_free(struct nvm_dev *dev)
 static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
 {
 	struct gen_lun *lun;
+	struct nvm_lun_mgmt *mgmt;
 	int i;
 
 	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
@@ -250,18 +251,31 @@ static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
 		return -ENOMEM;
 
 	gen_for_each_lun(gn, lun, i) {
+		mgmt = kmalloc(sizeof(struct nvm_lun_mgmt), GFP_KERNEL);
+		if (!mgmt)
+			goto free;
+
+		lun->mgmt = mgmt;
+		lun->tgt = NULL;
+
 		spin_lock_init(&lun->vlun.lock);
-		INIT_LIST_HEAD(&lun->free_list);
-		INIT_LIST_HEAD(&lun->used_list);
-		INIT_LIST_HEAD(&lun->bb_list);
+		INIT_LIST_HEAD(&lun->mgmt->free_list);
+		INIT_LIST_HEAD(&lun->mgmt->used_list);
+		INIT_LIST_HEAD(&lun->mgmt->bb_list);
+		lun->mgmt->nr_free_blocks = dev->blks_per_lun;
 
-		lun->reserved_blocks = 2; /* for GC only */
 		lun->vlun.id = i;
 		lun->vlun.lun_id = i % dev->luns_per_chnl;
 		lun->vlun.chnl_id = i / dev->luns_per_chnl;
-		lun->vlun.nr_free_blocks = dev->blks_per_lun;
+		lun->vlun.priv = NULL;
 	}
 	return 0;
+
+free:
+	gen_for_each_lun(gn, lun, i)
+		kfree(lun->mgmt);
+
+	return -ENOMEM;
 }
 
 static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
@@ -279,12 +293,13 @@ static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
 	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
 
 	for (i = 0; i < nr_blks; i++) {
-		if (blks[i] == 0)
+		if (blks[i] == NVM_BLK_T_FREE && i > 0)
 			continue;
 
 		blk = &lun->vlun.blocks[i];
-		list_move_tail(&blk->list, &lun->bb_list);
-		lun->vlun.nr_free_blocks--;
+		list_move_tail(&blk->list, &lun->mgmt->bb_list);
+		blk->state = NVM_BLK_ST_BAD;
+		lun->mgmt->nr_free_blocks--;
 	}
 
 	return 0;
@@ -333,9 +348,9 @@ static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 			 * block. It's up to the FTL on top to re-etablish the
 			 * block state. The block is assumed to be open.
 			 */
-			list_move_tail(&blk->list, &lun->used_list);
+			list_move_tail(&blk->list, &lun->mgmt->used_list);
 			blk->state = NVM_BLK_ST_TGT;
-			lun->vlun.nr_free_blocks--;
+			lun->mgmt->nr_free_blocks--;
 		}
 	}
 
@@ -371,7 +386,7 @@ static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
 			block->lun = &lun->vlun;
 			block->id = cur_block_id++;
 
-			list_add_tail(&block->list, &lun->free_list);
+			list_add_tail(&block->list, &lun->mgmt->free_list);
 		}
 
 		if (dev->ops->get_bb_tbl) {
@@ -467,30 +482,30 @@ static void gen_unregister(struct nvm_dev *dev)
 	module_put(THIS_MODULE);
 }
 
-static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
-				struct nvm_lun *vlun, unsigned long flags)
+static struct nvm_block *gen_get_blk(struct nvm_dev *dev, struct nvm_lun *vlun)
 {
 	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
 	struct nvm_block *blk = NULL;
-	int is_gc = flags & NVM_IOTYPE_GC;
 
 	spin_lock(&vlun->lock);
-	if (list_empty(&lun->free_list)) {
-		pr_err_ratelimited("gen: lun %u have no free pages available",
-								lun->vlun.id);
+	if (test_bit(vlun->id, dev->lun_map)) {
+		pr_err("gen: bad get block - lun:%d not managed by mm\n",
+				vlun->id);
 		goto out;
 	}
 
-	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
+	if (list_empty(&lun->mgmt->free_list))
 		goto out;
 
-	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
+	blk = list_first_entry(&lun->mgmt->free_list, struct nvm_block, list);
 
-	list_move_tail(&blk->list, &lun->used_list);
+	list_move_tail(&blk->list, &lun->mgmt->used_list);
 	blk->state = NVM_BLK_ST_TGT;
-	lun->vlun.nr_free_blocks--;
+	lun->mgmt->nr_free_blocks--;
+
 out:
 	spin_unlock(&vlun->lock);
+
 	return blk;
 }
 
@@ -500,19 +515,28 @@ static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
 	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
 
 	spin_lock(&vlun->lock);
+
+	if (test_bit(vlun->id, dev->lun_map)) {
+		pr_err("gen: bad put block - lun:%d not managed by mm\n",
+				vlun->id);
+		goto out;
+	}
+
 	if (blk->state & NVM_BLK_ST_TGT) {
-		list_move_tail(&blk->list, &lun->free_list);
-		lun->vlun.nr_free_blocks++;
+		list_move_tail(&blk->list, &lun->mgmt->free_list);
+		lun->mgmt->nr_free_blocks++;
 		blk->state = NVM_BLK_ST_FREE;
 	} else if (blk->state & NVM_BLK_ST_BAD) {
-		list_move_tail(&blk->list, &lun->bb_list);
+		list_move_tail(&blk->list, &lun->mgmt->bb_list);
 		blk->state = NVM_BLK_ST_BAD;
 	} else {
 		WARN_ON_ONCE(1);
 		pr_err("gen: erroneous block type (%lu -> %u)\n",
 							blk->id, blk->state);
-		list_move_tail(&blk->list, &lun->bb_list);
+		list_move_tail(&blk->list, &lun->mgmt->bb_list);
 	}
+
+out:
 	spin_unlock(&vlun->lock);
 }
 
@@ -625,7 +649,7 @@ static void gen_lun_info_print(struct nvm_dev *dev)
 		spin_lock(&lun->vlun.lock);
 
 		pr_info("%s: lun%8u\t%u\n", dev->name, i,
-						lun->vlun.nr_free_blocks);
+						lun->mgmt->nr_free_blocks);
 
 		spin_unlock(&lun->vlun.lock);
 	}
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 8ecfa81..f19d25a 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -23,15 +23,16 @@
 struct gen_lun {
 	struct nvm_lun vlun;
 
-	int reserved_blocks;
-	/* lun block lists */
-	struct list_head used_list;	/* In-use blocks */
-	struct list_head free_list;	/* Not used blocks i.e. released
-					 * and ready for use
-					 */
-	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
-					 * free_list and used_list
-					 */
+	/* A LUN can either be managed by the media manager if it is shared
+	 * among several used through the generic get/put block interface or
+	 * exclusively owned by a target. In this case, the target manages
+	 * the LUN. gen_lun always maintains a reference to the LUN management.
+	 *
+	 * Exclusive access is managed by the dev->lun_map bitmask. 0:
+	 * non-exclusive, 1: exclusive.
+	 */
+	struct nvm_lun_mgmt *mgmt;
+	struct nvm_target *tgt;
 };
 
 struct gen_dev {
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 067e890..f293d00 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -188,17 +188,45 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
 	*cur_rblk = new_rblk;
 }
 
+static struct nvm_block *__rrpc_get_blk(struct rrpc *rrpc,
+							struct rrpc_lun *rlun)
+{
+	struct nvm_block *blk = NULL;
+
+	if (list_empty(&rlun->mgmt->free_list))
+		goto out;
+
+	blk = list_first_entry(&rlun->mgmt->free_list, struct nvm_block, list);
+
+	list_move_tail(&blk->list, &rlun->mgmt->used_list);
+	blk->state = NVM_BLK_ST_TGT;
+	rlun->mgmt->nr_free_blocks--;
+
+out:
+	return blk;
+}
+
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 							unsigned long flags)
 {
 	struct nvm_block *blk;
 	struct rrpc_block *rblk;
+	int is_gc = flags & NVM_IOTYPE_GC;
 
-	blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
+	spin_lock(&rlun->lock);
+	if (!is_gc && rlun->mgmt->nr_free_blocks < rlun->reserved_blocks) {
+		pr_err("nvm: rrpc: cannot give block to non GC request\n");
+		spin_unlock(&rlun->lock);
+		return NULL;
+	}
+
+	blk = __rrpc_get_blk(rrpc, rlun);
 	if (!blk) {
-		pr_err("nvm: rrpc: cannot get new block from media manager\n");
+		pr_err("nvm: rrpc: cannot get new block\n");
+		spin_unlock(&rlun->lock);
 		return NULL;
 	}
+	spin_unlock(&rlun->lock);
 
 	rblk = rrpc_get_rblk(rlun, blk->id);
 	blk->priv = rblk;
@@ -212,7 +240,24 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 
 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	nvm_put_blk(rrpc->dev, rblk->parent);
+	struct nvm_block *blk = rblk->parent;
+	struct rrpc_lun *rlun = rblk->rlun;
+
+	spin_lock(&rlun->lock);
+	if (blk->state & NVM_BLK_ST_TGT) {
+		list_move_tail(&blk->list, &rlun->mgmt->free_list);
+		rlun->mgmt->nr_free_blocks++;
+		blk->state = NVM_BLK_ST_FREE;
+	} else if (blk->state & NVM_BLK_ST_BAD) {
+		list_move_tail(&blk->list, &rlun->mgmt->bb_list);
+		blk->state = NVM_BLK_ST_BAD;
+	} else {
+		WARN_ON_ONCE(1);
+		pr_err("rrpc: erroneous block type (%lu -> %u)\n",
+							blk->id, blk->state);
+		list_move_tail(&blk->list, &rlun->mgmt->bb_list);
+	}
+	spin_unlock(&rlun->lock);
 }
 
 static void rrpc_put_blks(struct rrpc *rrpc)
@@ -450,7 +495,6 @@ static void rrpc_lun_gc(struct work_struct *work)
 {
 	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
 	struct rrpc *rrpc = rlun->rrpc;
-	struct nvm_lun *lun = rlun->parent;
 	struct rrpc_block_gc *gcb;
 	unsigned int nr_blocks_need;
 
@@ -460,7 +504,7 @@ static void rrpc_lun_gc(struct work_struct *work)
 		nr_blocks_need = rrpc->nr_luns;
 
 	spin_lock(&rlun->lock);
-	while (nr_blocks_need > lun->nr_free_blocks &&
+	while (nr_blocks_need > rlun->mgmt->nr_free_blocks &&
 					!list_empty(&rlun->prio_list)) {
 		struct rrpc_block *rblock = block_prio_find_max(rlun);
 		struct nvm_block *block = rblock->parent;
@@ -529,8 +573,7 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
 	 * estimate.
 	 */
 	rrpc_for_each_lun(rrpc, rlun, i) {
-		if (rlun->parent->nr_free_blocks >
-					max_free->parent->nr_free_blocks)
+		if (rlun->mgmt->nr_free_blocks > max_free->mgmt->nr_free_blocks)
 			max_free = rlun;
 	}
 
@@ -587,14 +630,12 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
 {
 	struct rrpc_lun *rlun;
 	struct rrpc_block *rblk, **cur_rblk;
-	struct nvm_lun *lun;
 	u64 paddr;
 	int gc_force = 0;
 
 	rlun = rrpc_get_lun_rr(rrpc, is_gc);
-	lun = rlun->parent;
 
-	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
+	if (!is_gc && rlun->mgmt->nr_free_blocks < rrpc->nr_luns * 4)
 		return NULL;
 
 	/*
@@ -1175,11 +1216,20 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 		}
 
 		lun = dev->mt->get_lun(dev, lunid);
-		if (!lun)
+		if (!lun) {
+			pr_err("rrpc: cannot get lun %d\n", lun->id);
 			goto err;
+		}
+
+		if (!lun->priv) {
+			pr_err("rrpc: lun %d not allocated exclusively\n",
+								lun->id);
+			goto err;
+		}
 
 		rlun = &rrpc->luns[i];
 		rlun->parent = lun;
+		rlun->mgmt = lun->priv;
 		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
 						rrpc->dev->blks_per_lun);
 		if (!rlun->blocks) {
@@ -1197,6 +1247,8 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 			spin_lock_init(&rblk->lock);
 		}
 
+		rlun->reserved_blocks = 2; /* for GC only */
+
 		rlun->rrpc = rrpc;
 		INIT_LIST_HEAD(&rlun->prio_list);
 		INIT_LIST_HEAD(&rlun->wblk_list);
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 5e87d52..b8b3ad0 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -75,11 +75,15 @@ struct rrpc_lun {
 	struct rrpc_block *cur, *gc_cur;
 	struct rrpc_block *blocks;	/* Reference to block allocation */
 
+	struct nvm_lun_mgmt *mgmt;
+
 	struct list_head prio_list;	/* Blocks that may be GC'ed */
 	struct list_head wblk_list;	/* Queued blocks to be written to */
 
 	struct work_struct ws_gc;
 
+	int reserved_blocks;
+
 	spinlock_t lock;
 };
 
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d7da953..14c6fa5 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -273,8 +273,8 @@ struct nvm_lun {
 
 	spinlock_t lock;
 
-	unsigned int nr_free_blocks;	/* Number of unused blocks */
 	struct nvm_block *blocks;
+	void *priv;
 };
 
 enum {
@@ -452,6 +452,19 @@ struct nvm_tgt_type {
 	struct list_head list;
 };
 
+struct nvm_lun_mgmt {
+	/* lun block lists */
+	struct list_head used_list;	/* In-use blocks */
+	struct list_head free_list;	/* Not used blocks i.e. released
+					 * and ready for use
+					 */
+	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
+					 * free_list and used_list
+					 */
+
+	unsigned int nr_free_blocks;	/* Number of unused blocks */
+};
+
 extern struct nvm_tgt_type *nvm_find_target_type(const char *, int);
 
 extern int nvm_register_tgt_type(struct nvm_tgt_type *);
@@ -465,8 +478,7 @@ typedef void (nvmm_unregister_fn)(struct nvm_dev *);
 
 typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *);
 typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *);
-typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
-					      struct nvm_lun *, unsigned long);
+typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, struct nvm_lun *);
 typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
 typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
 typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
@@ -522,8 +534,7 @@ struct nvmm_type {
 extern int nvm_register_mgr(struct nvmm_type *);
 extern void nvm_unregister_mgr(struct nvmm_type *);
 
-extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
-								unsigned long);
+extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *);
 extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
 
 extern struct nvm_dev *nvm_alloc_dev(int);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks
  2016-10-27 14:49 [PATCH 1/7] lightnvm: enable to send hint to erase command Javier González
  2016-10-27 14:49 ` [PATCH 2/7] lightnvm: do not decide on device blocks Javier González
  2016-10-27 14:49 ` [PATCH 3/7] lightnvm: manage block list on LUN owner Javier González
@ 2016-10-27 14:49 ` Javier González
  2016-10-27 14:49 ` [PATCH 5/7] lightnvm: export set bad block table Javier González
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: Javier González @ 2016-10-27 14:49 UTC (permalink / raw)
  To: mb; +Cc: linux-block, linux-kernel, Javier González

On target initialization, targets use callbacks to the media manager to
configure the LUNs they use. In order to simplify the flow, drop these
callbacks and manage everything internally in the media manager.

By making use of the newly introduced LUN management structure, the
media manager knows which target exclusively owns each LUN and can
therefore allocate and free all the necessary structures before
initializing the target. LUNs that are not exclusively owned belong to
the media manager in any case.

Adapt rrpc to not use the reserve_lun/release_lun callback functions.
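
With this, a target only declares exclusive ownership in its
nvm_tgt_type; the media manager then reserves its LUNs at creation time
and releases them on removal. A minimal sketch (the target name and
struct below are hypothetical):

#include <linux/lightnvm.h>

/* Sketch: instead of calling reserve_lun/release_lun, a target marks
 * itself exclusive and lets the media manager handle LUN reservation.
 */
static struct nvm_tgt_type tt_example = {
	.name		= "example",
	.version	= {1, 0, 0},
	.exclusive	= 1,	/* LUNs reserved by the media manager on create */
	/* remaining entry points (make_rq, init, exit, ...) omitted */
};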

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 drivers/lightnvm/gennvm.c | 62 +++++++++++++++++++++++++++++++++++------------
 drivers/lightnvm/rrpc.c   | 12 +--------
 include/linux/lightnvm.h  |  5 +---
 3 files changed, 49 insertions(+), 30 deletions(-)

diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 8bff725..575afc4 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -35,6 +35,30 @@ static const struct block_device_operations gen_fops = {
 	.owner		= THIS_MODULE,
 };
 
+static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end,
+			    struct nvm_target *t)
+{
+	struct gen_dev *gn = dev->mp;
+	struct gen_lun *lun;
+	int i;
+
+	for (i = lun_begin; i <= lun_end; i++) {
+		if (test_and_set_bit(i, dev->lun_map)) {
+			pr_err("gennvm: lun %d is already allocated\n", i);
+			goto fail;
+		}
+
+		lun = &gn->luns[i];
+	}
+
+	return 0;
+fail:
+	while (--i > lun_begin)
+		clear_bit(i, dev->lun_map);
+
+	return 1;
+}
+
 static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 {
 	struct gen_dev *gn = dev->mp;
@@ -80,6 +104,9 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	tdisk->fops = &gen_fops;
 	tdisk->queue = tqueue;
 
+	if (tt->exclusive && gen_reserve_luns(dev, s->lun_begin, s->lun_end, t))
+		goto err_init;
+
 	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
 	if (IS_ERR(targetdata))
 		goto err_init;
@@ -110,7 +137,23 @@ err_t:
 	return -ENOMEM;
 }
 
-static void __gen_remove_target(struct nvm_target *t)
+static void gen_release_luns(struct nvm_dev *dev, struct nvm_target *t)
+{
+	struct gen_dev *gn = dev->mp;
+	struct gen_lun *lun;
+	int lunid;
+	int i;
+
+	gen_for_each_lun(gn, lun, i) {
+		if (lun->tgt != t)
+			continue;
+
+		lunid = lun->vlun.id;
+		WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+	}
+}
+
+static void __gen_remove_target(struct nvm_dev *dev, struct nvm_target *t)
 {
 	struct nvm_tgt_type *tt = t->type;
 	struct gendisk *tdisk = t->disk;
@@ -122,6 +165,7 @@ static void __gen_remove_target(struct nvm_target *t)
 	if (tt->exit)
 		tt->exit(tdisk->private_data);
 
+	gen_release_luns(dev, t);
 	put_disk(tdisk);
 
 	list_del(&t->list);
@@ -152,7 +196,7 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
 		mutex_unlock(&gn->lock);
 		return 1;
 	}
-	__gen_remove_target(t);
+	__gen_remove_target(dev, t);
 	mutex_unlock(&gn->lock);
 
 	return 0;
@@ -474,7 +518,7 @@ static void gen_unregister(struct nvm_dev *dev)
 	list_for_each_entry_safe(t, tmp, &gn->targets, list) {
 		if (t->dev != dev)
 			continue;
-		__gen_remove_target(t);
+		__gen_remove_target(dev, t);
 	}
 	mutex_unlock(&gn->lock);
 
@@ -618,16 +662,6 @@ static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, int flags)
 	return nvm_erase_ppa(dev, &addr, 1, flags);
 }
 
-static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
-{
-	return test_and_set_bit(lunid, dev->lun_map);
-}
-
-static void gen_release_lun(struct nvm_dev *dev, int lunid)
-{
-	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
-}
-
 static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
 {
 	struct gen_dev *gn = dev->mp;
@@ -674,8 +708,6 @@ static struct nvmm_type gen = {
 	.mark_blk		= gen_mark_blk,
 
 	.get_lun		= gen_get_lun,
-	.reserve_lun		= gen_reserve_lun,
-	.release_lun		= gen_release_lun,
 	.lun_info_print		= gen_lun_info_print,
 
 	.get_area		= gen_get_area,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index f293d00..cb30ccf 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1167,8 +1167,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
-	struct nvm_dev *dev = rrpc->dev;
-	struct nvm_lun *lun;
 	struct rrpc_lun *rlun;
 	int i;
 
@@ -1177,10 +1175,6 @@ static void rrpc_luns_free(struct rrpc *rrpc)
 
 	for (i = 0; i < rrpc->nr_luns; i++) {
 		rlun = &rrpc->luns[i];
-		lun = rlun->parent;
-		if (!lun)
-			break;
-		dev->mt->release_lun(dev, lun->id);
 		vfree(rlun->blocks);
 	}
 
@@ -1210,11 +1204,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 		int lunid = lun_begin + i;
 		struct nvm_lun *lun;
 
-		if (dev->mt->reserve_lun(dev, lunid)) {
-			pr_err("rrpc: lun %u is already allocated\n", lunid);
-			goto err;
-		}
-
 		lun = dev->mt->get_lun(dev, lunid);
 		if (!lun) {
 			pr_err("rrpc: cannot get lun %d\n", lun->id);
@@ -1508,6 +1497,7 @@ err:
 static struct nvm_tgt_type tt_rrpc = {
 	.name		= "rrpc",
 	.version	= {1, 0, 0},
+	.exclusive	= 1,
 
 	.make_rq	= rrpc_make_rq,
 	.capacity	= rrpc_capacity,
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 14c6fa5..1957829 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -438,6 +438,7 @@ typedef void (nvm_tgt_exit_fn)(void *);
 struct nvm_tgt_type {
 	const char *name;
 	unsigned int version[3];
+	int exclusive;
 
 	/* target entry points */
 	nvm_tgt_make_rq_fn *make_rq;
@@ -487,8 +488,6 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, int);
 typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
-typedef void (nvmm_release_lun)(struct nvm_dev *, int);
 typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
 typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
@@ -519,8 +518,6 @@ struct nvmm_type {
 
 	/* Configuration management */
 	nvmm_get_lun_fn *get_lun;
-	nvmm_reserve_lun *reserve_lun;
-	nvmm_release_lun *release_lun;
 
 	/* Statistics */
 	nvmm_lun_info_print_fn *lun_info_print;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 5/7] lightnvm: export set bad block table
  2016-10-27 14:49 [PATCH 1/7] lightnvm: enable to send hint to erase command Javier González
                   ` (2 preceding siblings ...)
  2016-10-27 14:49 ` [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks Javier González
@ 2016-10-27 14:49 ` Javier González
  2016-10-27 14:49 ` [PATCH 6/7] lightnvm: add ECC error codes Javier González
  2016-10-27 14:49 ` [PATCH 7/7] lightnvm: rrpc: split bios of size > 256kb Javier González
  5 siblings, 0 replies; 11+ messages in thread
From: Javier González @ 2016-10-27 14:49 UTC (permalink / raw)
  To: mb; +Cc: linux-block, linux-kernel, Javier González

Bad blocks should be managed by block owners. These would be either
targets for data blocks or sysblk for system blocks.

In order to support this, export two functions: one to mark a block as a
specific type (e.g., a bad block) and another to update the bad block
table on the device.

Move bad block management to rrpc.
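
For example, a target can combine the two exported helpers to handle a
grown bad block (sketch; this mirrors the rrpc helper added below):

#include <linux/lightnvm.h>

/* Sketch: mark a single grown bad block from a target. */
static void tgt_mark_bad_blk(struct nvm_dev *dev, struct ppa_addr ppa)
{
	/* update the in-memory block state kept by the media manager */
	nvm_mark_blk(dev, ppa, NVM_BLK_ST_BAD);
	/* persist the new state in the device's bad block table */
	nvm_set_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
}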

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 drivers/lightnvm/core.c   | 27 +++++++++++++++++++++++++++
 drivers/lightnvm/gennvm.c | 25 +------------------------
 drivers/lightnvm/rrpc.c   | 34 +++++++++++++++++++++++++++++++++-
 drivers/lightnvm/sysblk.c | 29 +++++------------------------
 include/linux/lightnvm.h  | 17 ++++++++++++++++-
 5 files changed, 82 insertions(+), 50 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 4be3879..a81ed1c 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -197,6 +197,33 @@ void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
 }
 EXPORT_SYMBOL(nvm_mark_blk);
 
+int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+								int type)
+{
+	struct nvm_rq rqd;
+	int ret;
+
+	if (nr_ppas > dev->ops->max_phys_sect) {
+		pr_err("nvm: unable to update all sysblocks atomically\n");
+		return -EINVAL;
+	}
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+	nvm_free_rqd_ppalist(dev, &rqd);
+	if (ret) {
+		pr_err("nvm: sysblk failed bb mark\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(nvm_set_bb_tbl);
+
 int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	return dev->mt->submit_io(dev, rqd);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 575afc4..ae19a61 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -611,34 +611,11 @@ static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
 	blk->state = type;
 }
 
-/*
- * mark block bad in gen. It is expected that the target recovers separately
- */
-static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int bit = -1;
-	int max_secs = dev->ops->max_phys_sect;
-	void *comp_bits = &rqd->ppa_status;
-
-	nvm_addr_to_generic_mode(dev, rqd);
-
-	/* look up blocks and mark them as bad */
-	if (rqd->nr_ppas == 1) {
-		gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
-		return;
-	}
-
-	while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
-		gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
-}
-
 static void gen_end_io(struct nvm_rq *rqd)
 {
 	struct nvm_tgt_instance *ins = rqd->ins;
 
-	if (rqd->error == NVM_RSP_ERR_FAILWRITE)
-		gen_mark_blk_bad(rqd->dev, rqd);
-
+	/* Write failures and bad blocks are managed within the target */
 	ins->tt->end_io(rqd);
 }
 
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index cb30ccf..8deef2e 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -716,6 +716,34 @@ static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
 	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
 }
 
+static void __rrpc_mark_bad_block(struct nvm_dev *dev, struct ppa_addr *ppa)
+{
+		nvm_mark_blk(dev, *ppa, NVM_BLK_ST_BAD);
+		nvm_set_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
+}
+
+static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	void *comp_bits = &rqd->ppa_status;
+	struct ppa_addr ppa, prev_ppa;
+	int nr_ppas = rqd->nr_ppas;
+	int bit;
+
+	if (rqd->nr_ppas == 1)
+		__rrpc_mark_bad_block(dev, &rqd->ppa_addr);
+
+	ppa_set_empty(&prev_ppa);
+	bit = -1;
+	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
+		ppa = rqd->ppa_list[bit];
+		if (ppa_cmp_blk(ppa, prev_ppa))
+			continue;
+
+		__rrpc_mark_bad_block(dev, &ppa);
+	}
+}
+
 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 						sector_t laddr, uint8_t npages)
 {
@@ -742,8 +770,12 @@ static void rrpc_end_io(struct nvm_rq *rqd)
 	uint8_t npages = rqd->nr_ppas;
 	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
 
-	if (bio_data_dir(rqd->bio) == WRITE)
+	if (bio_data_dir(rqd->bio) == WRITE) {
+		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
+			rrpc_mark_bad_block(rrpc, rqd);
+
 		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+	}
 
 	bio_put(rqd->bio);
 
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index d229067..fa644af 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -267,29 +267,10 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
 	return found;
 }
 
-static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
+static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
+								int type)
 {
-	struct nvm_rq rqd;
-	int ret;
-
-	if (s->nr_ppas > dev->ops->max_phys_sect) {
-		pr_err("nvm: unable to update all sysblocks atomically\n");
-		return -EINVAL;
-	}
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
-	nvm_generic_to_addr_mode(dev, &rqd);
-
-	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-	nvm_free_rqd_ppalist(dev, &rqd);
-	if (ret) {
-		pr_err("nvm: sysblk failed bb mark\n");
-		return -EINVAL;
-	}
-
-	return 0;
+	return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
 }
 
 static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
@@ -573,7 +554,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
 	if (ret)
 		goto err_mark;
 
-	ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
+	ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
 	if (ret)
 		goto err_mark;
 
@@ -733,7 +714,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
 		mutex_lock(&dev->mlock);
 		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
 		if (!ret)
-			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
+			ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
 		mutex_unlock(&dev->mlock);
 	}
 err_ppas:
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 1957829..e3ccaff 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -425,6 +425,19 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
 	return ppa;
 }
 
+static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
+{
+	if (ppa_empty(ppa1) || ppa_empty(ppa2))
+		return 0;
+
+
+	if ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
+					(ppa1.g.blk == ppa2.g.blk))
+		return 1;
+
+	return 0;
+}
+
 static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
 {
 	return dev->lptbl[slc_pg];
@@ -538,7 +551,9 @@ extern struct nvm_dev *nvm_alloc_dev(int);
 extern int nvm_register(struct nvm_dev *);
 extern void nvm_unregister(struct nvm_dev *);
 
-void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
+extern void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
+extern int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas,
+							int nr_ppas, int type);
 
 extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
 extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 6/7] lightnvm: add ECC error codes
  2016-10-27 14:49 [PATCH 1/7] lightnvm: enable to send hint to erase command Javier González
                   ` (3 preceding siblings ...)
  2016-10-27 14:49 ` [PATCH 5/7] lightnvm: export set bad block table Javier González
@ 2016-10-27 14:49 ` Javier González
  2016-10-27 14:49 ` [PATCH 7/7] lightnvm: rrpc: split bios of size > 256kb Javier González
  5 siblings, 0 replies; 11+ messages in thread
From: Javier González @ 2016-10-27 14:49 UTC (permalink / raw)
  To: mb; +Cc: linux-block, linux-kernel, Javier González

Add ECC error codes to enable the appropriate handling in the target.
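
A target's completion handler could then act on them, for example
(sketch only; this series does not add such handling):

#include <linux/lightnvm.h>

/* Sketch: possible handling of the new codes in a target's end_io path.
 * Not part of this patch.
 */
static void tgt_end_io(struct nvm_rq *rqd)
{
	switch (rqd->error) {
	case NVM_RSP_ERR_FAILECC:
		/* uncorrectable read: data is lost, trigger recovery */
		break;
	case NVM_RSP_WARN_HIGHECC:
		/* data recovered, but wear is high: consider relocating the block */
		break;
	default:
		break;
	}
}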

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 include/linux/lightnvm.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index e3ccaff..33643ae 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -107,6 +107,8 @@ enum {
 	NVM_RSP_NOT_CHANGEABLE	= 0x1,
 	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
 	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,
+	NVM_RSP_ERR_FAILECC	= 0x4281,
+	NVM_RSP_WARN_HIGHECC	= 0x4700,
 
 	/* Device opcodes */
 	NVM_OP_HBREAD		= 0x02,
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 7/7] lightnvm: rrpc: split bios of size > 256kb
  2016-10-27 14:49 [PATCH 1/7] lightnvm: enable to send hint to erase command Javier González
                   ` (4 preceding siblings ...)
  2016-10-27 14:49 ` [PATCH 6/7] lightnvm: add ECC error codes Javier González
@ 2016-10-27 14:49 ` Javier González
  5 siblings, 0 replies; 11+ messages in thread
From: Javier González @ 2016-10-27 14:49 UTC (permalink / raw)
  To: mb; +Cc: linux-block, linux-kernel, Javier González

rrpc cannot handle bios of size > 256KB due to NVMe's 64-bit completion
bitmap (64 entries of 4KB pages = 256KB). If a larger bio arrives, split
it explicitly.

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 drivers/lightnvm/rrpc.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 8deef2e..0b8251f 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -984,6 +984,12 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
 	struct nvm_rq *rqd;
 	int err;
 
+	/*
+	 * Multipage is supported up until 256kb due to NVME's 64 bit completion
+	 * bitmap.
+	 */
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (bio_op(bio) == REQ_OP_DISCARD) {
 		rrpc_discard(rrpc, bio);
 		return BLK_QC_T_NONE;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks
  2016-10-31 20:22     ` Jens Axboe
@ 2016-11-01  0:38       ` Matias Bjørling
  0 siblings, 0 replies; 11+ messages in thread
From: Matias Bjørling @ 2016-11-01  0:38 UTC (permalink / raw)
  To: Jens Axboe, Javier González
  Cc: linux-block, linux-kernel, Javier González, Javier González

On 10/31/2016 09:22 PM, Jens Axboe wrote:
> On 10/31/2016 07:08 AM, Matias Bjørling wrote:
>> On 10/27/2016 08:01 PM, Javier González wrote:
>>> From: Javier González <javier@javigon.com>
>>>
>>> On target initialization, targets use callbacks to the media manager to
>>> configure the LUNs they use. In order to simplify the flow, drop this
>>> callbacks and manage everything internally on the media manager.
>>>
>>> By making use of the newly introduce LUN management structure, the media
>>> manager knows which target exclusively owns each target and can
>>> therefore allocate and free all the necessary structures before
>>> initializing the target. Not exclusively owned LUNs belong to the media
>>> manager in any case.
>>>
>>> Adapt rrpc to not use the reserve_lun/release_lun callback functions.
>>>
>>> Signed-off-by: Javier González <javier@cnexlabs.com>
>>> ---
>>>  drivers/lightnvm/gennvm.c | 68
>>> ++++++++++++++++++++++++++++++++++++-----------
>>>  drivers/lightnvm/rrpc.c   | 12 +--------
>>>  include/linux/lightnvm.h  |  5 +---
>>>  3 files changed, 55 insertions(+), 30 deletions(-)
>>>
>>> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
>>> index 8bff725..a340685 100644
>>> --- a/drivers/lightnvm/gennvm.c
>>> +++ b/drivers/lightnvm/gennvm.c
>>> @@ -35,6 +35,50 @@ static const struct block_device_operations
>>> gen_fops = {
>>>      .owner        = THIS_MODULE,
>>>  };
>>>
>>> +static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int
>>> lun_end,
>>> +                struct nvm_target *t)
>>> +{
>>> +    struct gen_dev *gn = dev->mp;
>>> +    struct gen_lun *lun;
>>> +    int i;
>>> +
>>> +    for (i = lun_begin; i <= lun_end; i++) {
>>> +        if (test_and_set_bit(i, dev->lun_map)) {
>>> +            pr_err("gennvm: lun %d is already allocated\n", i);
>>> +            goto fail;
>>> +        }
>>> +
>>> +        lun = &gn->luns[i];
>>> +        lun->tgt = t;
>>> +        lun->vlun.priv = lun->mgmt;
>>> +    }
>>> +
>>> +    return 0;
>>> +fail:
>>> +    while (--i > lun_begin)
>>> +        clear_bit(i, dev->lun_map);
>>> +
>>> +    return 1;
>>
>> return -EINVAL;
> 
> -EBUSY?
> 
>> Lad os lige snakke lidt om dette her senere også :)
> 
> And probably keep the public emails in English :-)
> 

Haha, will do. Good to spice up the mailing list a bit :)

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks
  2016-10-31 13:08   ` Matias Bjørling
@ 2016-10-31 20:22     ` Jens Axboe
  2016-11-01  0:38       ` Matias Bjørling
  0 siblings, 1 reply; 11+ messages in thread
From: Jens Axboe @ 2016-10-31 20:22 UTC (permalink / raw)
  To: Matias Bjørling, Javier González
  Cc: linux-block, linux-kernel, Javier González, Javier González

On 10/31/2016 07:08 AM, Matias Bjørling wrote:
> On 10/27/2016 08:01 PM, Javier González wrote:
>> From: Javier González <javier@javigon.com>
>>
>> On target initialization, targets use callbacks to the media manager to
>> configure the LUNs they use. In order to simplify the flow, drop this
>> callbacks and manage everything internally on the media manager.
>>
>> By making use of the newly introduce LUN management structure, the media
>> manager knows which target exclusively owns each target and can
>> therefore allocate and free all the necessary structures before
>> initializing the target. Not exclusively owned LUNs belong to the media
>> manager in any case.
>>
>> Adapt rrpc to not use the reserve_lun/release_lun callback functions.
>>
>> Signed-off-by: Javier González <javier@cnexlabs.com>
>> ---
>>  drivers/lightnvm/gennvm.c | 68 ++++++++++++++++++++++++++++++++++++-----------
>>  drivers/lightnvm/rrpc.c   | 12 +--------
>>  include/linux/lightnvm.h  |  5 +---
>>  3 files changed, 55 insertions(+), 30 deletions(-)
>>
>> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
>> index 8bff725..a340685 100644
>> --- a/drivers/lightnvm/gennvm.c
>> +++ b/drivers/lightnvm/gennvm.c
>> @@ -35,6 +35,50 @@ static const struct block_device_operations gen_fops = {
>>  	.owner		= THIS_MODULE,
>>  };
>>
>> +static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end,
>> +			    struct nvm_target *t)
>> +{
>> +	struct gen_dev *gn = dev->mp;
>> +	struct gen_lun *lun;
>> +	int i;
>> +
>> +	for (i = lun_begin; i <= lun_end; i++) {
>> +		if (test_and_set_bit(i, dev->lun_map)) {
>> +			pr_err("gennvm: lun %d is already allocated\n", i);
>> +			goto fail;
>> +		}
>> +
>> +		lun = &gn->luns[i];
>> +		lun->tgt = t;
>> +		lun->vlun.priv = lun->mgmt;
>> +	}
>> +
>> +	return 0;
>> +fail:
>> +	while (--i > lun_begin)
>> +		clear_bit(i, dev->lun_map);
>> +
>> +	return 1;
>
> return -EINVAL;

-EBUSY?

> Lad os lige snakke lidt om dette her senere også :)

And probably keep the public emails in English :-)

-- 
Jens Axboe

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks
  2016-10-27 18:01 ` [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks Javier González
@ 2016-10-31 13:08   ` Matias Bjørling
  2016-10-31 20:22     ` Jens Axboe
  0 siblings, 1 reply; 11+ messages in thread
From: Matias Bjørling @ 2016-10-31 13:08 UTC (permalink / raw)
  To: Javier González
  Cc: linux-block, linux-kernel, Javier González, Javier González

On 10/27/2016 08:01 PM, Javier González wrote:
> From: Javier González <javier@javigon.com>
> 
> On target initialization, targets use callbacks to the media manager to
> configure the LUNs they use. In order to simplify the flow, drop this
> callbacks and manage everything internally on the media manager.
> 
> By making use of the newly introduce LUN management structure, the media
> manager knows which target exclusively owns each target and can
> therefore allocate and free all the necessary structures before
> initializing the target. Not exclusively owned LUNs belong to the media
> manager in any case.
> 
> Adapt rrpc to not use the reserve_lun/release_lun callback functions.
> 
> Signed-off-by: Javier González <javier@cnexlabs.com>
> ---
>  drivers/lightnvm/gennvm.c | 68 ++++++++++++++++++++++++++++++++++++-----------
>  drivers/lightnvm/rrpc.c   | 12 +--------
>  include/linux/lightnvm.h  |  5 +---
>  3 files changed, 55 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 8bff725..a340685 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -35,6 +35,50 @@ static const struct block_device_operations gen_fops = {
>  	.owner		= THIS_MODULE,
>  };
>  
> +static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end,
> +			    struct nvm_target *t)
> +{
> +	struct gen_dev *gn = dev->mp;
> +	struct gen_lun *lun;
> +	int i;
> +
> +	for (i = lun_begin; i <= lun_end; i++) {
> +		if (test_and_set_bit(i, dev->lun_map)) {
> +			pr_err("gennvm: lun %d is already allocated\n", i);
> +			goto fail;
> +		}
> +
> +		lun = &gn->luns[i];
> +		lun->tgt = t;
> +		lun->vlun.priv = lun->mgmt;
> +	}
> +
> +	return 0;
> +fail:
> +	while (--i > lun_begin)
> +		clear_bit(i, dev->lun_map);
> +
> +	return 1;

return -EINVAL;

Lad os lige snakke lidt om dette her senere også :) [Danish: "Let's also talk a bit about this later :)"]
> +}
> +
> +static void gen_release_luns(struct nvm_dev *dev, struct nvm_target *t)
> +{
> +	struct gen_dev *gn = dev->mp;
> +	struct gen_lun *lun;
> +	int lunid;
> +	int i;
> +
> +	gen_for_each_lun(gn, lun, i) {
> +		if (lun->tgt != t)
> +			continue;
> +
> +		lunid = lun->vlun.id;
> +		WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
> +		lun->vlun.priv = NULL;
> +		lun->tgt = NULL;
> +	}
> +}
> +
>  static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
>  {
>  	struct gen_dev *gn = dev->mp;
> @@ -80,6 +124,9 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
>  	tdisk->fops = &gen_fops;
>  	tdisk->queue = tqueue;
>  
> +	if (tt->exclusive && gen_reserve_luns(dev, s->lun_begin, s->lun_end, t))
> +		goto err_reserve;
> +
>  	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
>  	if (IS_ERR(targetdata))
>  		goto err_init;
> @@ -102,6 +149,8 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
>  
>  	return 0;
>  err_init:
> +	gen_release_luns(dev, t);
> +err_reserve:
>  	put_disk(tdisk);
>  err_queue:
>  	blk_cleanup_queue(tqueue);
> @@ -110,7 +159,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
>  	return -ENOMEM;
>  }
>  
> -static void __gen_remove_target(struct nvm_target *t)
> +static void __gen_remove_target(struct nvm_dev *dev, struct nvm_target *t)
>  {
>  	struct nvm_tgt_type *tt = t->type;
>  	struct gendisk *tdisk = t->disk;
> @@ -122,6 +171,7 @@ static void __gen_remove_target(struct nvm_target *t)
>  	if (tt->exit)
>  		tt->exit(tdisk->private_data);
>  
> +	gen_release_luns(dev, t);
>  	put_disk(tdisk);
>  
>  	list_del(&t->list);
> @@ -152,7 +202,7 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
>  		mutex_unlock(&gn->lock);
>  		return 1;
>  	}
> -	__gen_remove_target(t);
> +	__gen_remove_target(dev, t);
>  	mutex_unlock(&gn->lock);
>  
>  	return 0;
> @@ -474,7 +524,7 @@ static void gen_unregister(struct nvm_dev *dev)
>  	list_for_each_entry_safe(t, tmp, &gn->targets, list) {
>  		if (t->dev != dev)
>  			continue;
> -		__gen_remove_target(t);
> +		__gen_remove_target(dev, t);
>  	}
>  	mutex_unlock(&gn->lock);
>  
> @@ -618,16 +668,6 @@ static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, int flags)
>  	return nvm_erase_ppa(dev, &addr, 1, flags);
>  }
>  
> -static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
> -{
> -	return test_and_set_bit(lunid, dev->lun_map);
> -}
> -
> -static void gen_release_lun(struct nvm_dev *dev, int lunid)
> -{
> -	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
> -}
> -
>  static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
>  {
>  	struct gen_dev *gn = dev->mp;
> @@ -674,8 +714,6 @@ static struct nvmm_type gen = {
>  	.mark_blk		= gen_mark_blk,
>  
>  	.get_lun		= gen_get_lun,
> -	.reserve_lun		= gen_reserve_lun,
> -	.release_lun		= gen_release_lun,
>  	.lun_info_print		= gen_lun_info_print,
>  
>  	.get_area		= gen_get_area,
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index f293d00..cb30ccf 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -1167,8 +1167,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
>  
>  static void rrpc_luns_free(struct rrpc *rrpc)
>  {
> -	struct nvm_dev *dev = rrpc->dev;
> -	struct nvm_lun *lun;
>  	struct rrpc_lun *rlun;
>  	int i;
>  
> @@ -1177,10 +1175,6 @@ static void rrpc_luns_free(struct rrpc *rrpc)
>  
>  	for (i = 0; i < rrpc->nr_luns; i++) {
>  		rlun = &rrpc->luns[i];
> -		lun = rlun->parent;
> -		if (!lun)
> -			break;
> -		dev->mt->release_lun(dev, lun->id);
>  		vfree(rlun->blocks);
>  	}
>  
> @@ -1210,11 +1204,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>  		int lunid = lun_begin + i;
>  		struct nvm_lun *lun;
>  
> -		if (dev->mt->reserve_lun(dev, lunid)) {
> -			pr_err("rrpc: lun %u is already allocated\n", lunid);
> -			goto err;
> -		}
> -
>  		lun = dev->mt->get_lun(dev, lunid);
>  		if (!lun) {
>  			pr_err("rrpc: cannot get lun %d\n", lun->id);
> @@ -1508,6 +1497,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
>  static struct nvm_tgt_type tt_rrpc = {
>  	.name		= "rrpc",
>  	.version	= {1, 0, 0},
> +	.exclusive	= 1,
>  
>  	.make_rq	= rrpc_make_rq,
>  	.capacity	= rrpc_capacity,
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index 14c6fa5..1957829 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -438,6 +438,7 @@ typedef void (nvm_tgt_exit_fn)(void *);
>  struct nvm_tgt_type {
>  	const char *name;
>  	unsigned int version[3];
> +	int exclusive;
>  
>  	/* target entry points */
>  	nvm_tgt_make_rq_fn *make_rq;
> @@ -487,8 +488,6 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
>  typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, int);
>  typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
>  typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
> -typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
> -typedef void (nvmm_release_lun)(struct nvm_dev *, int);
>  typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
>  
>  typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
> @@ -519,8 +518,6 @@ struct nvmm_type {
>  
>  	/* Configuration management */
>  	nvmm_get_lun_fn *get_lun;
> -	nvmm_reserve_lun *reserve_lun;
> -	nvmm_release_lun *release_lun;
>  
>  	/* Statistics */
>  	nvmm_lun_info_print_fn *lun_info_print;
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks
  2016-10-27 18:01 [PATCH 0/7] LightNVM patchset V2 Javier González
@ 2016-10-27 18:01 ` Javier González
  2016-10-31 13:08   ` Matias Bjørling
  0 siblings, 1 reply; 11+ messages in thread
From: Javier González @ 2016-10-27 18:01 UTC (permalink / raw)
  To: mb; +Cc: linux-block, linux-kernel, Javier González, Javier González

From: Javier González <javier@javigon.com>

On target initialization, targets use callbacks to the media manager to
configure the LUNs they use. In order to simplify the flow, drop these
callbacks and manage everything internally in the media manager.

By making use of the newly introduced LUN management structure, the
media manager knows which target exclusively owns each LUN and can
therefore allocate and free all the necessary structures before
initializing the target. LUNs that are not exclusively owned belong to
the media manager in any case.

Adapt rrpc to not use the reserve_lun/release_lun callback functions.

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 drivers/lightnvm/gennvm.c | 68 ++++++++++++++++++++++++++++++++++++-----------
 drivers/lightnvm/rrpc.c   | 12 +--------
 include/linux/lightnvm.h  |  5 +---
 3 files changed, 55 insertions(+), 30 deletions(-)

diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 8bff725..a340685 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -35,6 +35,50 @@ static const struct block_device_operations gen_fops = {
 	.owner		= THIS_MODULE,
 };
 
+static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end,
+			    struct nvm_target *t)
+{
+	struct gen_dev *gn = dev->mp;
+	struct gen_lun *lun;
+	int i;
+
+	for (i = lun_begin; i <= lun_end; i++) {
+		if (test_and_set_bit(i, dev->lun_map)) {
+			pr_err("gennvm: lun %d is already allocated\n", i);
+			goto fail;
+		}
+
+		lun = &gn->luns[i];
+		lun->tgt = t;
+		lun->vlun.priv = lun->mgmt;
+	}
+
+	return 0;
+fail:
+	while (--i > lun_begin)
+		clear_bit(i, dev->lun_map);
+
+	return 1;
+}
+
+static void gen_release_luns(struct nvm_dev *dev, struct nvm_target *t)
+{
+	struct gen_dev *gn = dev->mp;
+	struct gen_lun *lun;
+	int lunid;
+	int i;
+
+	gen_for_each_lun(gn, lun, i) {
+		if (lun->tgt != t)
+			continue;
+
+		lunid = lun->vlun.id;
+		WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+		lun->vlun.priv = NULL;
+		lun->tgt = NULL;
+	}
+}
+
 static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 {
 	struct gen_dev *gn = dev->mp;
@@ -80,6 +124,9 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	tdisk->fops = &gen_fops;
 	tdisk->queue = tqueue;
 
+	if (tt->exclusive && gen_reserve_luns(dev, s->lun_begin, s->lun_end, t))
+		goto err_reserve;
+
 	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
 	if (IS_ERR(targetdata))
 		goto err_init;
@@ -102,6 +149,8 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 
 	return 0;
 err_init:
+	gen_release_luns(dev, t);
+err_reserve:
 	put_disk(tdisk);
 err_queue:
 	blk_cleanup_queue(tqueue);
@@ -110,7 +159,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	return -ENOMEM;
 }
 
-static void __gen_remove_target(struct nvm_target *t)
+static void __gen_remove_target(struct nvm_dev *dev, struct nvm_target *t)
 {
 	struct nvm_tgt_type *tt = t->type;
 	struct gendisk *tdisk = t->disk;
@@ -122,6 +171,7 @@ static void __gen_remove_target(struct nvm_target *t)
 	if (tt->exit)
 		tt->exit(tdisk->private_data);
 
+	gen_release_luns(dev, t);
 	put_disk(tdisk);
 
 	list_del(&t->list);
@@ -152,7 +202,7 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
 		mutex_unlock(&gn->lock);
 		return 1;
 	}
-	__gen_remove_target(t);
+	__gen_remove_target(dev, t);
 	mutex_unlock(&gn->lock);
 
 	return 0;
@@ -474,7 +524,7 @@ static void gen_unregister(struct nvm_dev *dev)
 	list_for_each_entry_safe(t, tmp, &gn->targets, list) {
 		if (t->dev != dev)
 			continue;
-		__gen_remove_target(t);
+		__gen_remove_target(dev, t);
 	}
 	mutex_unlock(&gn->lock);
 
@@ -618,16 +668,6 @@ static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, int flags)
 	return nvm_erase_ppa(dev, &addr, 1, flags);
 }
 
-static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
-{
-	return test_and_set_bit(lunid, dev->lun_map);
-}
-
-static void gen_release_lun(struct nvm_dev *dev, int lunid)
-{
-	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
-}
-
 static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
 {
 	struct gen_dev *gn = dev->mp;
@@ -674,8 +714,6 @@ static struct nvmm_type gen = {
 	.mark_blk		= gen_mark_blk,
 
 	.get_lun		= gen_get_lun,
-	.reserve_lun		= gen_reserve_lun,
-	.release_lun		= gen_release_lun,
 	.lun_info_print		= gen_lun_info_print,
 
 	.get_area		= gen_get_area,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index f293d00..cb30ccf 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1167,8 +1167,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
-	struct nvm_dev *dev = rrpc->dev;
-	struct nvm_lun *lun;
 	struct rrpc_lun *rlun;
 	int i;
 
@@ -1177,10 +1175,6 @@ static void rrpc_luns_free(struct rrpc *rrpc)
 
 	for (i = 0; i < rrpc->nr_luns; i++) {
 		rlun = &rrpc->luns[i];
-		lun = rlun->parent;
-		if (!lun)
-			break;
-		dev->mt->release_lun(dev, lun->id);
 		vfree(rlun->blocks);
 	}
 
@@ -1210,11 +1204,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 		int lunid = lun_begin + i;
 		struct nvm_lun *lun;
 
-		if (dev->mt->reserve_lun(dev, lunid)) {
-			pr_err("rrpc: lun %u is already allocated\n", lunid);
-			goto err;
-		}
-
 		lun = dev->mt->get_lun(dev, lunid);
 		if (!lun) {
 			pr_err("rrpc: cannot get lun %d\n", lun->id);
@@ -1508,6 +1497,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 static struct nvm_tgt_type tt_rrpc = {
 	.name		= "rrpc",
 	.version	= {1, 0, 0},
+	.exclusive	= 1,
 
 	.make_rq	= rrpc_make_rq,
 	.capacity	= rrpc_capacity,
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 14c6fa5..1957829 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -438,6 +438,7 @@ typedef void (nvm_tgt_exit_fn)(void *);
 struct nvm_tgt_type {
 	const char *name;
 	unsigned int version[3];
+	int exclusive;
 
 	/* target entry points */
 	nvm_tgt_make_rq_fn *make_rq;
@@ -487,8 +488,6 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, int);
 typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
-typedef void (nvmm_release_lun)(struct nvm_dev *, int);
 typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
 typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
@@ -519,8 +518,6 @@ struct nvmm_type {
 
 	/* Configuration management */
 	nvmm_get_lun_fn *get_lun;
-	nvmm_reserve_lun *reserve_lun;
-	nvmm_release_lun *release_lun;
 
 	/* Statistics */
 	nvmm_lun_info_print_fn *lun_info_print;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2016-11-01  0:38 UTC | newest]

Thread overview: 11+ messages
2016-10-27 14:49 [PATCH 1/7] lightnvm: enable to send hint to erase command Javier González
2016-10-27 14:49 ` [PATCH 2/7] lightnvm: do not decide on device blocks Javier González
2016-10-27 14:49 ` [PATCH 3/7] lightnvm: manage block list on LUN owner Javier González
2016-10-27 14:49 ` [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks Javier González
2016-10-27 14:49 ` [PATCH 5/7] lightnvm: export set bad block table Javier González
2016-10-27 14:49 ` [PATCH 6/7] lightnvm: add ECC error codes Javier González
2016-10-27 14:49 ` [PATCH 7/7] lightnvm: rrpc: split bios of size > 256kb Javier González
2016-10-27 18:01 [PATCH 0/7] LightNVM patchset V2 Javier González
2016-10-27 18:01 ` [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks Javier González
2016-10-31 13:08   ` Matias Bjørling
2016-10-31 20:22     ` Jens Axboe
2016-11-01  0:38       ` Matias Bjørling
