All of lore.kernel.org
 help / color / mirror / Atom feed
From: Dongsheng Yang <yangds.fnst@cn.fujitsu.com>
To: "Matias Bjørling" <m@bjorling.me>,
	axboe@fb.com, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org
Cc: <hch@infradead.org>, <jg@lightnvm.io>, <Stephen.Bates@pmcs.com>,
	<keith.busch@intel.com>
Subject: Re: [PATCH v13 2/5] gennvm: Generic NVM manager
Date: Thu, 29 Oct 2015 07:41:03 +0800	[thread overview]
Message-ID: <56315D0F.5010602@cn.fujitsu.com> (raw)
In-Reply-To: <1445992233-676-3-git-send-email-m@bjorling.me>

On 10/28/2015 08:30 AM, Matias Bjørling wrote:
> The implementation for Open-Channel SSDs is divided into media
[...]
> +		lun->reserved_blocks = 2; /* for GC only */
> +		lun->vlun.id = i;
> +		lun->vlun.lun_id = i % dev->luns_per_chnl;
> +		lun->vlun.chnl_id = i / dev->luns_per_chnl;

Please use do_div(). The % operator may not be supported on some
platforms, as the kbuild robot pointed out in v12.

Yang

> +		lun->vlun.nr_free_blocks = dev->blks_per_lun;
> +	}
> +	return 0;
> +}
> +
> +static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
> +								void *private)
> +{
> +	struct gen_nvm *gn = private;
> +	struct gen_lun *lun = &gn->luns[lun_id];
> +	struct nvm_block *block;
> +	int i;
> +
> +	if (unlikely(bitmap_empty(bb_bitmap, nr_blocks)))
> +		return 0;
> +
> +	i = -1;
> +	while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) <
> +			nr_blocks) {
> +		block = &lun->vlun.blocks[i];
> +		if (!block) {
> +			pr_err("gen_nvm: BB data is out of bounds.\n");
> +			return -EINVAL;
> +		}
> +		list_move_tail(&block->list, &lun->bb_list);
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
> +{
> +	struct nvm_dev *dev = private;
> +	struct gen_nvm *gn = dev->mp;
> +	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
> +	u64 elba = slba + nlb;
> +	struct gen_lun *lun;
> +	struct nvm_block *blk;
> +	u64 i;
> +	int lun_id;
> +
> +	if (unlikely(elba > dev->total_pages)) {
> +		pr_err("gen_nvm: L2P data from device is out of bounds!\n");
> +		return -EINVAL;
> +	}
> +
> +	for (i = 0; i < nlb; i++) {
> +		u64 pba = le64_to_cpu(entries[i]);
> +
> +		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
> +			pr_err("gen_nvm: L2P data entry is out of bounds!\n");
> +			return -EINVAL;
> +		}
> +
> +		/* Address zero is a special one. The first page on a disk is
> +		 * protected. It often holds internal device boot
> +		 * information.
> +		 */
> +		if (!pba)
> +			continue;
> +
> +		/* resolve block from physical address */
> +		lun_id = div_u64(pba, dev->sec_per_lun);
> +		lun = &gn->luns[lun_id];
> +
> +		/* Calculate block offset into lun */
> +		pba = pba - (dev->sec_per_lun * lun_id);
> +		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
> +
> +		if (!blk->type) {
> +			/* at this point, we don't know anything about the
> +			 * block. It's up to the FTL on top to re-etablish the
> +			 * block state
> +			 */
> +			list_move_tail(&blk->list, &lun->used_list);
> +			blk->type = 1;
> +			lun->vlun.nr_free_blocks--;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
> +{
> +	struct gen_lun *lun;
> +	struct nvm_block *block;
> +	sector_t lun_iter, blk_iter, cur_block_id = 0;
> +	int ret;
> +
> +	gennvm_for_each_lun(gn, lun, lun_iter) {
> +		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
> +							dev->blks_per_lun);
> +		if (!lun->vlun.blocks)
> +			return -ENOMEM;
> +
> +		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
> +			block = &lun->vlun.blocks[blk_iter];
> +
> +			INIT_LIST_HEAD(&block->list);
> +
> +			block->lun = &lun->vlun;
> +			block->id = cur_block_id++;
> +
> +			/* First block is reserved for device */
> +			if (unlikely(lun_iter == 0 && blk_iter == 0))
> +				continue;
> +
> +			list_add_tail(&block->list, &lun->free_list);
> +		}
> +
> +		if (dev->ops->get_bb_tbl) {
> +			ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id,
> +					dev->blks_per_lun, gennvm_block_bb, gn);
> +			if (ret)
> +				pr_err("gen_nvm: could not read BB table\n");
> +		}
> +	}
> +
> +	if (dev->ops->get_l2p_tbl) {
> +		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
> +							gennvm_block_map, dev);
> +		if (ret) {
> +			pr_err("gen_nvm: could not read L2P table.\n");
> +			pr_warn("gen_nvm: default block initialization");
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_register(struct nvm_dev *dev)
> +{
> +	struct gen_nvm *gn;
> +	int ret;
> +
> +	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
> +	if (!gn)
> +		return -ENOMEM;
> +
> +	gn->nr_luns = dev->nr_luns;
> +	dev->mp = gn;
> +
> +	ret = gennvm_luns_init(dev, gn);
> +	if (ret) {
> +		pr_err("gen_nvm: could not initialize luns\n");
> +		goto err;
> +	}
> +
> +	ret = gennvm_blocks_init(dev, gn);
> +	if (ret) {
> +		pr_err("gen_nvm: could not initialize blocks\n");
> +		goto err;
> +	}
> +
> +	return 1;
> +err:
> +	kfree(gn);
> +	return ret;
> +}
> +
> +static void gennvm_unregister(struct nvm_dev *dev)
> +{
> +	gennvm_blocks_free(dev);
> +	gennvm_luns_free(dev);
> +	kfree(dev->mp);
> +	dev->mp = NULL;
> +}
> +
> +static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
> +				struct nvm_lun *vlun, unsigned long flags)
> +{
> +	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
> +	struct nvm_block *blk = NULL;
> +	int is_gc = flags & NVM_IOTYPE_GC;
> +
> +	BUG_ON(!lun);
> +
> +	spin_lock(&vlun->lock);
> +
> +	if (list_empty(&lun->free_list)) {
> +		pr_err_ratelimited("gen_nvm: lun %u have no free pages available",
> +								lun->vlun.id);
> +		spin_unlock(&vlun->lock);
> +		goto out;
> +	}
> +
> +	while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
> +		spin_unlock(&vlun->lock);
> +		goto out;
> +	}
> +
> +	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
> +	list_move_tail(&blk->list, &lun->used_list);
> +	blk->type = 1;
> +
> +	lun->vlun.nr_free_blocks--;
> +
> +	spin_unlock(&vlun->lock);
> +out:
> +	return blk;
> +}
> +
> +static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
> +{
> +	struct nvm_lun *vlun = blk->lun;
> +	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
> +
> +	spin_lock(&vlun->lock);
> +
> +	switch (blk->type) {
> +	case 1:
> +		list_move_tail(&blk->list, &lun->free_list);
> +		lun->vlun.nr_free_blocks++;
> +		blk->type = 0;
> +		break;
> +	case 2:
> +		list_move_tail(&blk->list, &lun->bb_list);
> +		break;
> +	default:
> +		BUG();
> +	}
> +
> +	spin_unlock(&vlun->lock);
> +}
> +
> +static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			rqd->ppa_list[i] = addr_to_generic_mode(dev,
> +							rqd->ppa_list[i]);
> +	else
> +		rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
> +}
> +
> +static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			rqd->ppa_list[i] = generic_to_addr_mode(dev,
> +							rqd->ppa_list[i]);
> +	else
> +		rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
> +}
> +
> +static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	if (!dev->ops->submit_io)
> +		return 0;
> +
> +	/* Convert address space */
> +	gennvm_generic_to_addr_mode(dev, rqd);
> +
> +	rqd->dev = dev;
> +	return dev->ops->submit_io(dev->q, rqd);
> +}
> +
> +static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
> +								int type)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +	struct gen_lun *lun;
> +	struct nvm_block *blk;
> +
> +	BUG_ON(ppa->g.ch > dev->nr_chnls);
> +	BUG_ON(ppa->g.lun > dev->luns_per_chnl);
> +	BUG_ON(ppa->g.blk > dev->blks_per_lun);
> +
> +	lun = &gn->luns[ppa->g.lun * ppa->g.ch];
> +	blk = &lun->vlun.blocks[ppa->g.blk];
> +
> +	/* will be moved to bb list on put_blk from target */
> +	blk->type = type;
> +}
> +
> +/* mark block bad. It is expected the target recover from the error. */
> +static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (!dev->ops->set_bb)
> +		return;
> +
> +	if (dev->ops->set_bb(dev->q, rqd, 1))
> +		return;
> +
> +	gennvm_addr_to_generic_mode(dev, rqd);
> +
> +	/* look up blocks and mark them as bad */
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
> +	else
> +		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
> +}
> +
> +static int gennvm_end_io(struct nvm_rq *rqd, int error)
> +{
> +	struct nvm_tgt_instance *ins = rqd->ins;
> +	int ret = 0;
> +
> +	switch (error) {
> +	case NVM_RSP_SUCCESS:
> +		break;
> +	case NVM_RSP_ERR_EMPTYPAGE:
> +		break;
> +	case NVM_RSP_ERR_FAILWRITE:
> +		gennvm_mark_blk_bad(rqd->dev, rqd);
> +	default:
> +		ret++;
> +	}
> +
> +	ret += ins->tt->end_io(rqd, error);
> +
> +	return ret;
> +}
> +
> +static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
> +							unsigned long flags)
> +{
> +	int plane_cnt = 0, pl_idx, ret;
> +	struct ppa_addr addr;
> +	struct nvm_rq rqd;
> +
> +	if (!dev->ops->erase_block)
> +		return 0;
> +
> +	addr = block_to_ppa(dev, blk);
> +
> +	if (dev->plane_mode == NVM_PLANE_SINGLE) {
> +		rqd.nr_pages = 1;
> +		rqd.ppa_addr = addr;
> +	} else {
> +		plane_cnt = (1 << dev->plane_mode);
> +		rqd.nr_pages = plane_cnt;
> +
> +		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
> +							&rqd.dma_ppa_list);
> +		if (!rqd.ppa_list) {
> +			pr_err("gen_nvm: failed to allocate dma memory\n");
> +			return -ENOMEM;
> +		}
> +
> +		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
> +			addr.g.pl = pl_idx;
> +			rqd.ppa_list[pl_idx] = addr;
> +		}
> +	}
> +
> +	gennvm_generic_to_addr_mode(dev, &rqd);
> +
> +	ret = dev->ops->erase_block(dev->q, &rqd);
> +
> +	if (plane_cnt)
> +		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
> +
> +	return ret;
> +}
> +
> +static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +
> +	return &gn->luns[lunid].vlun;
> +}
> +
> +static void gennvm_free_blocks_print(struct nvm_dev *dev)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +	struct gen_lun *lun;
> +	unsigned int i;
> +
> +	gennvm_for_each_lun(gn, lun, i)
> +		pr_info("%s: lun%8u\t%u\n",
> +					dev->name, i, lun->vlun.nr_free_blocks);
> +}
> +
> +static struct nvmm_type gennvm = {
> +	.name		= "gennvm",
> +	.version	= {0, 1, 0},
> +
> +	.register_mgr	= gennvm_register,
> +	.unregister_mgr	= gennvm_unregister,
> +
> +	.get_blk	= gennvm_get_blk,
> +	.put_blk	= gennvm_put_blk,
> +
> +	.submit_io	= gennvm_submit_io,
> +	.end_io		= gennvm_end_io,
> +	.erase_blk	= gennvm_erase_blk,
> +
> +	.get_lun	= gennvm_get_lun,
> +	.free_blocks_print = gennvm_free_blocks_print,
> +};
> +
> +static int __init gennvm_module_init(void)
> +{
> +	return nvm_register_mgr(&gennvm);
> +}
> +
> +static void gennvm_module_exit(void)
> +{
> +	nvm_unregister_mgr(&gennvm);
> +}
> +
> +module_init(gennvm_module_init);
> +module_exit(gennvm_module_exit);
> +MODULE_LICENSE("GPL v2");
> +MODULE_DESCRIPTION("Block manager for Hybrid Open-Channel SSDs");
> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
> new file mode 100644
> index 0000000..d23bd35
> --- /dev/null
> +++ b/drivers/lightnvm/gennvm.h
> @@ -0,0 +1,46 @@
> +/*
> + * Copyright: Matias Bjorling <mb@bjorling.me>
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License version
> + * 2 as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but
> + * WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * General Public License for more details.
> + *
> + */
> +
> +#ifndef GENNVM_H_
> +#define GENNVM_H_
> +
> +#include <linux/module.h>
> +#include <linux/vmalloc.h>
> +
> +#include <linux/lightnvm.h>
> +
> +struct gen_lun {
> +	struct nvm_lun vlun;
> +
> +	int reserved_blocks;
> +	/* lun block lists */
> +	struct list_head used_list;	/* In-use blocks */
> +	struct list_head free_list;	/* Not used blocks i.e. released
> +					 * and ready for use
> +					 */
> +	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
> +					 * free_list and used_list
> +					 */
> +};
> +
> +struct gen_nvm {
> +	int nr_luns;
> +	struct gen_lun *luns;
> +};
> +
> +#define gennvm_for_each_lun(bm, lun, i) \
> +		for ((i) = 0, lun = &(bm)->luns[0]; \
> +			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
> +
> +#endif /* GENNVM_H_ */
>


WARNING: multiple messages have this Message-ID (diff)
From: Dongsheng Yang <yangds.fnst@cn.fujitsu.com>
To: "Matias Bjørling" <m@bjorling.me>,
	axboe@fb.com, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org
Cc: <hch@infradead.org>, <jg@lightnvm.io>, <Stephen.Bates@pmcs.com>,
	<keith.busch@intel.com>
Subject: Re: [PATCH v13 2/5] gennvm: Generic NVM manager
Date: Thu, 29 Oct 2015 07:41:03 +0800	[thread overview]
Message-ID: <56315D0F.5010602@cn.fujitsu.com> (raw)
In-Reply-To: <1445992233-676-3-git-send-email-m@bjorling.me>

On 10/28/2015 08:30 AM, Matias Bjørling wrote:
> The implementation for Open-Channel SSDs is divided into media
[...]
> +		lun->reserved_blocks = 2; /* for GC only */
> +		lun->vlun.id = i;
> +		lun->vlun.lun_id = i % dev->luns_per_chnl;
> +		lun->vlun.chnl_id = i / dev->luns_per_chnl;

Please use do_div(). The % operator may not be supported on some
platforms, as the kbuild robot pointed out in v12.

Yang

> +		lun->vlun.nr_free_blocks = dev->blks_per_lun;
> +	}
> +	return 0;
> +}
> +
> +static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
> +								void *private)
> +{
> +	struct gen_nvm *gn = private;
> +	struct gen_lun *lun = &gn->luns[lun_id];
> +	struct nvm_block *block;
> +	int i;
> +
> +	if (unlikely(bitmap_empty(bb_bitmap, nr_blocks)))
> +		return 0;
> +
> +	i = -1;
> +	while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) <
> +			nr_blocks) {
> +		block = &lun->vlun.blocks[i];
> +		if (!block) {
> +			pr_err("gen_nvm: BB data is out of bounds.\n");
> +			return -EINVAL;
> +		}
> +		list_move_tail(&block->list, &lun->bb_list);
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
> +{
> +	struct nvm_dev *dev = private;
> +	struct gen_nvm *gn = dev->mp;
> +	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
> +	u64 elba = slba + nlb;
> +	struct gen_lun *lun;
> +	struct nvm_block *blk;
> +	u64 i;
> +	int lun_id;
> +
> +	if (unlikely(elba > dev->total_pages)) {
> +		pr_err("gen_nvm: L2P data from device is out of bounds!\n");
> +		return -EINVAL;
> +	}
> +
> +	for (i = 0; i < nlb; i++) {
> +		u64 pba = le64_to_cpu(entries[i]);
> +
> +		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
> +			pr_err("gen_nvm: L2P data entry is out of bounds!\n");
> +			return -EINVAL;
> +		}
> +
> +		/* Address zero is a special one. The first page on a disk is
> +		 * protected. It often holds internal device boot
> +		 * information.
> +		 */
> +		if (!pba)
> +			continue;
> +
> +		/* resolve block from physical address */
> +		lun_id = div_u64(pba, dev->sec_per_lun);
> +		lun = &gn->luns[lun_id];
> +
> +		/* Calculate block offset into lun */
> +		pba = pba - (dev->sec_per_lun * lun_id);
> +		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
> +
> +		if (!blk->type) {
> +			/* at this point, we don't know anything about the
> +			 * block. It's up to the FTL on top to re-etablish the
> +			 * block state
> +			 */
> +			list_move_tail(&blk->list, &lun->used_list);
> +			blk->type = 1;
> +			lun->vlun.nr_free_blocks--;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
> +{
> +	struct gen_lun *lun;
> +	struct nvm_block *block;
> +	sector_t lun_iter, blk_iter, cur_block_id = 0;
> +	int ret;
> +
> +	gennvm_for_each_lun(gn, lun, lun_iter) {
> +		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
> +							dev->blks_per_lun);
> +		if (!lun->vlun.blocks)
> +			return -ENOMEM;
> +
> +		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
> +			block = &lun->vlun.blocks[blk_iter];
> +
> +			INIT_LIST_HEAD(&block->list);
> +
> +			block->lun = &lun->vlun;
> +			block->id = cur_block_id++;
> +
> +			/* First block is reserved for device */
> +			if (unlikely(lun_iter == 0 && blk_iter == 0))
> +				continue;
> +
> +			list_add_tail(&block->list, &lun->free_list);
> +		}
> +
> +		if (dev->ops->get_bb_tbl) {
> +			ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id,
> +					dev->blks_per_lun, gennvm_block_bb, gn);
> +			if (ret)
> +				pr_err("gen_nvm: could not read BB table\n");
> +		}
> +	}
> +
> +	if (dev->ops->get_l2p_tbl) {
> +		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
> +							gennvm_block_map, dev);
> +		if (ret) {
> +			pr_err("gen_nvm: could not read L2P table.\n");
> +			pr_warn("gen_nvm: default block initialization");
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_register(struct nvm_dev *dev)
> +{
> +	struct gen_nvm *gn;
> +	int ret;
> +
> +	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
> +	if (!gn)
> +		return -ENOMEM;
> +
> +	gn->nr_luns = dev->nr_luns;
> +	dev->mp = gn;
> +
> +	ret = gennvm_luns_init(dev, gn);
> +	if (ret) {
> +		pr_err("gen_nvm: could not initialize luns\n");
> +		goto err;
> +	}
> +
> +	ret = gennvm_blocks_init(dev, gn);
> +	if (ret) {
> +		pr_err("gen_nvm: could not initialize blocks\n");
> +		goto err;
> +	}
> +
> +	return 1;
> +err:
> +	kfree(gn);
> +	return ret;
> +}
> +
> +static void gennvm_unregister(struct nvm_dev *dev)
> +{
> +	gennvm_blocks_free(dev);
> +	gennvm_luns_free(dev);
> +	kfree(dev->mp);
> +	dev->mp = NULL;
> +}
> +
> +static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
> +				struct nvm_lun *vlun, unsigned long flags)
> +{
> +	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
> +	struct nvm_block *blk = NULL;
> +	int is_gc = flags & NVM_IOTYPE_GC;
> +
> +	BUG_ON(!lun);
> +
> +	spin_lock(&vlun->lock);
> +
> +	if (list_empty(&lun->free_list)) {
> +		pr_err_ratelimited("gen_nvm: lun %u have no free pages available",
> +								lun->vlun.id);
> +		spin_unlock(&vlun->lock);
> +		goto out;
> +	}
> +
> +	while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
> +		spin_unlock(&vlun->lock);
> +		goto out;
> +	}
> +
> +	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
> +	list_move_tail(&blk->list, &lun->used_list);
> +	blk->type = 1;
> +
> +	lun->vlun.nr_free_blocks--;
> +
> +	spin_unlock(&vlun->lock);
> +out:
> +	return blk;
> +}
> +
> +static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
> +{
> +	struct nvm_lun *vlun = blk->lun;
> +	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
> +
> +	spin_lock(&vlun->lock);
> +
> +	switch (blk->type) {
> +	case 1:
> +		list_move_tail(&blk->list, &lun->free_list);
> +		lun->vlun.nr_free_blocks++;
> +		blk->type = 0;
> +		break;
> +	case 2:
> +		list_move_tail(&blk->list, &lun->bb_list);
> +		break;
> +	default:
> +		BUG();
> +	}
> +
> +	spin_unlock(&vlun->lock);
> +}
> +
> +static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			rqd->ppa_list[i] = addr_to_generic_mode(dev,
> +							rqd->ppa_list[i]);
> +	else
> +		rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
> +}
> +
> +static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			rqd->ppa_list[i] = generic_to_addr_mode(dev,
> +							rqd->ppa_list[i]);
> +	else
> +		rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
> +}
> +
> +static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	if (!dev->ops->submit_io)
> +		return 0;
> +
> +	/* Convert address space */
> +	gennvm_generic_to_addr_mode(dev, rqd);
> +
> +	rqd->dev = dev;
> +	return dev->ops->submit_io(dev->q, rqd);
> +}
> +
> +static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
> +								int type)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +	struct gen_lun *lun;
> +	struct nvm_block *blk;
> +
> +	BUG_ON(ppa->g.ch > dev->nr_chnls);
> +	BUG_ON(ppa->g.lun > dev->luns_per_chnl);
> +	BUG_ON(ppa->g.blk > dev->blks_per_lun);
> +
> +	lun = &gn->luns[ppa->g.lun * ppa->g.ch];
> +	blk = &lun->vlun.blocks[ppa->g.blk];
> +
> +	/* will be moved to bb list on put_blk from target */
> +	blk->type = type;
> +}
> +
> +/* mark block bad. It is expected the target recover from the error. */
> +static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (!dev->ops->set_bb)
> +		return;
> +
> +	if (dev->ops->set_bb(dev->q, rqd, 1))
> +		return;
> +
> +	gennvm_addr_to_generic_mode(dev, rqd);
> +
> +	/* look up blocks and mark them as bad */
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
> +	else
> +		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
> +}
> +
> +static int gennvm_end_io(struct nvm_rq *rqd, int error)
> +{
> +	struct nvm_tgt_instance *ins = rqd->ins;
> +	int ret = 0;
> +
> +	switch (error) {
> +	case NVM_RSP_SUCCESS:
> +		break;
> +	case NVM_RSP_ERR_EMPTYPAGE:
> +		break;
> +	case NVM_RSP_ERR_FAILWRITE:
> +		gennvm_mark_blk_bad(rqd->dev, rqd);
> +	default:
> +		ret++;
> +	}
> +
> +	ret += ins->tt->end_io(rqd, error);
> +
> +	return ret;
> +}
> +
> +static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
> +							unsigned long flags)
> +{
> +	int plane_cnt = 0, pl_idx, ret;
> +	struct ppa_addr addr;
> +	struct nvm_rq rqd;
> +
> +	if (!dev->ops->erase_block)
> +		return 0;
> +
> +	addr = block_to_ppa(dev, blk);
> +
> +	if (dev->plane_mode == NVM_PLANE_SINGLE) {
> +		rqd.nr_pages = 1;
> +		rqd.ppa_addr = addr;
> +	} else {
> +		plane_cnt = (1 << dev->plane_mode);
> +		rqd.nr_pages = plane_cnt;
> +
> +		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
> +							&rqd.dma_ppa_list);
> +		if (!rqd.ppa_list) {
> +			pr_err("gen_nvm: failed to allocate dma memory\n");
> +			return -ENOMEM;
> +		}
> +
> +		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
> +			addr.g.pl = pl_idx;
> +			rqd.ppa_list[pl_idx] = addr;
> +		}
> +	}
> +
> +	gennvm_generic_to_addr_mode(dev, &rqd);
> +
> +	ret = dev->ops->erase_block(dev->q, &rqd);
> +
> +	if (plane_cnt)
> +		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
> +
> +	return ret;
> +}
> +
> +static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +
> +	return &gn->luns[lunid].vlun;
> +}
> +
> +static void gennvm_free_blocks_print(struct nvm_dev *dev)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +	struct gen_lun *lun;
> +	unsigned int i;
> +
> +	gennvm_for_each_lun(gn, lun, i)
> +		pr_info("%s: lun%8u\t%u\n",
> +					dev->name, i, lun->vlun.nr_free_blocks);
> +}
> +
> +static struct nvmm_type gennvm = {
> +	.name		= "gennvm",
> +	.version	= {0, 1, 0},
> +
> +	.register_mgr	= gennvm_register,
> +	.unregister_mgr	= gennvm_unregister,
> +
> +	.get_blk	= gennvm_get_blk,
> +	.put_blk	= gennvm_put_blk,
> +
> +	.submit_io	= gennvm_submit_io,
> +	.end_io		= gennvm_end_io,
> +	.erase_blk	= gennvm_erase_blk,
> +
> +	.get_lun	= gennvm_get_lun,
> +	.free_blocks_print = gennvm_free_blocks_print,
> +};
> +
> +static int __init gennvm_module_init(void)
> +{
> +	return nvm_register_mgr(&gennvm);
> +}
> +
> +static void gennvm_module_exit(void)
> +{
> +	nvm_unregister_mgr(&gennvm);
> +}
> +
> +module_init(gennvm_module_init);
> +module_exit(gennvm_module_exit);
> +MODULE_LICENSE("GPL v2");
> +MODULE_DESCRIPTION("Block manager for Hybrid Open-Channel SSDs");
> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
> new file mode 100644
> index 0000000..d23bd35
> --- /dev/null
> +++ b/drivers/lightnvm/gennvm.h
> @@ -0,0 +1,46 @@
> +/*
> + * Copyright: Matias Bjorling <mb@bjorling.me>
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License version
> + * 2 as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but
> + * WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * General Public License for more details.
> + *
> + */
> +
> +#ifndef GENNVM_H_
> +#define GENNVM_H_
> +
> +#include <linux/module.h>
> +#include <linux/vmalloc.h>
> +
> +#include <linux/lightnvm.h>
> +
> +struct gen_lun {
> +	struct nvm_lun vlun;
> +
> +	int reserved_blocks;
> +	/* lun block lists */
> +	struct list_head used_list;	/* In-use blocks */
> +	struct list_head free_list;	/* Not used blocks i.e. released
> +					 * and ready for use
> +					 */
> +	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
> +					 * free_list and used_list
> +					 */
> +};
> +
> +struct gen_nvm {
> +	int nr_luns;
> +	struct gen_lun *luns;
> +};
> +
> +#define gennvm_for_each_lun(bm, lun, i) \
> +		for ((i) = 0, lun = &(bm)->luns[0]; \
> +			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
> +
> +#endif /* GENNVM_H_ */
>

--
To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

WARNING: multiple messages have this Message-ID (diff)
From: yangds.fnst@cn.fujitsu.com (Dongsheng Yang)
Subject: [PATCH v13 2/5] gennvm: Generic NVM manager
Date: Thu, 29 Oct 2015 07:41:03 +0800	[thread overview]
Message-ID: <56315D0F.5010602@cn.fujitsu.com> (raw)
In-Reply-To: <1445992233-676-3-git-send-email-m@bjorling.me>

On 10/28/2015 08:30 AM, Matias Bjørling wrote:
> The implementation for Open-Channel SSDs is divided into media
[...]
> +		lun->reserved_blocks = 2; /* for GC only */
> +		lun->vlun.id = i;
> +		lun->vlun.lun_id = i % dev->luns_per_chnl;
> +		lun->vlun.chnl_id = i / dev->luns_per_chnl;

Please use do_div(). The % operator may not be supported on some
platforms, as the kbuild robot pointed out in v12.

Yang

> +		lun->vlun.nr_free_blocks = dev->blks_per_lun;
> +	}
> +	return 0;
> +}
> +
> +static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
> +								void *private)
> +{
> +	struct gen_nvm *gn = private;
> +	struct gen_lun *lun = &gn->luns[lun_id];
> +	struct nvm_block *block;
> +	int i;
> +
> +	if (unlikely(bitmap_empty(bb_bitmap, nr_blocks)))
> +		return 0;
> +
> +	i = -1;
> +	while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) <
> +			nr_blocks) {
> +		block = &lun->vlun.blocks[i];
> +		if (!block) {
> +			pr_err("gen_nvm: BB data is out of bounds.\n");
> +			return -EINVAL;
> +		}
> +		list_move_tail(&block->list, &lun->bb_list);
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
> +{
> +	struct nvm_dev *dev = private;
> +	struct gen_nvm *gn = dev->mp;
> +	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
> +	u64 elba = slba + nlb;
> +	struct gen_lun *lun;
> +	struct nvm_block *blk;
> +	u64 i;
> +	int lun_id;
> +
> +	if (unlikely(elba > dev->total_pages)) {
> +		pr_err("gen_nvm: L2P data from device is out of bounds!\n");
> +		return -EINVAL;
> +	}
> +
> +	for (i = 0; i < nlb; i++) {
> +		u64 pba = le64_to_cpu(entries[i]);
> +
> +		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
> +			pr_err("gen_nvm: L2P data entry is out of bounds!\n");
> +			return -EINVAL;
> +		}
> +
> +		/* Address zero is a special one. The first page on a disk is
> +		 * protected. It often holds internal device boot
> +		 * information.
> +		 */
> +		if (!pba)
> +			continue;
> +
> +		/* resolve block from physical address */
> +		lun_id = div_u64(pba, dev->sec_per_lun);
> +		lun = &gn->luns[lun_id];
> +
> +		/* Calculate block offset into lun */
> +		pba = pba - (dev->sec_per_lun * lun_id);
> +		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
> +
> +		if (!blk->type) {
> +			/* at this point, we don't know anything about the
> +			 * block. It's up to the FTL on top to re-etablish the
> +			 * block state
> +			 */
> +			list_move_tail(&blk->list, &lun->used_list);
> +			blk->type = 1;
> +			lun->vlun.nr_free_blocks--;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
> +{
> +	struct gen_lun *lun;
> +	struct nvm_block *block;
> +	sector_t lun_iter, blk_iter, cur_block_id = 0;
> +	int ret;
> +
> +	gennvm_for_each_lun(gn, lun, lun_iter) {
> +		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
> +							dev->blks_per_lun);
> +		if (!lun->vlun.blocks)
> +			return -ENOMEM;
> +
> +		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
> +			block = &lun->vlun.blocks[blk_iter];
> +
> +			INIT_LIST_HEAD(&block->list);
> +
> +			block->lun = &lun->vlun;
> +			block->id = cur_block_id++;
> +
> +			/* First block is reserved for device */
> +			if (unlikely(lun_iter == 0 && blk_iter == 0))
> +				continue;
> +
> +			list_add_tail(&block->list, &lun->free_list);
> +		}
> +
> +		if (dev->ops->get_bb_tbl) {
> +			ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id,
> +					dev->blks_per_lun, gennvm_block_bb, gn);
> +			if (ret)
> +				pr_err("gen_nvm: could not read BB table\n");
> +		}
> +	}
> +
> +	if (dev->ops->get_l2p_tbl) {
> +		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
> +							gennvm_block_map, dev);
> +		if (ret) {
> +			pr_err("gen_nvm: could not read L2P table.\n");
> +			pr_warn("gen_nvm: default block initialization");
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int gennvm_register(struct nvm_dev *dev)
> +{
> +	struct gen_nvm *gn;
> +	int ret;
> +
> +	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
> +	if (!gn)
> +		return -ENOMEM;
> +
> +	gn->nr_luns = dev->nr_luns;
> +	dev->mp = gn;
> +
> +	ret = gennvm_luns_init(dev, gn);
> +	if (ret) {
> +		pr_err("gen_nvm: could not initialize luns\n");
> +		goto err;
> +	}
> +
> +	ret = gennvm_blocks_init(dev, gn);
> +	if (ret) {
> +		pr_err("gen_nvm: could not initialize blocks\n");
> +		goto err;
> +	}
> +
> +	return 1;
> +err:
> +	kfree(gn);
> +	return ret;
> +}
> +
> +static void gennvm_unregister(struct nvm_dev *dev)
> +{
> +	gennvm_blocks_free(dev);
> +	gennvm_luns_free(dev);
> +	kfree(dev->mp);
> +	dev->mp = NULL;
> +}
> +
> +static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
> +				struct nvm_lun *vlun, unsigned long flags)
> +{
> +	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
> +	struct nvm_block *blk = NULL;
> +	int is_gc = flags & NVM_IOTYPE_GC;
> +
> +	BUG_ON(!lun);
> +
> +	spin_lock(&vlun->lock);
> +
> +	if (list_empty(&lun->free_list)) {
> +		pr_err_ratelimited("gen_nvm: lun %u have no free pages available",
> +								lun->vlun.id);
> +		spin_unlock(&vlun->lock);
> +		goto out;
> +	}
> +
> +	while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
> +		spin_unlock(&vlun->lock);
> +		goto out;
> +	}
> +
> +	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
> +	list_move_tail(&blk->list, &lun->used_list);
> +	blk->type = 1;
> +
> +	lun->vlun.nr_free_blocks--;
> +
> +	spin_unlock(&vlun->lock);
> +out:
> +	return blk;
> +}
> +
> +static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
> +{
> +	struct nvm_lun *vlun = blk->lun;
> +	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
> +
> +	spin_lock(&vlun->lock);
> +
> +	switch (blk->type) {
> +	case 1:
> +		list_move_tail(&blk->list, &lun->free_list);
> +		lun->vlun.nr_free_blocks++;
> +		blk->type = 0;
> +		break;
> +	case 2:
> +		list_move_tail(&blk->list, &lun->bb_list);
> +		break;
> +	default:
> +		BUG();
> +	}
> +
> +	spin_unlock(&vlun->lock);
> +}
> +
> +static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			rqd->ppa_list[i] = addr_to_generic_mode(dev,
> +							rqd->ppa_list[i]);
> +	else
> +		rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
> +}
> +
> +static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			rqd->ppa_list[i] = generic_to_addr_mode(dev,
> +							rqd->ppa_list[i]);
> +	else
> +		rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
> +}
> +
> +static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	if (!dev->ops->submit_io)
> +		return 0;
> +
> +	/* Convert address space */
> +	gennvm_generic_to_addr_mode(dev, rqd);
> +
> +	rqd->dev = dev;
> +	return dev->ops->submit_io(dev->q, rqd);
> +}
> +
> +static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
> +								int type)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +	struct gen_lun *lun;
> +	struct nvm_block *blk;
> +
> +	BUG_ON(ppa->g.ch > dev->nr_chnls);
> +	BUG_ON(ppa->g.lun > dev->luns_per_chnl);
> +	BUG_ON(ppa->g.blk > dev->blks_per_lun);
> +
> +	lun = &gn->luns[ppa->g.lun * ppa->g.ch];
> +	blk = &lun->vlun.blocks[ppa->g.blk];
> +
> +	/* will be moved to bb list on put_blk from target */
> +	blk->type = type;
> +}
> +
> +/* mark block bad. It is expected the target recover from the error. */
> +static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
> +{
> +	int i;
> +
> +	if (!dev->ops->set_bb)
> +		return;
> +
> +	if (dev->ops->set_bb(dev->q, rqd, 1))
> +		return;
> +
> +	gennvm_addr_to_generic_mode(dev, rqd);
> +
> +	/* look up blocks and mark them as bad */
> +	if (rqd->nr_pages > 1)
> +		for (i = 0; i < rqd->nr_pages; i++)
> +			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
> +	else
> +		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
> +}
> +
> +static int gennvm_end_io(struct nvm_rq *rqd, int error)
> +{
> +	struct nvm_tgt_instance *ins = rqd->ins;
> +	int ret = 0;
> +
> +	switch (error) {
> +	case NVM_RSP_SUCCESS:
> +		break;
> +	case NVM_RSP_ERR_EMPTYPAGE:
> +		break;
> +	case NVM_RSP_ERR_FAILWRITE:
> +		gennvm_mark_blk_bad(rqd->dev, rqd);
> +	default:
> +		ret++;
> +	}
> +
> +	ret += ins->tt->end_io(rqd, error);
> +
> +	return ret;
> +}
> +
> +static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
> +							unsigned long flags)
> +{
> +	int plane_cnt = 0, pl_idx, ret;
> +	struct ppa_addr addr;
> +	struct nvm_rq rqd;
> +
> +	if (!dev->ops->erase_block)
> +		return 0;
> +
> +	addr = block_to_ppa(dev, blk);
> +
> +	if (dev->plane_mode == NVM_PLANE_SINGLE) {
> +		rqd.nr_pages = 1;
> +		rqd.ppa_addr = addr;
> +	} else {
> +		plane_cnt = (1 << dev->plane_mode);
> +		rqd.nr_pages = plane_cnt;
> +
> +		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
> +							&rqd.dma_ppa_list);
> +		if (!rqd.ppa_list) {
> +			pr_err("gen_nvm: failed to allocate dma memory\n");
> +			return -ENOMEM;
> +		}
> +
> +		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
> +			addr.g.pl = pl_idx;
> +			rqd.ppa_list[pl_idx] = addr;
> +		}
> +	}
> +
> +	gennvm_generic_to_addr_mode(dev, &rqd);
> +
> +	ret = dev->ops->erase_block(dev->q, &rqd);
> +
> +	if (plane_cnt)
> +		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
> +
> +	return ret;
> +}
> +
> +static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +
> +	return &gn->luns[lunid].vlun;
> +}
> +
> +static void gennvm_free_blocks_print(struct nvm_dev *dev)
> +{
> +	struct gen_nvm *gn = dev->mp;
> +	struct gen_lun *lun;
> +	unsigned int i;
> +
> +	gennvm_for_each_lun(gn, lun, i)
> +		pr_info("%s: lun%8u\t%u\n",
> +					dev->name, i, lun->vlun.nr_free_blocks);
> +}
> +
> +static struct nvmm_type gennvm = {
> +	.name		= "gennvm",
> +	.version	= {0, 1, 0},
> +
> +	.register_mgr	= gennvm_register,
> +	.unregister_mgr	= gennvm_unregister,
> +
> +	.get_blk	= gennvm_get_blk,
> +	.put_blk	= gennvm_put_blk,
> +
> +	.submit_io	= gennvm_submit_io,
> +	.end_io		= gennvm_end_io,
> +	.erase_blk	= gennvm_erase_blk,
> +
> +	.get_lun	= gennvm_get_lun,
> +	.free_blocks_print = gennvm_free_blocks_print,
> +};
> +
> +static int __init gennvm_module_init(void)
> +{
> +	return nvm_register_mgr(&gennvm);
> +}
> +
> +static void gennvm_module_exit(void)
> +{
> +	nvm_unregister_mgr(&gennvm);
> +}
> +
> +module_init(gennvm_module_init);
> +module_exit(gennvm_module_exit);
> +MODULE_LICENSE("GPL v2");
> +MODULE_DESCRIPTION("Block manager for Hybrid Open-Channel SSDs");
> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
> new file mode 100644
> index 0000000..d23bd35
> --- /dev/null
> +++ b/drivers/lightnvm/gennvm.h
> @@ -0,0 +1,46 @@
> +/*
> + * Copyright: Matias Bjorling <mb at bjorling.me>
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License version
> + * 2 as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but
> + * WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * General Public License for more details.
> + *
> + */
> +
> +#ifndef GENNVM_H_
> +#define GENNVM_H_
> +
> +#include <linux/module.h>
> +#include <linux/vmalloc.h>
> +
> +#include <linux/lightnvm.h>
> +
> +struct gen_lun {
> +	struct nvm_lun vlun;
> +
> +	int reserved_blocks;
> +	/* lun block lists */
> +	struct list_head used_list;	/* In-use blocks */
> +	struct list_head free_list;	/* Not used blocks i.e. released
> +					 * and ready for use
> +					 */
> +	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
> +					 * free_list and used_list
> +					 */
> +};
> +
> +struct gen_nvm {
> +	int nr_luns;
> +	struct gen_lun *luns;
> +};
> +
> +#define gennvm_for_each_lun(bm, lun, i) \
> +		for ((i) = 0, lun = &(bm)->luns[0]; \
> +			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
> +
> +#endif /* GENNVM_H_ */
>

  reply	other threads:[~2015-10-28 23:48 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-10-28  0:30 [PATCH v13 0/5] Support for Open-Channel SSDs Matias Bjørling
2015-10-28  0:30 ` Matias Bjørling
2015-10-28  0:30 ` Matias Bjørling
2015-10-28  0:30 ` [PATCH v13 1/5] lightnvm: " Matias Bjørling
2015-10-28  0:30   ` Matias Bjørling
2015-10-28  0:30   ` Matias Bjørling
2015-10-28  0:30 ` [PATCH v13 2/5] gennvm: Generic NVM manager Matias Bjørling
2015-10-28  0:30   ` Matias Bjørling
2015-10-28  0:30   ` Matias Bjørling
2015-10-28 23:41   ` Dongsheng Yang [this message]
2015-10-28 23:41     ` Dongsheng Yang
2015-10-28 23:41     ` Dongsheng Yang
2015-10-29  7:34     ` Jens Axboe
2015-10-29  7:34       ` Jens Axboe
2015-10-28  0:30 ` [PATCH v13 3/5] rrpc: Round-robin sector target with cost-based gc Matias Bjørling
2015-10-28  0:30   ` Matias Bjørling
2015-10-28  0:30   ` Matias Bjørling
2015-10-28  0:30 ` [PATCH v13 4/5] null_nvm: LightNVM test driver Matias Bjørling
2015-10-28  0:30   ` Matias Bjørling
2015-10-28  0:30 ` [PATCH v13 5/5] nvme: LightNVM support Matias Bjørling
2015-10-28  0:30   ` Matias Bjørling

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=56315D0F.5010602@cn.fujitsu.com \
    --to=yangds.fnst@cn.fujitsu.com \
    --cc=Stephen.Bates@pmcs.com \
    --cc=axboe@fb.com \
    --cc=hch@infradead.org \
    --cc=jg@lightnvm.io \
    --cc=keith.busch@intel.com \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=m@bjorling.me \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.