* [PATCH 1/3] lightnvm: submit erases using the I/O path
@ 2017-01-26 11:47 Javier González
2017-01-26 11:47 ` [PATCH 2/3] lightnvm: allow targets to use sysfs Javier González
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Javier González @ 2017-01-26 11:47 UTC (permalink / raw)
To: mb; +Cc: linux-block, linux-kernel, Javier González
Until now, erases have been submitted as synchronous commands through a
dedicated erase function. In order to allow targets implementing
asynchronous erases, refactor the erase path so that it uses the normal
async I/O submission path. If a target requires sync I/O, it can
implement it internally. Also, adapt rrpc to use the new erase path.
Signed-off-by: Javier González <javier@cnexlabs.com>
---
drivers/lightnvm/core.c | 29 --------------------
drivers/lightnvm/rrpc.c | 64 ++++++++++++++++++++++++++++++++++++++++++--
drivers/nvme/host/lightnvm.c | 32 +++++++---------------
include/linux/lightnvm.h | 6 ++---
4 files changed, 75 insertions(+), 56 deletions(-)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 4f4db99..3a3e91d 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -617,35 +617,6 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_submit_io);
-int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_rq rqd;
- int ret;
-
- if (!dev->ops->erase_block)
- return 0;
-
- nvm_map_to_dev(tgt_dev, ppas);
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
- if (ret)
- return ret;
-
- nvm_rq_tgt_to_dev(tgt_dev, &rqd);
-
- rqd.flags = flags;
-
- ret = dev->ops->erase_block(dev, &rqd);
-
- nvm_free_rqd_ppalist(dev, &rqd);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_erase_blk);
-
int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
nvm_l2p_update_fn *update_l2p, void *priv)
{
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index e00b1d7..bec8a9f 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -407,6 +407,67 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
return 0;
}
+static void rrpc_end_io_sync(struct nvm_rq *rqd)
+{
+ struct completion *waiting = rqd->private;
+
+ complete(waiting);
+}
+
+static int pblk_erase_blk(struct rrpc *rrpc, struct ppa_addr ppa)
+{
+ struct nvm_tgt_dev *dev = rrpc->dev;
+ struct nvm_geo *geo = &dev->geo;
+ int nr_secs = geo->plane_mode;
+ struct nvm_rq *rqd;
+ int ret;
+ int i;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
+ if (!rqd)
+ return -ENOMEM;
+ memset(rqd, 0, sizeof(struct nvm_rq));
+
+ rqd->opcode = NVM_OP_ERASE;
+ rqd->nr_ppas = nr_secs;
+ rqd->bio = NULL;
+ rqd->end_io = rrpc_end_io_sync;
+ rqd->private = &wait;
+
+ if (nr_secs > 1) {
+ rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+ &rqd->dma_ppa_list);
+ if (!rqd->ppa_list) {
+ pr_err("rrpc: not able to allocate ppa list\n");
+ ret = -ENOMEM;
+ goto free_rqd;
+ }
+
+ for (i = 0; i < nr_secs; i++) {
+ ppa.g.pl = i;
+ rqd->ppa_list[i] = ppa;
+ }
+ } else {
+ rqd->ppa_addr = ppa;
+ rqd->ppa_addr.g.pl = 0;
+ }
+
+ ret = nvm_submit_io(dev, rqd);
+ if (ret) {
+ pr_err("rrpc: erase I/O submission failed: %d\n", ret);
+ goto free_ppa_list;
+ }
+ wait_for_completion_io(&wait);
+
+free_ppa_list:
+ if (nr_secs > 1)
+ nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
+free_rqd:
+ mempool_free(rqd, rrpc->rq_pool);
+ return 0;
+}
+
static void rrpc_block_gc(struct work_struct *work)
{
struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
@@ -414,7 +475,6 @@ static void rrpc_block_gc(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_tgt_dev *dev = rrpc->dev;
struct ppa_addr ppa;
mempool_free(gcb, rrpc->gcb_pool);
@@ -430,7 +490,7 @@ static void rrpc_block_gc(struct work_struct *work)
ppa.g.lun = rlun->bppa.g.lun;
ppa.g.blk = rblk->id;
- if (nvm_erase_blk(dev, &ppa, 0))
+ if (pblk_erase_blk(rrpc, ppa))
goto put_back;
rrpc_put_blk(rrpc, rblk);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 21cac85..3c897ab 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -510,12 +510,16 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
}
rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
- rq->ioprio = bio_prio(bio);
- if (bio_has_data(bio))
- rq->nr_phys_segments = bio_phys_segments(q, bio);
-
- rq->__data_len = bio->bi_iter.bi_size;
- rq->bio = rq->biotail = bio;
+ if (bio) {
+ rq->ioprio = bio_prio(bio);
+ rq->__data_len = bio->bi_iter.bi_size;
+ rq->bio = rq->biotail = bio;
+ if (bio_has_data(bio))
+ rq->nr_phys_segments = bio_phys_segments(q, bio);
+ } else {
+ rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+ rq->__data_len = 0;
+ }
nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
@@ -526,21 +530,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
return 0;
}
-static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- struct request_queue *q = dev->q;
- struct nvme_ns *ns = q->queuedata;
- struct nvme_nvm_command c = {};
-
- c.erase.opcode = NVM_OP_ERASE;
- c.erase.nsid = cpu_to_le32(ns->ns_id);
- c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
- c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
- c.erase.control = cpu_to_le16(rqd->flags);
-
- return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
-}
-
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
@@ -576,7 +565,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.set_bb_tbl = nvme_nvm_set_bb_tbl,
.submit_io = nvme_nvm_submit_io,
- .erase_block = nvme_nvm_erase_block,
.create_dma_pool = nvme_nvm_create_dma_pool,
.destroy_dma_pool = nvme_nvm_destroy_dma_pool,
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 17cd454..a75d8c0 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -13,6 +13,9 @@ enum {
NVM_IOTYPE_NONE = 0,
NVM_IOTYPE_GC = 1,
+
+ NVM_COMMAND_SYNC = 0,
+ NVM_COMMAND_ASYNC = 1,
};
#define NVM_BLK_BITS (16)
@@ -56,7 +59,6 @@ typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
@@ -70,7 +72,6 @@ struct nvm_dev_ops {
nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io;
- nvm_erase_blk_fn *erase_block;
nvm_create_dma_pool_fn *create_dma_pool;
nvm_destroy_dma_pool_fn *destroy_dma_pool;
@@ -475,7 +476,6 @@ extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
const struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
void *);
extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
--
2.7.4
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH 2/3] lightnvm: allow targets to use sysfs
2017-01-26 11:47 [PATCH 1/3] lightnvm: submit erases using the I/O path Javier González
@ 2017-01-26 11:47 ` Javier González
2017-01-30 10:27 ` Matias Bjørling
2017-01-26 11:47 ` [PATCH 3/3] lightnvm: Add CRC read error Javier González
2017-01-30 11:31 ` [PATCH 1/3] lightnvm: submit erases using the I/O path Matias Bjørling
2 siblings, 1 reply; 6+ messages in thread
From: Javier González @ 2017-01-26 11:47 UTC (permalink / raw)
To: mb; +Cc: linux-block, linux-kernel, Javier González
In order to register through the sysfs interface, a driver needs to know
its kobject. On a disk structure, this happens when the partition
information is added (device_add_disk), which for lightnvm takes place
after the target has been initialized. This means that on target
initialization, the kobject has not been created yet.
This patch adds a target function to let targets initialize their own
kobject as a child of the disk kobject.
Signed-off-by: Javier González <javier@cnexlabs.com>
---
drivers/lightnvm/core.c | 9 +++++++++
include/linux/lightnvm.h | 5 +++++
2 files changed, 14 insertions(+)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 3a3e91d..8baae61 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -289,6 +289,9 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
set_capacity(tdisk, tt->capacity(targetdata));
add_disk(tdisk);
+ if (tt->sysfs_init && tt->sysfs_init(tdisk))
+ goto err_sysfs;
+
t->type = tt;
t->disk = tdisk;
t->dev = tgt_dev;
@@ -298,6 +301,9 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
mutex_unlock(&dev->mlock);
return 0;
+err_sysfs:
+ if (tt->exit)
+ tt->exit(targetdata);
err_init:
put_disk(tdisk);
err_queue:
@@ -320,6 +326,9 @@ static void __nvm_remove_target(struct nvm_target *t)
del_gendisk(tdisk);
blk_cleanup_queue(q);
+ if (tt->sysfs_exit)
+ tt->sysfs_exit(tdisk->private_data);
+
if (tt->exit)
tt->exit(tdisk->private_data);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index a75d8c0..11c318e 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -440,6 +440,7 @@ typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
typedef void (nvm_tgt_exit_fn)(void *);
+typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
struct nvm_tgt_type {
const char *name;
@@ -453,6 +454,10 @@ struct nvm_tgt_type {
nvm_tgt_init_fn *init;
nvm_tgt_exit_fn *exit;
+ /* sysfs */
+ nvm_tgt_sysfs_init_fn *sysfs_init;
+ nvm_tgt_exit_fn *sysfs_exit;
+
/* For internal use */
struct list_head list;
};
--
2.7.4
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH 3/3] lightnvm: Add CRC read error
2017-01-26 11:47 [PATCH 1/3] lightnvm: submit erases using the I/O path Javier González
2017-01-26 11:47 ` [PATCH 2/3] lightnvm: allow targets to use sysfs Javier González
@ 2017-01-26 11:47 ` Javier González
2017-01-30 10:16 ` Matias Bjørling
2017-01-30 11:31 ` [PATCH 1/3] lightnvm: submit erases using the I/O path Matias Bjørling
2 siblings, 1 reply; 6+ messages in thread
From: Javier González @ 2017-01-26 11:47 UTC (permalink / raw)
To: mb; +Cc: linux-block, linux-kernel, Javier González
Let the host differentiate between a read error and a CRC check error on
the device side.
Signed-off-by: Javier González <javier@cnexlabs.com>
---
include/linux/lightnvm.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 11c318e..51cda67 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -108,6 +108,7 @@ enum {
NVM_RSP_ERR_FAILWRITE = 0x40ff,
NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
NVM_RSP_ERR_FAILECC = 0x4281,
+ NVM_RSP_ERR_FAILCRC = 0x4004,
NVM_RSP_WARN_HIGHECC = 0x4700,
/* Device opcodes */
--
2.7.4
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH 3/3] lightnvm: Add CRC read error
2017-01-26 11:47 ` [PATCH 3/3] lightnvm: Add CRC read error Javier González
@ 2017-01-30 10:16 ` Matias Bjørling
0 siblings, 0 replies; 6+ messages in thread
From: Matias Bjørling @ 2017-01-30 10:16 UTC (permalink / raw)
To: Javier González; +Cc: linux-block, linux-kernel, Javier González
On 01/26/2017 12:47 PM, Javier González wrote:
> Let the host differentiate between a read error and a CRC check error on
> the device side.
>
> Signed-off-by: Javier González <javier@cnexlabs.com>
> ---
> include/linux/lightnvm.h | 1 +
> 1 file changed, 1 insertion(+)
>
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index 11c318e..51cda67 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -108,6 +108,7 @@ enum {
> NVM_RSP_ERR_FAILWRITE = 0x40ff,
> NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
> NVM_RSP_ERR_FAILECC = 0x4281,
> + NVM_RSP_ERR_FAILCRC = 0x4004,
> NVM_RSP_WARN_HIGHECC = 0x4700,
>
> /* Device opcodes */
>
Thanks. Applied.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 2/3] lightnvm: allow targets to use sysfs
2017-01-26 11:47 ` [PATCH 2/3] lightnvm: allow targets to use sysfs Javier González
@ 2017-01-30 10:27 ` Matias Bjørling
0 siblings, 0 replies; 6+ messages in thread
From: Matias Bjørling @ 2017-01-30 10:27 UTC (permalink / raw)
To: Javier González; +Cc: linux-block, linux-kernel, Javier González
On 01/26/2017 12:47 PM, Javier González wrote:
> In order to register through the sysfs interface, a driver needs to know
> its kobject. On a disk structure, this happens when the partition
> information is added (device_add_disk), which for lightnvm takes place
> after the target has been initialized. This means that on target
> initialization, the kboject has not been created yet.
>
> This patch adds a target function to let targets initialize their own
> kboject as a child of the disk kobject.
>
> Signed-off-by: Javier González <javier@cnexlabs.com>
> ---
> drivers/lightnvm/core.c | 9 +++++++++
> include/linux/lightnvm.h | 5 +++++
> 2 files changed, 14 insertions(+)
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 3a3e91d..8baae61 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -289,6 +289,9 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
> set_capacity(tdisk, tt->capacity(targetdata));
> add_disk(tdisk);
>
> + if (tt->sysfs_init && tt->sysfs_init(tdisk))
> + goto err_sysfs;
> +
> t->type = tt;
> t->disk = tdisk;
> t->dev = tgt_dev;
> @@ -298,6 +301,9 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
> mutex_unlock(&dev->mlock);
>
> return 0;
> +err_sysfs:
> + if (tt->exit)
> + tt->exit(targetdata);
> err_init:
> put_disk(tdisk);
> err_queue:
> @@ -320,6 +326,9 @@ static void __nvm_remove_target(struct nvm_target *t)
> del_gendisk(tdisk);
> blk_cleanup_queue(q);
>
> + if (tt->sysfs_exit)
> + tt->sysfs_exit(tdisk->private_data);
Lets pass gendisk here.
> +
> if (tt->exit)
> tt->exit(tdisk->private_data);
>
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index a75d8c0..11c318e 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -440,6 +440,7 @@ typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
> typedef sector_t (nvm_tgt_capacity_fn)(void *);
> typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
> typedef void (nvm_tgt_exit_fn)(void *);
> +typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
>
> struct nvm_tgt_type {
> const char *name;
> @@ -453,6 +454,10 @@ struct nvm_tgt_type {
> nvm_tgt_init_fn *init;
> nvm_tgt_exit_fn *exit;
>
> + /* sysfs */
> + nvm_tgt_sysfs_init_fn *sysfs_init;
> + nvm_tgt_exit_fn *sysfs_exit;
Might as well typedef an exit for synergy.
> +
> /* For internal use */
> struct list_head list;
> };
>
Thanks. Applied. I have made the above modifications.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 1/3] lightnvm: submit erases using the I/O path
2017-01-26 11:47 [PATCH 1/3] lightnvm: submit erases using the I/O path Javier González
2017-01-26 11:47 ` [PATCH 2/3] lightnvm: allow targets to use sysfs Javier González
2017-01-26 11:47 ` [PATCH 3/3] lightnvm: Add CRC read error Javier González
@ 2017-01-30 11:31 ` Matias Bjørling
2 siblings, 0 replies; 6+ messages in thread
From: Matias Bjørling @ 2017-01-30 11:31 UTC (permalink / raw)
To: Javier González; +Cc: linux-block, linux-kernel, Javier González
On 01/26/2017 12:47 PM, Javier González wrote:
> Until now erases has been submitted as synchronous commands through a
> dedicated erase function. In order to allow targets implementing
> asynchronous erases, refactor the erase path so that it uses the normal
> async I/O submission path. If a target requires sync I/O, it can
> implement it internally. Also, adapt rrpc to use the new erase path.
>
> Signed-off-by: Javier González <javier@cnexlabs.com>
> ---
> drivers/lightnvm/core.c | 29 --------------------
> drivers/lightnvm/rrpc.c | 64 ++++++++++++++++++++++++++++++++++++++++++--
> drivers/nvme/host/lightnvm.c | 32 +++++++---------------
> include/linux/lightnvm.h | 6 ++---
> 4 files changed, 75 insertions(+), 56 deletions(-)
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 4f4db99..3a3e91d 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -617,35 +617,6 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
> }
> EXPORT_SYMBOL(nvm_submit_io);
>
> -int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
> -{
> - struct nvm_dev *dev = tgt_dev->parent;
> - struct nvm_rq rqd;
> - int ret;
> -
> - if (!dev->ops->erase_block)
> - return 0;
> -
> - nvm_map_to_dev(tgt_dev, ppas);
> -
> - memset(&rqd, 0, sizeof(struct nvm_rq));
> -
> - ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
> - if (ret)
> - return ret;
> -
> - nvm_rq_tgt_to_dev(tgt_dev, &rqd);
> -
> - rqd.flags = flags;
> -
> - ret = dev->ops->erase_block(dev, &rqd);
> -
> - nvm_free_rqd_ppalist(dev, &rqd);
> -
> - return ret;
> -}
> -EXPORT_SYMBOL(nvm_erase_blk);
> -
> int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
> nvm_l2p_update_fn *update_l2p, void *priv)
> {
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index e00b1d7..bec8a9f 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -407,6 +407,67 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
> return 0;
> }
>
> +static void rrpc_end_io_sync(struct nvm_rq *rqd)
> +{
> + struct completion *waiting = rqd->private;
> +
> + complete(waiting);
> +}
> +
> +static int pblk_erase_blk(struct rrpc *rrpc, struct ppa_addr ppa)
Extend the interface to take a list of PPAs.
> +{
> + struct nvm_tgt_dev *dev = rrpc->dev;
> + struct nvm_geo *geo = &dev->geo;
> + int nr_secs = geo->plane_mode;
> + struct nvm_rq *rqd;
> + int ret;
> + int i;
> + DECLARE_COMPLETION_ONSTACK(wait);
> +
> + rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
> + if (!rqd)
> + return -ENOMEM;
> + memset(rqd, 0, sizeof(struct nvm_rq));
> +
> + rqd->opcode = NVM_OP_ERASE;
> + rqd->nr_ppas = nr_secs;
> + rqd->bio = NULL;
> + rqd->end_io = rrpc_end_io_sync;
> + rqd->private = &wait;
> +
> + if (nr_secs > 1) {
> + rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
> + &rqd->dma_ppa_list);
> + if (!rqd->ppa_list) {
> + pr_err("rrpc: not able to allocate ppa list\n");
> + ret = -ENOMEM;
> + goto free_rqd;
> + }
> +
> + for (i = 0; i < nr_secs; i++) {
> + ppa.g.pl = i;
> + rqd->ppa_list[i] = ppa;
> + }
> + } else {
> + rqd->ppa_addr = ppa;
> + rqd->ppa_addr.g.pl = 0;
> + }
> +
nvm_set_rqd_ppalist() can be used instead of the above. Also, this code
should properly not be in this patch, but part of pblk.
> + ret = nvm_submit_io(dev, rqd);
> + if (ret) {
> + pr_err("rrpr: erase I/O submission falied: %d\n", ret);
> + goto free_ppa_list;
> + }
> + wait_for_completion_io(&wait);
> +
> +free_ppa_list:
> + if (nr_secs > 1)
> + nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
> +free_rqd:
> + mempool_free(rqd, rrpc->rq_pool);
> + return 0;
> +}
> +
> static void rrpc_block_gc(struct work_struct *work)
> {
> struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
> @@ -414,7 +475,6 @@ static void rrpc_block_gc(struct work_struct *work)
> struct rrpc *rrpc = gcb->rrpc;
> struct rrpc_block *rblk = gcb->rblk;
> struct rrpc_lun *rlun = rblk->rlun;
> - struct nvm_tgt_dev *dev = rrpc->dev;
> struct ppa_addr ppa;
>
> mempool_free(gcb, rrpc->gcb_pool);
> @@ -430,7 +490,7 @@ static void rrpc_block_gc(struct work_struct *work)
> ppa.g.lun = rlun->bppa.g.lun;
> ppa.g.blk = rblk->id;
>
> - if (nvm_erase_blk(dev, &ppa, 0))
> + if (pblk_erase_blk(rrpc, ppa))
> goto put_back;
>
> rrpc_put_blk(rrpc, rblk);
> diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
> index 21cac85..3c897ab 100644
> --- a/drivers/nvme/host/lightnvm.c
> +++ b/drivers/nvme/host/lightnvm.c
> @@ -510,12 +510,16 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
> }
> rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
>
> - rq->ioprio = bio_prio(bio);
> - if (bio_has_data(bio))
> - rq->nr_phys_segments = bio_phys_segments(q, bio);
> -
> - rq->__data_len = bio->bi_iter.bi_size;
> - rq->bio = rq->biotail = bio;
> + if (bio) {
> + rq->ioprio = bio_prio(bio);
> + rq->__data_len = bio->bi_iter.bi_size;
> + rq->bio = rq->biotail = bio;
> + if (bio_has_data(bio))
> + rq->nr_phys_segments = bio_phys_segments(q, bio);
> + } else {
> + rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
> + rq->__data_len = 0;
> + }
>
> nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
>
> @@ -526,21 +530,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
> return 0;
> }
>
> -static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
> -{
> - struct request_queue *q = dev->q;
> - struct nvme_ns *ns = q->queuedata;
> - struct nvme_nvm_command c = {};
> -
> - c.erase.opcode = NVM_OP_ERASE;
> - c.erase.nsid = cpu_to_le32(ns->ns_id);
> - c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
> - c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
> - c.erase.control = cpu_to_le16(rqd->flags);
> -
> - return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
> -}
> -
> static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
> {
> struct nvme_ns *ns = nvmdev->q->queuedata;
> @@ -576,7 +565,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
> .set_bb_tbl = nvme_nvm_set_bb_tbl,
>
> .submit_io = nvme_nvm_submit_io,
> - .erase_block = nvme_nvm_erase_block,
>
> .create_dma_pool = nvme_nvm_create_dma_pool,
> .destroy_dma_pool = nvme_nvm_destroy_dma_pool,
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index 17cd454..a75d8c0 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -13,6 +13,9 @@ enum {
>
> NVM_IOTYPE_NONE = 0,
> NVM_IOTYPE_GC = 1,
> +
> + NVM_COMMAND_SYNC = 0,
> + NVM_COMMAND_ASYNC = 1,
We may just use the values (i.e., pass a "sync" variable).
> };
>
> #define NVM_BLK_BITS (16)
> @@ -56,7 +59,6 @@ typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
> typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
> typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
> typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
> -typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
> typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
> typedef void (nvm_destroy_dma_pool_fn)(void *);
> typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
> @@ -70,7 +72,6 @@ struct nvm_dev_ops {
> nvm_op_set_bb_fn *set_bb_tbl;
>
> nvm_submit_io_fn *submit_io;
> - nvm_erase_blk_fn *erase_block;
>
> nvm_create_dma_pool_fn *create_dma_pool;
> nvm_destroy_dma_pool_fn *destroy_dma_pool;
> @@ -475,7 +476,6 @@ extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
> extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
> const struct ppa_addr *, int, int);
> extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
> -extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
> extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
> void *);
> extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
>
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2017-01-30 11:41 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-01-26 11:47 [PATCH 1/3] lightnvm: submit erases using the I/O path Javier González
2017-01-26 11:47 ` [PATCH 2/3] lightnvm: allow targets to use sysfs Javier González
2017-01-30 10:27 ` Matias Bjørling
2017-01-26 11:47 ` [PATCH 3/3] lightnvm: Add CRC read error Javier González
2017-01-30 10:16 ` Matias Bjørling
2017-01-30 11:31 ` [PATCH 1/3] lightnvm: submit erases using the I/O path Matias Bjørling
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.