From: Hans Holmberg <hans@owltronix.com>
To: "Javier González" <javier@javigon.com>
Cc: "Matias Bjørling" <mb@lightnvm.io>,
	"Christoph Hellwig" <hch@lst.de>,
	linux-block@vger.kernel.org,
	"Linux Kernel Mailing List" <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH 2/4] lightnvm: move metadata mapping to lower level driver
Date: Thu, 1 Aug 2019 09:15:59 +0200	[thread overview]
Message-ID: <CANr-nt2EAkd7uqCe2W14wxzYC3fSTCCHRJs65Zh_S4UrPfg6Hw@mail.gmail.com> (raw)
In-Reply-To: <69EAD2D8-72B5-4EC1-99EE-1436D1FF9241@javigon.com>

On Wed, Jul 31, 2019 at 3:59 PM Javier González <javier@javigon.com> wrote:
>
> > On 31 Jul 2019, at 11.41, Hans Holmberg <hans@owltronix.com> wrote:
> >
> > Now that blk_rq_map_kern can map both kmem and vmem, move
> > internal metadata mapping down to the lower level driver.
> >
> > Signed-off-by: Hans Holmberg <hans@owltronix.com>
> > ---
> > drivers/lightnvm/core.c          |  16 +++---
> > drivers/lightnvm/pblk-core.c     | 113 +++++----------------------------------
> > drivers/lightnvm/pblk-read.c     |  22 ++------
> > drivers/lightnvm/pblk-recovery.c |  39 ++------------
> > drivers/lightnvm/pblk-write.c    |  20 ++-----
> > drivers/lightnvm/pblk.h          |   8 +--
> > drivers/nvme/host/lightnvm.c     |  20 +++++--
> > include/linux/lightnvm.h         |   6 +--
> > 8 files changed, 54 insertions(+), 190 deletions(-)
> >
> > diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> > index 01d098fb96ac..3cd03582a2ed 100644
> > --- a/drivers/lightnvm/core.c
> > +++ b/drivers/lightnvm/core.c
> > @@ -731,7 +731,7 @@ static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
> >       return flags;
> > }
> >
> > -int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
> > +int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
> > {
> >       struct nvm_dev *dev = tgt_dev->parent;
> >       int ret;
> > @@ -745,7 +745,7 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
> >       rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
> >
> >       /* In case of error, fail with right address format */
> > -     ret = dev->ops->submit_io(dev, rqd);
> > +     ret = dev->ops->submit_io(dev, rqd, buf);
> >       if (ret)
> >               nvm_rq_dev_to_tgt(tgt_dev, rqd);
> >       return ret;
> > @@ -759,7 +759,8 @@ static void nvm_sync_end_io(struct nvm_rq *rqd)
> >       complete(waiting);
> > }
> >
> > -static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd)
> > +static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
> > +                           void *buf)
> > {
> >       DECLARE_COMPLETION_ONSTACK(wait);
> >       int ret = 0;
> > @@ -767,7 +768,7 @@ static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd)
> >       rqd->end_io = nvm_sync_end_io;
> >       rqd->private = &wait;
> >
> > -     ret = dev->ops->submit_io(dev, rqd);
> > +     ret = dev->ops->submit_io(dev, rqd, buf);
> >       if (ret)
> >               return ret;
> >
> > @@ -776,7 +777,8 @@ static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd)
> >       return 0;
> > }
> >
> > -int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
> > +int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
> > +                    void *buf)
> > {
> >       struct nvm_dev *dev = tgt_dev->parent;
> >       int ret;
> > @@ -789,7 +791,7 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
> >       rqd->dev = tgt_dev;
> >       rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
> >
> > -     ret = nvm_submit_io_wait(dev, rqd);
> > +     ret = nvm_submit_io_wait(dev, rqd, buf);
> >
> >       return ret;
> > }
> > @@ -816,7 +818,7 @@ static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
> >       rqd->dev = NULL;
> >       rqd->flags = nvm_set_flags(&dev->geo, rqd);
> >
> > -     return nvm_submit_io_wait(dev, rqd);
> > +     return nvm_submit_io_wait(dev, rqd, NULL);
> > }
> >
> > static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
> > diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
> > index f546e6f28b8a..a58d3c84a3f2 100644
> > --- a/drivers/lightnvm/pblk-core.c
> > +++ b/drivers/lightnvm/pblk-core.c
> > @@ -507,7 +507,7 @@ void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
> >       pblk->sec_per_write = sec_per_write;
> > }
> >
> > -int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
> > +int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
> > {
> >       struct nvm_tgt_dev *dev = pblk->dev;
> >
> > @@ -518,7 +518,7 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
> >               return NVM_IO_ERR;
> > #endif
> >
> > -     return nvm_submit_io(dev, rqd);
> > +     return nvm_submit_io(dev, rqd, buf);
> > }
> >
> > void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
> > @@ -541,7 +541,7 @@ void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
> >       }
> > }
> >
> > -int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
> > +int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
> > {
> >       struct nvm_tgt_dev *dev = pblk->dev;
> >       int ret;
> > @@ -553,7 +553,7 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
> >               return NVM_IO_ERR;
> > #endif
> >
> > -     ret = nvm_submit_io_sync(dev, rqd);
> > +     ret = nvm_submit_io_sync(dev, rqd, buf);
> >
> >       if (trace_pblk_chunk_state_enabled() && !ret &&
> >           rqd->opcode == NVM_OP_PWRITE)
> > @@ -562,65 +562,19 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
> >       return ret;
> > }
> >
> > -int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
> > +static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
> > +                                void *buf)
> > {
> >       struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
> >       int ret;
> >
> >       pblk_down_chunk(pblk, ppa_list[0]);
> > -     ret = pblk_submit_io_sync(pblk, rqd);
> > +     ret = pblk_submit_io_sync(pblk, rqd, buf);
> >       pblk_up_chunk(pblk, ppa_list[0]);
> >
> >       return ret;
> > }
> >
> > -static void pblk_bio_map_addr_endio(struct bio *bio)
> > -{
> > -     bio_put(bio);
> > -}
> > -
> > -struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
> > -                           unsigned int nr_secs, unsigned int len,
> > -                           int alloc_type, gfp_t gfp_mask)
> > -{
> > -     struct nvm_tgt_dev *dev = pblk->dev;
> > -     void *kaddr = data;
> > -     struct page *page;
> > -     struct bio *bio;
> > -     int i, ret;
> > -
> > -     if (alloc_type == PBLK_KMALLOC_META)
> > -             return bio_map_kern(dev->q, kaddr, len, gfp_mask);
> > -
> > -     bio = bio_kmalloc(gfp_mask, nr_secs);
> > -     if (!bio)
> > -             return ERR_PTR(-ENOMEM);
> > -
> > -     for (i = 0; i < nr_secs; i++) {
> > -             page = vmalloc_to_page(kaddr);
> > -             if (!page) {
> > -                     pblk_err(pblk, "could not map vmalloc bio\n");
> > -                     bio_put(bio);
> > -                     bio = ERR_PTR(-ENOMEM);
> > -                     goto out;
> > -             }
> > -
> > -             ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
> > -             if (ret != PAGE_SIZE) {
> > -                     pblk_err(pblk, "could not add page to bio\n");
> > -                     bio_put(bio);
> > -                     bio = ERR_PTR(-ENOMEM);
> > -                     goto out;
> > -             }
> > -
> > -             kaddr += PAGE_SIZE;
> > -     }
> > -
> > -     bio->bi_end_io = pblk_bio_map_addr_endio;
> > -out:
> > -     return bio;
> > -}
> > -
> > int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
> >                  unsigned long secs_to_flush, bool skip_meta)
> > {
> > @@ -722,9 +676,7 @@ u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
> >
> > int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
> > {
> > -     struct nvm_tgt_dev *dev = pblk->dev;
> >       struct pblk_line_meta *lm = &pblk->lm;
> > -     struct bio *bio;
> >       struct ppa_addr *ppa_list;
> >       struct nvm_rq rqd;
> >       u64 paddr = pblk_line_smeta_start(pblk, line);
> > @@ -736,16 +688,6 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
> >       if (ret)
> >               return ret;
> >
> > -     bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
> > -     if (IS_ERR(bio)) {
> > -             ret = PTR_ERR(bio);
> > -             goto clear_rqd;
> > -     }
> > -
> > -     bio->bi_iter.bi_sector = 0; /* internal bio */
> > -     bio_set_op_attrs(bio, REQ_OP_READ, 0);
> > -
> > -     rqd.bio = bio;
> >       rqd.opcode = NVM_OP_PREAD;
> >       rqd.nr_ppas = lm->smeta_sec;
> >       rqd.is_seq = 1;
> > @@ -754,10 +696,9 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
> >       for (i = 0; i < lm->smeta_sec; i++, paddr++)
> >               ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
> >
> > -     ret = pblk_submit_io_sync(pblk, &rqd);
> > +     ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
> >       if (ret) {
> >               pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
> > -             bio_put(bio);
> >               goto clear_rqd;
> >       }
> >
> > @@ -776,9 +717,7 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
> > static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
> >                                u64 paddr)
> > {
> > -     struct nvm_tgt_dev *dev = pblk->dev;
> >       struct pblk_line_meta *lm = &pblk->lm;
> > -     struct bio *bio;
> >       struct ppa_addr *ppa_list;
> >       struct nvm_rq rqd;
> >       __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
> > @@ -791,16 +730,6 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
> >       if (ret)
> >               return ret;
> >
> > -     bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
> > -     if (IS_ERR(bio)) {
> > -             ret = PTR_ERR(bio);
> > -             goto clear_rqd;
> > -     }
> > -
> > -     bio->bi_iter.bi_sector = 0; /* internal bio */
> > -     bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
> > -
> > -     rqd.bio = bio;
> >       rqd.opcode = NVM_OP_PWRITE;
> >       rqd.nr_ppas = lm->smeta_sec;
> >       rqd.is_seq = 1;
> > @@ -814,10 +743,9 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
> >               meta->lba = lba_list[paddr] = addr_empty;
> >       }
> >
> > -     ret = pblk_submit_io_sync_sem(pblk, &rqd);
> > +     ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
> >       if (ret) {
> >               pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
> > -             bio_put(bio);
> >               goto clear_rqd;
> >       }
> >
> > @@ -838,10 +766,8 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
> > {
> >       struct nvm_tgt_dev *dev = pblk->dev;
> >       struct nvm_geo *geo = &dev->geo;
> > -     struct pblk_line_mgmt *l_mg = &pblk->l_mg;
> >       struct pblk_line_meta *lm = &pblk->lm;
> >       void *ppa_list_buf, *meta_list;
> > -     struct bio *bio;
> >       struct ppa_addr *ppa_list;
> >       struct nvm_rq rqd;
> >       u64 paddr = line->emeta_ssec;
> > @@ -867,17 +793,6 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
> >       rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
> >       rq_len = rq_ppas * geo->csecs;
> >
> > -     bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
> > -                                     l_mg->emeta_alloc_type, GFP_KERNEL);
> > -     if (IS_ERR(bio)) {
> > -             ret = PTR_ERR(bio);
> > -             goto free_rqd_dma;
> > -     }
> > -
> > -     bio->bi_iter.bi_sector = 0; /* internal bio */
> > -     bio_set_op_attrs(bio, REQ_OP_READ, 0);
> > -
> > -     rqd.bio = bio;
> >       rqd.meta_list = meta_list;
> >       rqd.ppa_list = ppa_list_buf;
> >       rqd.dma_meta_list = dma_meta_list;
> > @@ -896,7 +811,6 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
> >               while (test_bit(pos, line->blk_bitmap)) {
> >                       paddr += min;
> >                       if (pblk_boundary_paddr_checks(pblk, paddr)) {
> > -                             bio_put(bio);
> >                               ret = -EINTR;
> >                               goto free_rqd_dma;
> >                       }
> > @@ -906,7 +820,6 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
> >               }
> >
> >               if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
> > -                     bio_put(bio);
> >                       ret = -EINTR;
> >                       goto free_rqd_dma;
> >               }
> > @@ -915,10 +828,9 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
> >                       ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
> >       }
> >
> > -     ret = pblk_submit_io_sync(pblk, &rqd);
> > +     ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
> >       if (ret) {
> >               pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
> > -             bio_put(bio);
> >               goto free_rqd_dma;
> >       }
> >
> > @@ -963,7 +875,7 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
> >       /* The write thread schedules erases so that it minimizes disturbances
> >        * with writes. Thus, there is no need to take the LUN semaphore.
> >        */
> > -     ret = pblk_submit_io_sync(pblk, &rqd);
> > +     ret = pblk_submit_io_sync(pblk, &rqd, NULL);
> >       rqd.private = pblk;
> >       __pblk_end_io_erase(pblk, &rqd);
> >
> > @@ -1792,7 +1704,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
> >       /* The write thread schedules erases so that it minimizes disturbances
> >        * with writes. Thus, there is no need to take the LUN semaphore.
> >        */
> > -     err = pblk_submit_io(pblk, rqd);
> > +     err = pblk_submit_io(pblk, rqd, NULL);
> >       if (err) {
> >               struct nvm_tgt_dev *dev = pblk->dev;
> >               struct nvm_geo *geo = &dev->geo;
> > @@ -1923,7 +1835,6 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
> > static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
> > {
> >       struct pblk_line_meta *lm = &pblk->lm;
> > -     struct pblk_line_mgmt *l_mg = &pblk->l_mg;
> >       unsigned int lba_list_size = lm->emeta_len[2];
> >       struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
> >       struct pblk_emeta *emeta = line->emeta;
> > diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
> > index d98ea392fe33..d572d4559e4e 100644
> > --- a/drivers/lightnvm/pblk-read.c
> > +++ b/drivers/lightnvm/pblk-read.c
> > @@ -342,7 +342,7 @@ void pblk_submit_read(struct pblk *pblk, struct bio *bio)
> >               bio_put(int_bio);
> >               int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
> >               goto split_retry;
> > -     } else if (pblk_submit_io(pblk, rqd)) {
> > +     } else if (pblk_submit_io(pblk, rqd, NULL)) {
> >               /* Submitting IO to drive failed, let's report an error */
> >               rqd->error = -ENODEV;
> >               pblk_end_io_read(rqd);
> > @@ -419,7 +419,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
> > {
> >       struct nvm_tgt_dev *dev = pblk->dev;
> >       struct nvm_geo *geo = &dev->geo;
> > -     struct bio *bio;
> >       struct nvm_rq rqd;
> >       int data_len;
> >       int ret = NVM_IO_OK;
> > @@ -447,25 +446,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
> >               goto out;
> >
> >       data_len = (gc_rq->secs_to_gc) * geo->csecs;
> > -     bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
> > -                                             PBLK_VMALLOC_META, GFP_KERNEL);
> > -     if (IS_ERR(bio)) {
> > -             pblk_err(pblk, "could not allocate GC bio (%lu)\n",
> > -                                                             PTR_ERR(bio));
> > -             ret = PTR_ERR(bio);
> > -             goto err_free_dma;
> > -     }
> > -
> > -     bio->bi_iter.bi_sector = 0; /* internal bio */
> > -     bio_set_op_attrs(bio, REQ_OP_READ, 0);
> > -
> >       rqd.opcode = NVM_OP_PREAD;
> >       rqd.nr_ppas = gc_rq->secs_to_gc;
> > -     rqd.bio = bio;
> >
> > -     if (pblk_submit_io_sync(pblk, &rqd)) {
> > +     if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
> >               ret = -EIO;
> > -             goto err_free_bio;
> > +             goto err_free_dma;
> >       }
> >
> >       pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
> > @@ -489,8 +475,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
> >       pblk_free_rqd_meta(pblk, &rqd);
> >       return ret;
> >
> > -err_free_bio:
> > -     bio_put(bio);
> > err_free_dma:
> >       pblk_free_rqd_meta(pblk, &rqd);
> >       return ret;
> > diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
> > index e6dda04de144..d5e210c3c5b7 100644
> > --- a/drivers/lightnvm/pblk-recovery.c
> > +++ b/drivers/lightnvm/pblk-recovery.c
> > @@ -178,12 +178,11 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
> >       void *meta_list;
> >       struct pblk_pad_rq *pad_rq;
> >       struct nvm_rq *rqd;
> > -     struct bio *bio;
> >       struct ppa_addr *ppa_list;
> >       void *data;
> >       __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
> >       u64 w_ptr = line->cur_sec;
> > -     int left_line_ppas, rq_ppas, rq_len;
> > +     int left_line_ppas, rq_ppas;
> >       int i, j;
> >       int ret = 0;
> >
> > @@ -212,28 +211,15 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
> >               goto fail_complete;
> >       }
> >
> > -     rq_len = rq_ppas * geo->csecs;
> > -
> > -     bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
> > -                                             PBLK_VMALLOC_META, GFP_KERNEL);
> > -     if (IS_ERR(bio)) {
> > -             ret = PTR_ERR(bio);
> > -             goto fail_complete;
> > -     }
> > -
> > -     bio->bi_iter.bi_sector = 0; /* internal bio */
> > -     bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
> > -
> >       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
> >
> >       ret = pblk_alloc_rqd_meta(pblk, rqd);
> >       if (ret) {
> >               pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
> > -             bio_put(bio);
> >               goto fail_complete;
> >       }
> >
> > -     rqd->bio = bio;
> > +     rqd->bio = NULL;
> >       rqd->opcode = NVM_OP_PWRITE;
> >       rqd->is_seq = 1;
> >       rqd->nr_ppas = rq_ppas;
> > @@ -275,13 +261,12 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
> >       kref_get(&pad_rq->ref);
> >       pblk_down_chunk(pblk, ppa_list[0]);
> >
> > -     ret = pblk_submit_io(pblk, rqd);
> > +     ret = pblk_submit_io(pblk, rqd, data);
> >       if (ret) {
> >               pblk_err(pblk, "I/O submission failed: %d\n", ret);
> >               pblk_up_chunk(pblk, ppa_list[0]);
> >               kref_put(&pad_rq->ref, pblk_recov_complete);
> >               pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
> > -             bio_put(bio);
> >               goto fail_complete;
> >       }
> >
> > @@ -375,7 +360,6 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
> >       struct ppa_addr *ppa_list;
> >       void *meta_list;
> >       struct nvm_rq *rqd;
> > -     struct bio *bio;
> >       void *data;
> >       dma_addr_t dma_ppa_list, dma_meta_list;
> >       __le64 *lba_list;
> > @@ -407,15 +391,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
> >       rq_len = rq_ppas * geo->csecs;
> >
> > retry_rq:
> > -     bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
> > -     if (IS_ERR(bio))
> > -             return PTR_ERR(bio);
> > -
> > -     bio->bi_iter.bi_sector = 0; /* internal bio */
> > -     bio_set_op_attrs(bio, REQ_OP_READ, 0);
> > -     bio_get(bio);
> > -
> > -     rqd->bio = bio;
> > +     rqd->bio = NULL;
> >       rqd->opcode = NVM_OP_PREAD;
> >       rqd->meta_list = meta_list;
> >       rqd->nr_ppas = rq_ppas;
> > @@ -445,10 +421,9 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
> >                               addr_to_gen_ppa(pblk, paddr + j, line->id);
> >       }
> >
> > -     ret = pblk_submit_io_sync(pblk, rqd);
> > +     ret = pblk_submit_io_sync(pblk, rqd, data);
> >       if (ret) {
> >               pblk_err(pblk, "I/O submission failed: %d\n", ret);
> > -             bio_put(bio);
> >               return ret;
> >       }
> >
> > @@ -460,24 +435,20 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
> >
> >               if (padded) {
> >                       pblk_log_read_err(pblk, rqd);
> > -                     bio_put(bio);
> >                       return -EINTR;
> >               }
> >
> >               pad_distance = pblk_pad_distance(pblk, line);
> >               ret = pblk_recov_pad_line(pblk, line, pad_distance);
> >               if (ret) {
> > -                     bio_put(bio);
> >                       return ret;
> >               }
> >
> >               padded = true;
> > -             bio_put(bio);
> >               goto retry_rq;
> >       }
> >
> >       pblk_get_packed_meta(pblk, rqd);
> > -     bio_put(bio);
> >
> >       for (i = 0; i < rqd->nr_ppas; i++) {
> >               struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
> > diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
> > index 4e63f9b5954c..b9a2aeba95ab 100644
> > --- a/drivers/lightnvm/pblk-write.c
> > +++ b/drivers/lightnvm/pblk-write.c
> > @@ -373,7 +373,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
> >       struct pblk_emeta *emeta = meta_line->emeta;
> >       struct ppa_addr *ppa_list;
> >       struct pblk_g_ctx *m_ctx;
> > -     struct bio *bio;
> >       struct nvm_rq *rqd;
> >       void *data;
> >       u64 paddr;
> > @@ -391,20 +390,9 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
> >       rq_len = rq_ppas * geo->csecs;
> >       data = ((void *)emeta->buf) + emeta->mem;
> >
> > -     bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
> > -                                     l_mg->emeta_alloc_type, GFP_KERNEL);
> > -     if (IS_ERR(bio)) {
> > -             pblk_err(pblk, "failed to map emeta io");
> > -             ret = PTR_ERR(bio);
> > -             goto fail_free_rqd;
> > -     }
> > -     bio->bi_iter.bi_sector = 0; /* internal bio */
> > -     bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
> > -     rqd->bio = bio;
> > -
> >       ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
> >       if (ret)
> > -             goto fail_free_bio;
> > +             goto fail_free_rqd;
> >
> >       ppa_list = nvm_rq_to_ppa_list(rqd);
> >       for (i = 0; i < rqd->nr_ppas; ) {
> > @@ -423,7 +411,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
> >
> >       pblk_down_chunk(pblk, ppa_list[0]);
> >
> > -     ret = pblk_submit_io(pblk, rqd);
> > +     ret = pblk_submit_io(pblk, rqd, data);
> >       if (ret) {
> >               pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
> >               goto fail_rollback;
> > @@ -437,8 +425,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
> >       pblk_dealloc_page(pblk, meta_line, rq_ppas);
> >       list_add(&meta_line->list, &meta_line->list);
> >       spin_unlock(&l_mg->close_lock);
> > -fail_free_bio:
> > -     bio_put(bio);
> > fail_free_rqd:
> >       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
> >       return ret;
> > @@ -523,7 +509,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
> >       meta_line = pblk_should_submit_meta_io(pblk, rqd);
> >
> >       /* Submit data write for current data line */
> > -     err = pblk_submit_io(pblk, rqd);
> > +     err = pblk_submit_io(pblk, rqd, NULL);
> >       if (err) {
> >               pblk_err(pblk, "data I/O submission failed: %d\n", err);
> >               return NVM_IO_ERR;
> > diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
> > index a67855387f53..d515d3409a74 100644
> > --- a/drivers/lightnvm/pblk.h
> > +++ b/drivers/lightnvm/pblk.h
> > @@ -783,14 +783,10 @@ struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
> >                                             struct ppa_addr ppa);
> > void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
> > void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
> > -int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
> > -int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
> > -int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd);
> > +int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
> > +int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
> > int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
> > void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
> > -struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
> > -                           unsigned int nr_secs, unsigned int len,
> > -                           int alloc_type, gfp_t gfp_mask);
> > struct pblk_line *pblk_line_get(struct pblk *pblk);
> > struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
> > struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
> > diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
> > index d6f121452d5d..ec46693f6b64 100644
> > --- a/drivers/nvme/host/lightnvm.c
> > +++ b/drivers/nvme/host/lightnvm.c
> > @@ -667,11 +667,14 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
> >       return rq;
> > }
> >
> > -static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
> > +static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
> > +                           void *buf)
> > {
> > +     struct nvm_geo *geo = &dev->geo;
> >       struct request_queue *q = dev->q;
> >       struct nvme_nvm_command *cmd;
> >       struct request *rq;
> > +     int ret;
> >
> >       cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
> >       if (!cmd)
> > @@ -679,8 +682,15 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
> >
> >       rq = nvme_nvm_alloc_request(q, rqd, cmd);
> >       if (IS_ERR(rq)) {
> > -             kfree(cmd);
> > -             return PTR_ERR(rq);
> > +             ret = PTR_ERR(rq);
> > +             goto err_free_cmd;
> > +     }
> > +
> > +     if (buf) {
> > +             ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
> > +                             GFP_KERNEL);
> > +             if (ret)
> > +                     goto err_free_cmd;
> >       }
> >
> >       rq->end_io_data = rqd;
> > @@ -688,6 +698,10 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
> >       blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
> >
> >       return 0;
> > +
> > +err_free_cmd:
> > +     kfree(cmd);
> > +     return ret;
> > }
> >
> > static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
> > diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> > index 8891647b24b1..ee8ec2e68055 100644
> > --- a/include/linux/lightnvm.h
> > +++ b/include/linux/lightnvm.h
> > @@ -88,7 +88,7 @@ typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
> > typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
> > typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
> >                                                       struct nvm_chk_meta *);
> > -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
> > +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
> > typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
> > typedef void (nvm_destroy_dma_pool_fn)(void *);
> > typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
> > @@ -680,8 +680,8 @@ extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
> >                             int, struct nvm_chk_meta *);
> > extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
> >                             int, int);
> > -extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
> > -extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
> > +extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
> > +extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
> > extern void nvm_end_io(struct nvm_rq *);
> >
> > #else /* CONFIG_NVM */
> > --
> > 2.7.4
>
> It’s very good to get rid of pblk_bio_map_addr(). Need to look at this
> closer because I remember a number of corner cases due to the metadata
> allocation. Have you tested the vmalloc allocation path? Note that you
> need big lines for this to happen as it will try to do kmalloc if
> possible.

Yeah, I checked that vmalloc worked for the emeta buffers (by switching
over manually), so we should be good.
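
For reference, here is a rough sketch of the path we are talking about
(illustrative names only, not the exact pblk/nvme symbols, and error
handling trimmed):

	void *emeta_buf;
	int ret;

	/*
	 * Allocation side (what patch 3/4 moves pblk to): kvmalloc()
	 * tries kmalloc first and only falls back to vmalloc for large
	 * buffers, so the vmalloc path is only exercised on big lines --
	 * hence the manual switch-over to test it.
	 */
	emeta_buf = kvmalloc(emeta_len, GFP_KERNEL);
	if (!emeta_buf)
		return -ENOMEM;

	/*
	 * Driver side: the buffer pointer now travels all the way down
	 * to ->submit_io(), which maps it into the request;
	 * blk_rq_map_kern() handles both kmalloc'd and vmalloc'd memory,
	 * so callers no longer need to build bios themselves.
	 */
	ret = blk_rq_map_kern(q, rq, emeta_buf, geo->csecs * rqd->nr_ppas,
			      GFP_KERNEL);
	if (ret) {
		kvfree(emeta_buf);
		return ret;
	}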

>
> Javier

Thread overview: 17+ messages
2019-07-31  9:41 [PATCH 0/4] lnvm/pblk mapping cleanups Hans Holmberg
2019-07-31  9:41 ` [PATCH 1/4] lightnvm: remove nvm_submit_io_sync_fn Hans Holmberg
2019-07-31 13:54   ` Javier González
2019-08-06  7:05   ` Christoph Hellwig
2019-07-31  9:41 ` [PATCH 2/4] lightnvm: move metadata mapping to lower level driver Hans Holmberg
2019-07-31 13:58   ` Javier González
2019-08-01  7:15     ` Hans Holmberg [this message]
2019-08-01  7:46       ` Javier González
2019-08-06  7:06   ` Christoph Hellwig
2019-07-31  9:41 ` [PATCH 3/4] lightnvm: pblk: use kvmalloc for metadata Hans Holmberg
2019-07-31 14:00   ` Javier González
2019-08-06  7:06   ` Christoph Hellwig
2019-07-31  9:41 ` [PATCH 4/4] block: stop exporting bio_map_kern Hans Holmberg
2019-07-31 17:13   ` Javier González
2019-08-06  7:07   ` Christoph Hellwig
2019-08-06 13:40 ` [PATCH 0/4] lnvm/pblk mapping cleanups Matias Bjørling
2019-08-06 14:16   ` Jens Axboe
