From: Klaus Jensen <its@irrelevant.dk>
To: Keith Busch <kbusch@kernel.org>
Cc: "Kevin Wolf" <kwolf@redhat.com>,
	"Niklas Cassel" <Niklas.Cassel@wdc.com>,
	qemu-block@nongnu.org, "Klaus Jensen" <k.jensen@samsung.com>,
	qemu-devel@nongnu.org,
	"Philippe Mathieu-Daudé" <philmd@redhat.com>
Subject: Re: [PATCH 1/9] hw/block/nvme: remove pointless rw indirection
Date: Thu, 1 Oct 2020 20:34:01 +0200	[thread overview]
Message-ID: <20201001183401.GC792691@apples.localdomain> (raw)
In-Reply-To: <20201001040508.GA681387@apples.localdomain>

On Oct  1 06:05, Klaus Jensen wrote:
> On Sep 30 15:04, Keith Busch wrote:
> > The code switches on the opcode to invoke a function specific to that
> > opcode. There's no point in consolidating back to a common function that
> > just switches on that same opcode without any actual common code.
> > Restore the opcode specific behavior without going back through another
> > level of switches.
> > 
> > Signed-off-by: Keith Busch <kbusch@kernel.org>
> 
> Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
> 
> Point taken. I could've sworn I had a better reason for this.
> 
> > ---
> >  hw/block/nvme.c | 91 ++++++++++++++++---------------------------------
> >  1 file changed, 29 insertions(+), 62 deletions(-)
> > 
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index da8344f196..db52ea0db9 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -927,68 +927,12 @@ static void nvme_rw_cb(void *opaque, int ret)
> >      nvme_enqueue_req_completion(nvme_cq(req), req);
> >  }
> >  
> > -static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
> > -                            NvmeRequest *req)
> > -{
> > -    BlockAcctCookie *acct = &req->acct;
> > -    BlockAcctStats *stats = blk_get_stats(blk);
> > -
> > -    bool is_write = false;
> > -
> > -    trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
> > -                          nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
> > -                          offset, len);
> > -
> > -    switch (req->cmd.opcode) {
> > -    case NVME_CMD_FLUSH:
> > -        block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
> > -        req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
> > -        break;
> > -
> > -    case NVME_CMD_WRITE_ZEROES:
> > -        block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
> > -        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
> > -                                           BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
> > -                                           req);
> > -        break;
> > -
> > -    case NVME_CMD_WRITE:
> > -        is_write = true;
> > -
> > -        /* fallthrough */
> > -
> > -    case NVME_CMD_READ:
> > -        block_acct_start(stats, acct, len,
> > -                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
> > -
> > -        if (req->qsg.sg) {
> > -            if (is_write) {
> > -                req->aiocb = dma_blk_write(blk, &req->qsg, offset,
> > -                                           BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> > -            } else {
> > -                req->aiocb = dma_blk_read(blk, &req->qsg, offset,
> > -                                          BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> > -            }
> > -        } else {
> > -            if (is_write) {
> > -                req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
> > -                                             nvme_rw_cb, req);
> > -            } else {
> > -                req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
> > -                                            nvme_rw_cb, req);
> > -            }
> > -        }
> > -
> > -        break;
> > -    }
> > -
> > -    return NVME_NO_COMPLETE;
> > -}
> > -
> >  static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
> >  {
> > -    NvmeNamespace *ns = req->ns;
> > -    return nvme_do_aio(ns->blkconf.blk, 0, 0, req);
> > +    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,

Uh, ouch!

This and the rest below need to be changed to ns->blkconf.blk, not
n->conf.blk.
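
Something like this for the flush handler (untested sketch, just to
illustrate; the write zeroes and rw handlers need the same
ns->blkconf.blk treatment):

static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;

    /* account and issue the flush against the per-namespace backend */
    block_acct_start(blk_get_stats(blk), &req->acct, 0, BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}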

> > +                     BLOCK_ACCT_FLUSH);
> > +    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);
> > +    return NVME_NO_COMPLETE;
> >  }
> >  
> >  static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
> > @@ -1009,7 +953,11 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
> >          return status;
> >      }
> >  
> > -    return nvme_do_aio(ns->blkconf.blk, offset, count, req);
> > +    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
> > +                     BLOCK_ACCT_WRITE);
> > +    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
> > +                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
> > +    return NVME_NO_COMPLETE;
> >  }
> >  
> >  static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
> > @@ -1023,6 +971,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
> >      uint64_t data_offset = nvme_l2b(ns, slba);
> >      enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
> >          BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
> > +    BlockBackend *blk = ns->blkconf.blk;
> >      uint16_t status;
> >  
> >      trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
> > @@ -1045,7 +994,25 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
> >          goto invalid;
> >      }
> >  
> > -    return nvme_do_aio(ns->blkconf.blk, data_offset, data_size, req);
> > +    block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
> > +    if (req->qsg.sg) {
> > +        if (acct == BLOCK_ACCT_WRITE) {
> > +            req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
> > +                                       BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> > +        } else {
> > +            req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
> > +                                      BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> > +        }
> > +    } else {
> > +        if (acct == BLOCK_ACCT_WRITE) {
> > +            req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
> > +                                         nvme_rw_cb, req);
> > +        } else {
> > +            req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
> > +                                        nvme_rw_cb, req);
> > +        }
> > +    }
> > +    return NVME_NO_COMPLETE;
> >  
> >  invalid:
> >      block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
> > -- 
> > 2.24.1
> > 
> > 
> 
> -- 
> One of us - No more doubt, silence or taboo about mental illness.



-- 
One of us - No more doubt, silence or taboo about mental illness.
