From: Maxim Levitsky <mlevitsk@redhat.com>
To: Klaus Jensen <its@irrelevant.dk>, qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
	Klaus Jensen <k.jensen@samsung.com>,
	Keith Busch <kbusch@kernel.org>,
	qemu-block@nongnu.org, Max Reitz <mreitz@redhat.com>
Subject: Re: [PATCH 05/16] hw/block/nvme: refactor dma read/write
Date: Wed, 29 Jul 2020 20:35:46 +0300	[thread overview]
Message-ID: <ae04c5a7230a8debb5ae251b816cc1a0f44bd80e.camel@redhat.com> (raw)
In-Reply-To: <20200720113748.322965-6-its@irrelevant.dk>

On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
> 
> Refactor the nvme_dma_{read,write}_prp functions into a common function
> taking a DMADirection parameter.
> 
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> ---
>  hw/block/nvme.c | 88 ++++++++++++++++++++++++-------------------------
>  1 file changed, 43 insertions(+), 45 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 6a1a1626b87b..d314a604db81 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -361,55 +361,50 @@ unmap:
>      return status;
>  }
>  
> -static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> -                                   uint64_t prp1, uint64_t prp2)
> +static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> +                             uint64_t prp1, uint64_t prp2, DMADirection dir)
>  {
>      QEMUSGList qsg;
>      QEMUIOVector iov;
>      uint16_t status = NVME_SUCCESS;
>  
> -    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> -        return NVME_INVALID_FIELD | NVME_DNR;
> +    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
> +    if (status) {
> +        return status;
>      }
> +
>      if (qsg.nsg > 0) {
> -        if (dma_buf_write(ptr, len, &qsg)) {
> -            status = NVME_INVALID_FIELD | NVME_DNR;
> +        uint64_t residual;
> +
> +        if (dir == DMA_DIRECTION_TO_DEVICE) {
> +            residual = dma_buf_write(ptr, len, &qsg);
> +        } else {
> +            residual = dma_buf_read(ptr, len, &qsg);
>          }
> -        qemu_sglist_destroy(&qsg);
> -    } else {
> -        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
> -            status = NVME_INVALID_FIELD | NVME_DNR;
> -        }
> -        qemu_iovec_destroy(&iov);
> -    }
> -    return status;
> -}
>  
> -static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> -    uint64_t prp1, uint64_t prp2)
> -{
> -    QEMUSGList qsg;
> -    QEMUIOVector iov;
> -    uint16_t status = NVME_SUCCESS;
> -
> -    trace_pci_nvme_dma_read(prp1, prp2);
> -
> -    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> -        return NVME_INVALID_FIELD | NVME_DNR;
> -    }
> -    if (qsg.nsg > 0) {
> -        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
> +        if (unlikely(residual)) {
>              trace_pci_nvme_err_invalid_dma();
>              status = NVME_INVALID_FIELD | NVME_DNR;
>          }
> +
>          qemu_sglist_destroy(&qsg);
>      } else {
> -        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
> +        size_t bytes;
> +
> +        if (dir == DMA_DIRECTION_TO_DEVICE) {
> +            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
> +        } else {
> +            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
> +        }
> +
> +        if (unlikely(bytes != len)) {
>              trace_pci_nvme_err_invalid_dma();
>              status = NVME_INVALID_FIELD | NVME_DNR;
>          }
> +
>          qemu_iovec_destroy(&iov);
>      }
> +
I know I reviewed this, but thinking about it now: why not add an assert
here that we don't end up with both the iov and the qsg holding data?
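Something like this, right after the nvme_map_prp() call (an untested
sketch; it relies on the qsg.nsg and iov.niov counters that the function
already uses):

    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
    if (status) {
        return status;
    }

    /* nvme_map_prp() should fill either the qsg or the iov, never both */
    assert(!(qsg.nsg > 0 && iov.niov > 0));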

Best regards,
	Maxim Levitsky

>      return status;
>  }
>  
> @@ -840,8 +835,8 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
>          nvme_clear_events(n, NVME_AER_TYPE_SMART);
>      }
>  
> -    return nvme_dma_read_prp(n, (uint8_t *) &smart + off, trans_len, prp1,
> -                             prp2);
> +    return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
> +                        DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
> @@ -862,8 +857,8 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
>  
>      trans_len = MIN(sizeof(fw_log) - off, buf_len);
>  
> -    return nvme_dma_read_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1,
> -                             prp2);
> +    return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
> +                        DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
> @@ -887,7 +882,8 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
>  
>      trans_len = MIN(sizeof(errlog) - off, buf_len);
>  
> -    return nvme_dma_read_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2);
> +    return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
> +                        DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> @@ -1042,8 +1038,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
>  
>      trace_pci_nvme_identify_ctrl();
>  
> -    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
> -        prp1, prp2);
> +    return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
> +                        prp2, DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
> @@ -1062,8 +1058,8 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
>  
>      ns = &n->namespaces[nsid - 1];
>  
> -    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
> -        prp1, prp2);
> +    return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
> +                        prp2, DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
> @@ -1098,7 +1094,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
>              break;
>          }
>      }
> -    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
> +    ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
> +                       DMA_DIRECTION_FROM_DEVICE);
>      g_free(list);
>      return ret;
>  }
> @@ -1139,7 +1136,8 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeIdentify *c)
>      ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
>      stl_be_p(&ns_descrs->uuid.v, nsid);
>  
> -    return nvme_dma_read_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2);
> +    return nvme_dma_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2,
> +                        DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
> @@ -1220,8 +1218,8 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
>  
>      uint64_t timestamp = nvme_get_timestamp(n);
>  
> -    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
> -                                 sizeof(timestamp), prp1, prp2);
> +    return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
> +                        prp2, DMA_DIRECTION_FROM_DEVICE);
>  }
>  
>  static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> @@ -1352,8 +1350,8 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
>      uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
>      uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>  
> -    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
> -                                sizeof(timestamp), prp1, prp2);
> +    ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
> +                       prp2, DMA_DIRECTION_TO_DEVICE);
>      if (ret != NVME_SUCCESS) {
>          return ret;
>      }



