From: Klaus Jensen <its@irrelevant.dk>
To: qemu-devel@nongnu.org
Cc: Fam Zheng <fam@euphon.net>, Kevin Wolf <kwolf@redhat.com>,
	qemu-block@nongnu.org, Klaus Jensen <k.jensen@samsung.com>,
	Maxim Levitsky <mlevitsk@redhat.com>,
	Klaus Jensen <its@irrelevant.dk>,
	Andrzej Jakowski <andrzej.jakowski@linux.intel.com>,
	Minwoo Im <minwoo.im.dev@gmail.com>,
	Keith Busch <kbusch@kernel.org>, Max Reitz <mreitz@redhat.com>
Subject: [PATCH v2 15/16] hw/block/nvme: use preallocated qsg/iov in nvme_dma_prp
Date: Thu, 30 Jul 2020 00:06:37 +0200
Message-ID: <20200729220638.344477-16-its@irrelevant.dk>
In-Reply-To: <20200729220638.344477-1-its@irrelevant.dk>

From: Klaus Jensen <k.jensen@samsung.com>

Since cleanup of the request qsg/iov is now always done post-use, there
is no need for a stack-allocated qsg/iov in nvme_dma_prp; use the
preallocated ones in the NvmeRequest instead.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Acked-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
---
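
A quick sketch for reviewers (not part of the commit message): with the
request-scoped qsg/iov introduced earlier in this series, the mapping
target simply moves from the stack into the request. Roughly, assuming
the NvmeRequest layout from the earlier patches (fields shown here are
illustrative, see hw/block/nvme.h):

    typedef struct NvmeRequest {
        /* ... other fields elided ... */
        QEMUSGList   qsg;   /* filled by nvme_map_prp() for normal memory */
        QEMUIOVector iov;   /* filled by nvme_map_prp() for CMB addresses */
    } NvmeRequest;

Because the consolidated clearing from the previous patch destroys both
of these once the request completes, nvme_dma_prp() no longer needs its
own qemu_sglist_destroy()/qemu_iovec_destroy() calls.
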
 hw/block/nvme.c | 41 ++++++++++++++++++-----------------------
 1 file changed, 18 insertions(+), 23 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 045dd55376a5..55b1a68ced8c 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -381,50 +381,45 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
 }
 
 static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
-                             uint64_t prp1, uint64_t prp2, DMADirection dir)
+                             uint64_t prp1, uint64_t prp2, DMADirection dir,
+                             NvmeRequest *req)
 {
-    QEMUSGList qsg;
-    QEMUIOVector iov;
     uint16_t status = NVME_SUCCESS;
 
-    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
+    status = nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, len, n);
     if (status) {
         return status;
     }
 
     /* assert that only one of qsg and iov carries data */
-    assert((qsg.nsg > 0) != (iov.niov > 0));
+    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));
 
-    if (qsg.nsg > 0) {
+    if (req->qsg.nsg > 0) {
         uint64_t residual;
 
         if (dir == DMA_DIRECTION_TO_DEVICE) {
-            residual = dma_buf_write(ptr, len, &qsg);
+            residual = dma_buf_write(ptr, len, &req->qsg);
         } else {
-            residual = dma_buf_read(ptr, len, &qsg);
+            residual = dma_buf_read(ptr, len, &req->qsg);
         }
 
         if (unlikely(residual)) {
             trace_pci_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
-
-        qemu_sglist_destroy(&qsg);
     } else {
         size_t bytes;
 
         if (dir == DMA_DIRECTION_TO_DEVICE) {
-            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
+            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
         } else {
-            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
+            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
         }
 
         if (unlikely(bytes != len)) {
             trace_pci_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
-
-        qemu_iovec_destroy(&iov);
     }
 
     return status;
@@ -893,7 +888,7 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
     }
 
     return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
-                        DMA_DIRECTION_FROM_DEVICE);
+                        DMA_DIRECTION_FROM_DEVICE, req);
 }
 
 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
@@ -916,7 +911,7 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
     trans_len = MIN(sizeof(fw_log) - off, buf_len);
 
     return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
-                        DMA_DIRECTION_FROM_DEVICE);
+                        DMA_DIRECTION_FROM_DEVICE, req);
 }
 
 static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
@@ -941,7 +936,7 @@ static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
     trans_len = MIN(sizeof(errlog) - off, buf_len);
 
     return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
-                        DMA_DIRECTION_FROM_DEVICE);
+                        DMA_DIRECTION_FROM_DEVICE, req);
 }
 
 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
@@ -1107,7 +1102,7 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
     trace_pci_nvme_identify_ctrl();
 
     return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
-                        prp2, DMA_DIRECTION_FROM_DEVICE);
+                        prp2, DMA_DIRECTION_FROM_DEVICE, req);
 }
 
 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
@@ -1128,7 +1123,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
     ns = &n->namespaces[nsid - 1];
 
     return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
-                        prp2, DMA_DIRECTION_FROM_DEVICE);
+                        prp2, DMA_DIRECTION_FROM_DEVICE, req);
 }
 
 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
@@ -1165,7 +1160,7 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
         }
     }
     ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
-                       DMA_DIRECTION_FROM_DEVICE);
+                       DMA_DIRECTION_FROM_DEVICE, req);
     g_free(list);
     return ret;
 }
@@ -1208,7 +1203,7 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
     stl_be_p(&ns_descrs->uuid.v, nsid);
 
     return nvme_dma_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2,
-                        DMA_DIRECTION_FROM_DEVICE);
+                        DMA_DIRECTION_FROM_DEVICE, req);
 }
 
 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
@@ -1291,7 +1286,7 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
     uint64_t timestamp = nvme_get_timestamp(n);
 
     return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
-                        prp2, DMA_DIRECTION_FROM_DEVICE);
+                        prp2, DMA_DIRECTION_FROM_DEVICE, req);
 }
 
 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
@@ -1425,7 +1420,7 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
     uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
 
     ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
-                       prp2, DMA_DIRECTION_TO_DEVICE);
+                       prp2, DMA_DIRECTION_TO_DEVICE, req);
     if (ret != NVME_SUCCESS) {
         return ret;
     }
-- 
2.27.0


