From: Logan Gunthorpe <logang@deltatee.com>
To: linux-kernel@vger.kernel.org, linux-pci@vger.kernel.org,
	linux-nvme@lists.infradead.org, linux-rdma@vger.kernel.org,
	linux-nvdimm@lists.01.org, linux-block@vger.kernel.org
Cc: "Christian König" <christian.koenig@amd.com>,
	"Benjamin Herrenschmidt" <benh@kernel.crashing.org>,
	"Alex Williamson" <alex.williamson@redhat.com>,
	"Jérôme Glisse" <jglisse@redhat.com>,
	"Jason Gunthorpe" <jgg@mellanox.com>,
	"Bjorn Helgaas" <bhelgaas@google.com>,
	"Max Gurtovoy" <maxg@mellanox.com>,
	"Christoph Hellwig" <hch@lst.de>
Subject: [PATCH v5 12/13] nvmet: Introduce helper functions to allocate and free request SGLs
Date: Thu, 30 Aug 2018 12:53:51 -0600
Message-ID: <20180830185352.3369-13-logang@deltatee.com>
In-Reply-To: <20180830185352.3369-1-logang@deltatee.com>

Add helpers to allocate and free the SGL in a struct nvmet_req:

int nvmet_req_alloc_sgl(struct nvmet_req *req, struct nvmet_sq *sq)
void nvmet_req_free_sgl(struct nvmet_req *req)

This will be expanded in a future patch to implement peer-to-peer
memory DMA and is intended to be common to all target drivers. The
presently unused 'sq' argument of the alloc function will be needed to
decide whether to use peer-to-peer memory and, if so, to obtain the
correct provider to allocate the memory from.

The new helpers are used in nvmet-rdma. Because req.transfer_len is
used as the length of the SGL, it is now set earlier and cleared on any
error. Accumulating the length is also unnecessary, since the map_sgl
functions are only ever called once per request.
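
For context, a minimal sketch of how a transport might call these
helpers (nvmet_foo_rsp, nvmet_foo_map_data and the data_len field are
hypothetical names used for illustration only, not part of this patch):

static u16 nvmet_foo_map_data(struct nvmet_foo_rsp *rsp)
{
	/* set transfer_len first; it is used as the SGL length */
	rsp->req.transfer_len = le32_to_cpu(rsp->cmd->data_len);
	if (!rsp->req.transfer_len)
		return 0;

	/*
	 * The sq is passed so a later patch can decide whether to
	 * allocate from a peer-to-peer memory provider.
	 */
	if (nvmet_req_alloc_sgl(&rsp->req, &rsp->queue->nvme_sq) < 0) {
		rsp->req.transfer_len = 0;
		return NVME_SC_INTERNAL;
	}

	return 0;
}

static void nvmet_foo_release_rsp(struct nvmet_foo_rsp *rsp)
{
	/* frees the SGL and clears req->sg and req->sg_cnt */
	nvmet_req_free_sgl(&rsp->req);
}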

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
---
 drivers/nvme/target/core.c  | 18 ++++++++++++++++++
 drivers/nvme/target/nvmet.h |  2 ++
 drivers/nvme/target/rdma.c  | 20 ++++++++++++--------
 3 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ebf3e7a6c49e..6a1c8d5f552b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -725,6 +725,24 @@ void nvmet_req_execute(struct nvmet_req *req)
 }
 EXPORT_SYMBOL_GPL(nvmet_req_execute);
 
+int nvmet_req_alloc_sgl(struct nvmet_req *req, struct nvmet_sq *sq)
+{
+	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+	if (!req->sg)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
+
+void nvmet_req_free_sgl(struct nvmet_req *req)
+{
+	sgl_free(req->sg);
+	req->sg = NULL;
+	req->sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
+
 static inline bool nvmet_cc_en(u32 cc)
 {
 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index ec9af4ee03b6..7d6cb61021e4 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -336,6 +336,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 void nvmet_req_uninit(struct nvmet_req *req);
 void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
+int nvmet_req_alloc_sgl(struct nvmet_req *req, struct nvmet_sq *sq);
+void nvmet_req_free_sgl(struct nvmet_req *req);
 
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
 		u16 size);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3533e918ea37..e148dee72ba5 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -489,7 +489,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 	}
 
 	if (rsp->req.sg != rsp->cmd->inline_sg)
-		sgl_free(rsp->req.sg);
+		nvmet_req_free_sgl(&rsp->req);
 
 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
 		nvmet_rdma_process_wr_wait_list(queue);
@@ -638,24 +638,24 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 {
 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 	u64 addr = le64_to_cpu(sgl->addr);
-	u32 len = get_unaligned_le24(sgl->length);
 	u32 key = get_unaligned_le32(sgl->key);
 	int ret;
 
+	rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
 	/* no data command? */
-	if (!len)
+	if (!rsp->req.transfer_len)
 		return 0;
 
-	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
-	if (!rsp->req.sg)
-		return NVME_SC_INTERNAL;
+	ret = nvmet_req_alloc_sgl(&rsp->req, &rsp->queue->nvme_sq);
+	if (ret < 0)
+		goto error_out;
 
 	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
 			nvmet_data_dir(&rsp->req));
 	if (ret < 0)
-		return NVME_SC_INTERNAL;
-	rsp->req.transfer_len += len;
+		goto error_out;
 	rsp->n_rdma += ret;
 
 	if (invalidate) {
@@ -664,6 +664,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 	}
 
 	return 0;
+
+error_out:
+	rsp->req.transfer_len = 0;
+	return NVME_SC_INTERNAL;
 }
 
 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
-- 
2.11.0

