From: James Smart <jsmart2021@gmail.com>
To: linux-nvme@lists.infradead.org
Cc: James Smart <jsmart2021@gmail.com>, martin.petersen@oracle.com
Subject: [PATCH 08/29] nvmet-fc: Better size LS buffers
Date: Wed,  5 Feb 2020 10:37:32 -0800	[thread overview]
Message-ID: <20200205183753.25959-9-jsmart2021@gmail.com> (raw)
In-Reply-To: <20200205183753.25959-1-jsmart2021@gmail.com>

The current code uses NVME_FC_MAX_LS_BUFFER_SIZE (2KB) when allocating
buffers for LS requests and responses. This is considerably larger than
any of the LS payloads actually defined.

Rework the code to define unions of all possible LS request and response
payloads and size the buffers from those unions. Remove
NVME_FC_MAX_LS_BUFFER_SIZE.

Signed-off-by: James Smart <jsmart2021@gmail.com>
---
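Note (illustration only, not part of the patch): the sizing approach relies
on sizeof() of a union being the size of its largest member, and on both
buffers coming from a single allocation with the response buffer starting
at &rqstbuf[1]. Below is a minimal, self-contained userspace sketch of that
pattern; the struct names and byte sizes are placeholders standing in for
the real fcnvme_ls_* types.

#include <stdio.h>
#include <stdlib.h>

/* Placeholder LS payloads -- stand-ins for the fcnvme_ls_* structs. */
struct cr_assoc_rqst { unsigned char bytes[600]; };
struct cr_conn_rqst  { unsigned char bytes[144]; };

union ls_requests {
	struct cr_assoc_rqst	rq_cr_assoc;
	struct cr_conn_rqst	rq_cr_conn;
};	/* sizeof == largest member (600 here) */

struct cr_assoc_acc { unsigned char bytes[128]; };
struct ls_rjt       { unsigned char bytes[32]; };

union ls_responses {
	struct cr_assoc_acc	rsp_cr_assoc;
	struct ls_rjt		rsp_rjt;
};	/* sizeof == largest member (128 here) */

int main(void)
{
	/* One allocation holds both buffers, as the patch does with kzalloc(). */
	union ls_requests *rqstbuf =
		calloc(1, sizeof(union ls_requests) + sizeof(union ls_responses));
	union ls_responses *rspbuf;

	if (!rqstbuf)
		return 1;

	/* Response buffer starts immediately after the request union. */
	rspbuf = (union ls_responses *)&rqstbuf[1];

	printf("request buffer:  %zu bytes\n", sizeof(*rqstbuf));
	printf("response buffer: %zu bytes\n", sizeof(*rspbuf));
	printf("total allocated: %zu bytes\n",
	       sizeof(*rqstbuf) + sizeof(*rspbuf));

	free(rqstbuf);
	return 0;
}

With these placeholder sizes the combined allocation is 728 bytes, versus
the 2 * 2048 bytes the old fixed define would have required.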
 drivers/nvme/host/fc.h   | 15 ++++++++++++++
 drivers/nvme/target/fc.c | 53 +++++++++++++++++++++---------------------------
 2 files changed, 38 insertions(+), 30 deletions(-)

diff --git a/drivers/nvme/host/fc.h b/drivers/nvme/host/fc.h
index d2861cdd58ee..08fa88381d45 100644
--- a/drivers/nvme/host/fc.h
+++ b/drivers/nvme/host/fc.h
@@ -16,6 +16,21 @@
  * ******************  FC-NVME LS HANDLING ******************
  */
 
+union nvmefc_ls_requests {
+	struct fcnvme_ls_cr_assoc_rqst		rq_cr_assoc;
+	struct fcnvme_ls_cr_conn_rqst		rq_cr_conn;
+	struct fcnvme_ls_disconnect_assoc_rqst	rq_dis_assoc;
+	struct fcnvme_ls_disconnect_conn_rqst	rq_dis_conn;
+} __aligned(128);	/* alignment for other things alloc'd with */
+
+union nvmefc_ls_responses {
+	struct fcnvme_ls_rjt			rsp_rjt;
+	struct fcnvme_ls_cr_assoc_acc		rsp_cr_assoc;
+	struct fcnvme_ls_cr_conn_acc		rsp_cr_conn;
+	struct fcnvme_ls_disconnect_assoc_acc	rsp_dis_assoc;
+	struct fcnvme_ls_disconnect_conn_acc	rsp_dis_conn;
+} __aligned(128);	/* alignment for other things alloc'd with */
+
 static inline void
 nvme_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
 {
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1f3118a3b0a3..66de6bd8f4fd 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -22,9 +22,6 @@
 
 #define NVMET_LS_CTX_COUNT		256
 
-/* for this implementation, assume small single frame rqst/rsp */
-#define NVME_FC_MAX_LS_BUFFER_SIZE		2048
-
 struct nvmet_fc_tgtport;
 struct nvmet_fc_tgt_assoc;
 
@@ -37,8 +34,8 @@ struct nvmet_fc_ls_iod {
 	struct nvmet_fc_tgtport		*tgtport;
 	struct nvmet_fc_tgt_assoc	*assoc;
 
-	u8				*rqstbuf;
-	u8				*rspbuf;
+	union nvmefc_ls_requests	*rqstbuf;
+	union nvmefc_ls_responses	*rspbuf;
 	u16				rqstdatalen;
 	dma_addr_t			rspdma;
 
@@ -340,15 +337,16 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
 		iod->tgtport = tgtport;
 		list_add_tail(&iod->ls_list, &tgtport->ls_list);
 
-		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
-			GFP_KERNEL);
+		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
+				       sizeof(union nvmefc_ls_responses),
+				       GFP_KERNEL);
 		if (!iod->rqstbuf)
 			goto out_fail;
 
-		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
+		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
 
 		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
-						NVME_FC_MAX_LS_BUFFER_SIZE,
+						sizeof(*iod->rspbuf),
 						DMA_TO_DEVICE);
 		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
 			goto out_fail;
@@ -361,7 +359,7 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
 	list_del(&iod->ls_list);
 	for (iod--, i--; i >= 0; iod--, i--) {
 		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
-				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
 		kfree(iod->rqstbuf);
 		list_del(&iod->ls_list);
 	}
@@ -379,7 +377,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
 
 	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
 		fc_dma_unmap_single(tgtport->dev,
-				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
+				iod->rspdma, sizeof(*iod->rspbuf),
 				DMA_TO_DEVICE);
 		kfree(iod->rqstbuf);
 		list_del(&iod->ls_list);
@@ -1262,10 +1260,8 @@ static void
 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 			struct nvmet_fc_ls_iod *iod)
 {
-	struct fcnvme_ls_cr_assoc_rqst *rqst =
-				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
-	struct fcnvme_ls_cr_assoc_acc *acc =
-				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
+	struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
+	struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
 	struct nvmet_fc_tgt_queue *queue;
 	int ret = 0;
 
@@ -1313,7 +1309,7 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 			"Create Association LS failed: %s\n",
 			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
-				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				sizeof(*acc), rqst->w0.ls_cmd,
 				FCNVME_RJT_RC_LOGIC,
 				FCNVME_RJT_EXP_NONE, 0);
 		return;
@@ -1348,10 +1344,8 @@ static void
 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
 			struct nvmet_fc_ls_iod *iod)
 {
-	struct fcnvme_ls_cr_conn_rqst *rqst =
-				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
-	struct fcnvme_ls_cr_conn_acc *acc =
-				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
+	struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
+	struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
 	struct nvmet_fc_tgt_queue *queue;
 	int ret = 0;
 
@@ -1404,7 +1398,7 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
 			"Create Connection LS failed: %s\n",
 			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
-				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				sizeof(*acc), rqst->w0.ls_cmd,
 				(ret == VERR_NO_ASSOC) ?
 					FCNVME_RJT_RC_INV_ASSOC :
 					FCNVME_RJT_RC_LOGIC,
@@ -1437,9 +1431,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			struct nvmet_fc_ls_iod *iod)
 {
 	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
-			(struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
+						&iod->rqstbuf->rq_dis_assoc;
 	struct fcnvme_ls_disconnect_assoc_acc *acc =
-			(struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
+						&iod->rspbuf->rsp_dis_assoc;
 	struct nvmet_fc_tgt_assoc *assoc;
 	int ret = 0;
 
@@ -1484,7 +1478,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			"Disconnect LS failed: %s\n",
 			validation_errors[ret]);
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
-				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				sizeof(*acc), rqst->w0.ls_cmd,
 				(ret == VERR_NO_ASSOC) ?
 					FCNVME_RJT_RC_INV_ASSOC :
 					FCNVME_RJT_RC_LOGIC,
@@ -1522,7 +1516,7 @@ nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
 
 	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
-				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
 	nvmet_fc_free_ls_iod(tgtport, iod);
 	nvmet_fc_tgtport_put(tgtport);
 }
@@ -1534,7 +1528,7 @@ nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
 	int ret;
 
 	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
-				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+				  sizeof(*iod->rspbuf), DMA_TO_DEVICE);
 
 	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
 	if (ret)
@@ -1548,8 +1542,7 @@ static void
 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
 			struct nvmet_fc_ls_iod *iod)
 {
-	struct fcnvme_ls_rqst_w0 *w0 =
-			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
+	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
 
 	iod->lsrsp->nvme_fc_private = iod;
 	iod->lsrsp->rspbuf = iod->rspbuf;
@@ -1580,7 +1573,7 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
 		break;
 	default:
 		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
-				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
+				sizeof(*iod->rspbuf), w0->ls_cmd,
 				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
 	}
 
@@ -1627,7 +1620,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
 	struct nvmet_fc_ls_iod *iod;
 
-	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
+	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests))
 		return -E2BIG;
 
 	if (!nvmet_fc_tgtport_get(tgtport))
-- 
2.13.7


_______________________________________________
linux-nvme mailing list
linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

Thread overview: 80+ messages
2020-02-05 18:37 [PATCH 00/29] nvme-fc/nvmet-fc: Add FC-NVME-2 disconnect association support James Smart
2020-02-05 18:37 ` [PATCH 01/29] nvme-fc: Sync header to FC-NVME-2 rev 1.08 James Smart
2020-02-28 20:36   ` Sagi Grimberg
2020-03-06  8:16   ` Hannes Reinecke
2020-03-26 16:10   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 02/29] nvmet-fc: fix typo in comment James Smart
2020-02-28 20:36   ` Sagi Grimberg
2020-03-06  8:17   ` Hannes Reinecke
2020-03-26 16:10   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 03/29] nvme-fc and nvmet-fc: revise LLDD api for LS reception and LS request James Smart
2020-02-28 20:38   ` Sagi Grimberg
2020-03-06  8:19   ` Hannes Reinecke
2020-03-26 16:16   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 04/29] nvme-fc nvmet_fc nvme_fcloop: adapt code to changed names in api header James Smart
2020-02-28 20:40   ` Sagi Grimberg
2020-03-06  8:21   ` Hannes Reinecke
2020-03-26 16:26   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 05/29] lpfc: " James Smart
2020-02-28 20:40   ` Sagi Grimberg
2020-03-06  8:25   ` Hannes Reinecke
2020-03-26 16:30   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 06/29] nvme-fcloop: Fix deallocation of working context James Smart
2020-02-28 20:43   ` Sagi Grimberg
2020-03-06  8:34   ` Hannes Reinecke
2020-03-26 16:35   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 07/29] nvme-fc nvmet-fc: refactor for common LS definitions James Smart
2020-03-06  8:35   ` Hannes Reinecke
2020-03-26 16:36   ` Himanshu Madhani
2020-02-05 18:37 ` James Smart [this message]
2020-02-28 21:04   ` [PATCH 08/29] nvmet-fc: Better size LS buffers Sagi Grimberg
2020-03-06  8:36   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 09/29] nvme-fc: Ensure private pointers are NULL if no data James Smart
2020-02-28 21:05   ` Sagi Grimberg
2020-03-06  8:44   ` Hannes Reinecke
2020-03-26 16:39   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 10/29] nvmefc: Use common definitions for LS names, formatting, and validation James Smart
2020-03-06  8:44   ` Hannes Reinecke
2020-03-26 16:41   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 11/29] nvme-fc: convert assoc_active flag to atomic James Smart
2020-02-28 21:08   ` Sagi Grimberg
2020-03-06  8:47   ` Hannes Reinecke
2020-03-26 19:16   ` Himanshu Madhani
2020-02-05 18:37 ` [PATCH 12/29] nvme-fc: Add Disconnect Association Rcv support James Smart
2020-03-06  9:00   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 13/29] nvmet-fc: add LS failure messages James Smart
2020-03-06  9:01   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 14/29] nvmet-fc: perform small cleanups on unneeded checks James Smart
2020-03-06  9:01   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 15/29] nvmet-fc: track hostport handle for associations James Smart
2020-03-06  9:02   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 16/29] nvmet-fc: rename ls_list to ls_rcv_list James Smart
2020-03-06  9:03   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 17/29] nvmet-fc: Add Disconnect Association Xmt support James Smart
2020-03-06  9:04   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 18/29] nvme-fcloop: refactor to enable target to host LS James Smart
2020-03-06  9:06   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 19/29] nvme-fcloop: add target to host LS request support James Smart
2020-03-06  9:07   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 20/29] lpfc: Refactor lpfc nvme headers James Smart
2020-03-06  9:18   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 21/29] lpfc: Refactor nvmet_rcv_ctx to create lpfc_async_xchg_ctx James Smart
2020-03-06  9:19   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 22/29] lpfc: Commonize lpfc_async_xchg_ctx state and flag definitions James Smart
2020-03-06  9:19   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 23/29] lpfc: Refactor NVME LS receive handling James Smart
2020-03-06  9:20   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 24/29] lpfc: Refactor Send LS Request support James Smart
2020-03-06  9:20   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 25/29] lpfc: Refactor Send LS Abort support James Smart
2020-03-06  9:21   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 26/29] lpfc: Refactor Send LS Response support James Smart
2020-03-06  9:21   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 27/29] lpfc: nvme: Add Receive LS Request and Send LS Response support to nvme James Smart
2020-03-06  9:23   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 28/29] lpfc: nvmet: Add support for NVME LS request hosthandle James Smart
2020-03-06  9:23   ` Hannes Reinecke
2020-02-05 18:37 ` [PATCH 29/29] lpfc: nvmet: Add Send LS Request and Abort LS Request support James Smart
2020-03-06  9:24   ` Hannes Reinecke
2020-03-06  9:26 ` [PATCH 00/29] nvme-fc/nvmet-fc: Add FC-NVME-2 disconnect association support Hannes Reinecke
2020-03-31 14:29 ` Christoph Hellwig
