* [PATCH v5 1/2] nvme-rdma: support up to 4 segments of inline data
  2018-06-19 19:09 [PATCH v5 0/2] NVMF/RDMA 16K Inline Support Steve Wise
@ 2018-06-18 18:19 ` Steve Wise
  2018-06-18 18:22 ` [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) " Steve Wise
  2018-06-19 21:20 ` [PATCH v5 0/2] NVMF/RDMA 16K Inline Support Max Gurtovoy
  2 siblings, 0 replies; 9+ messages in thread
From: Steve Wise @ 2018-06-18 18:19 UTC (permalink / raw)


Allow up to 4 segments of inline data for NVMF WRITE operations.  This
reduces latency for small WRITEs by removing the need for the target to
issue a READ WR for IB, or a REG_MR + READ WR chain for iWARP.

Also cap the number of inline segments used based on the limitations of
the device.
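
To summarize the host-side conditions this adds: inline data is only
used for WRITEs on I/O queues, only when the controller advertises SGL
in-capsule data support, and only when both the segment count and the
payload size fit what the device and target allow.  A minimal standalone
sketch of that check follows (the struct and parameter names are
illustrative stand-ins for the corresponding nvme_rdma fields, not the
actual driver types):

/*
 * Sketch only: the decision nvme_rdma_map_data() makes after this patch,
 * pulled out into a standalone predicate with simplified types.
 */
#include <stdbool.h>

struct inline_caps {
	bool sgl_inline_supported;	/* Identify Controller SGLS bit 20 */
	int  num_inline_segments;	/* min(4, device max_send_sge - 1) */
	int  inline_data_size;		/* in-capsule size advertised by the target */
};

static bool can_use_inline_data(const struct inline_caps *caps, bool is_write,
				bool is_io_queue, int sg_count,
				int payload_bytes)
{
	return is_write && is_io_queue && caps->sgl_inline_supported &&
	       sg_count <= caps->num_inline_segments &&
	       payload_bytes <= caps->inline_data_size;
}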

Reviewed-by: Christoph Hellwig <hch at lst.de>
Reviewed-by: Sagi Grimberg <sagi at grimberg.me>
Signed-off-by: Steve Wise <swise at opengridcomputing.com>
---
 drivers/nvme/host/rdma.c | 38 +++++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c9424da..e02cb54 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -40,13 +40,14 @@
 
 #define NVME_RDMA_MAX_SEGMENTS		256
 
-#define NVME_RDMA_MAX_INLINE_SEGMENTS	1
+#define NVME_RDMA_MAX_INLINE_SEGMENTS	4
 
 struct nvme_rdma_device {
 	struct ib_device	*dev;
 	struct ib_pd		*pd;
 	struct kref		ref;
 	struct list_head	entry;
+	unsigned int		num_inline_segments;
 };
 
 struct nvme_rdma_qe {
@@ -117,6 +118,7 @@ struct nvme_rdma_ctrl {
 	struct sockaddr_storage src_addr;
 
 	struct nvme_ctrl	ctrl;
+	bool			use_inline_data;
 };
 
 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -249,7 +251,7 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
 	/* +1 for drain */
 	init_attr.cap.max_recv_wr = queue->queue_size + 1;
 	init_attr.cap.max_recv_sge = 1;
-	init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
+	init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
 	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	init_attr.qp_type = IB_QPT_RC;
 	init_attr.send_cq = queue->ib_cq;
@@ -374,6 +376,8 @@ static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
 		goto out_free_pd;
 	}
 
+	ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
+					ndev->dev->attrs.max_send_sge - 1);
 	list_add(&ndev->entry, &device_list);
 out_unlock:
 	mutex_unlock(&device_list_mutex);
@@ -1088,19 +1092,27 @@ static int nvme_rdma_set_sg_null(struct nvme_command *c)
 }
 
 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
-		struct nvme_rdma_request *req, struct nvme_command *c)
+		struct nvme_rdma_request *req, struct nvme_command *c,
+		int count)
 {
 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+	struct scatterlist *sgl = req->sg_table.sgl;
+	struct ib_sge *sge = &req->sge[1];
+	u32 len = 0;
+	int i;
 
-	req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
-	req->sge[1].length = sg_dma_len(req->sg_table.sgl);
-	req->sge[1].lkey = queue->device->pd->local_dma_lkey;
+	for (i = 0; i < count; i++, sgl++, sge++) {
+		sge->addr = sg_dma_address(sgl);
+		sge->length = sg_dma_len(sgl);
+		sge->lkey = queue->device->pd->local_dma_lkey;
+		len += sge->length;
+	}
 
 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
-	sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
+	sg->length = cpu_to_le32(len);
 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
 
-	req->num_sge++;
+	req->num_sge += count;
 	return 0;
 }
 
@@ -1193,15 +1205,16 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 		goto out_free_table;
 	}
 
-	if (count == 1) {
+	if (count <= dev->num_inline_segments) {
 		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+		    queue->ctrl->use_inline_data &&
 		    blk_rq_payload_bytes(rq) <=
 				nvme_rdma_inline_data_size(queue)) {
-			ret = nvme_rdma_map_sg_inline(queue, req, c);
+			ret = nvme_rdma_map_sg_inline(queue, req, c, count);
 			goto out;
 		}
 
-		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+		if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
 			ret = nvme_rdma_map_sg_single(queue, req, c);
 			goto out;
 		}
@@ -1974,6 +1987,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		goto out_remove_admin_queue;
 	}
 
+	if (ctrl->ctrl.sgls & (1 << 20))
+		ctrl->use_inline_data = true;
+
 	if (opts->queue_size > ctrl->ctrl.maxcmd) {
 		/* warn if maxcmd is lower than queue_size */
 		dev_warn(ctrl->ctrl.device,
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
  2018-06-19 19:09 [PATCH v5 0/2] NVMF/RDMA 16K Inline Support Steve Wise
  2018-06-18 18:19 ` [PATCH v5 1/2] nvme-rdma: support up to 4 segments of inline data Steve Wise
@ 2018-06-18 18:22 ` Steve Wise
  2018-06-19 22:28   ` Max Gurtovoy
  2018-06-19 21:20 ` [PATCH v5 0/2] NVMF/RDMA 16K Inline Support Max Gurtovoy
  2 siblings, 1 reply; 9+ messages in thread
From: Steve Wise @ 2018-06-18 18:22 UTC (permalink / raw)


This patch enables inline data sizes of up to 16KB, or at least one
page, spread across up to 4 recv SGEs.  So on a 4K page system up to
16KB is supported, and on a 64K page system one 64KB page is supported.

We avoid order > 0 page allocations for the inline buffers by using
multiple recv SGEs, one per page.  If the device cannot support the
configured inline data size due to a lack of recv SGEs, a warning is
logged and the inline size is reduced.

Add a new configfs port attribute, param_inline_data_size, to allow
configuring the inline data size for a given nvmf port.  The maximum
size allowed is still enforced by nvmet-rdma with
NVMET_RDMA_MAX_INLINE_DATA_SIZE, which is now max(16KB, PAGE_SIZE), and
the default size, if not specified via configfs, is still PAGE_SIZE.
This preserves the existing behavior but allows larger inline sizes on
small page systems.  If the configured inline data size exceeds
NVMET_RDMA_MAX_INLINE_DATA_SIZE, a warning is logged and the size is
reduced.  If param_inline_data_size is set to 0, inline data is disabled
for that nvmf port.
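
As a quick check of the arithmetic, the sketch below (plain userspace C,
assuming 4KB pages and the 64-byte SQE size) shows how a configured
inline_data_size maps to the per-command recv page count computed by the
new num_pages() helper and to the IOCCSZ value reported in Identify
Controller:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4KB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SQE_SIZE	64			/* sizeof(struct nvme_command) */

/* same formula as the num_pages() helper added to nvmet-rdma below */
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

int main(void)
{
	int sizes[] = { 0, 4096, 8192, 16384 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int inline_size = sizes[i];

		/* inline_size == 0 disables inline data, so no recv pages */
		printf("inline_data_size=%5d -> recv pages=%d, ioccsz=%d\n",
		       inline_size, inline_size ? num_pages(inline_size) : 0,
		       (SQE_SIZE + inline_size) / 16);
	}
	return 0;
}

So with the 16KB cap on a 4K-page system a command uses 4 recv SGEs for
inline data and the target advertises an IOCCSZ of 1028 (a 16448-byte
command capsule); with inline data disabled, IOCCSZ falls back to 4.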

Signed-off-by: Steve Wise <swise at opengridcomputing.com>
---
 drivers/nvme/target/admin-cmd.c |   4 +-
 drivers/nvme/target/configfs.c  |  31 +++++++
 drivers/nvme/target/core.c      |   4 +
 drivers/nvme/target/discovery.c |   2 +-
 drivers/nvme/target/nvmet.h     |   2 +-
 drivers/nvme/target/rdma.c      | 174 ++++++++++++++++++++++++++++++----------
 6 files changed, 172 insertions(+), 45 deletions(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 3880357..941a574 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -238,14 +238,14 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
 	if (ctrl->ops->has_keyed_sgls)
 		id->sgls |= cpu_to_le32(1 << 2);
-	if (ctrl->ops->sqe_inline_size)
+	if (req->port->inline_data_size)
 		id->sgls |= cpu_to_le32(1 << 20);
 
 	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
 
 	/* Max command capsule size is sqe + single page of in-capsule data */
 	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
-				  ctrl->ops->sqe_inline_size) / 16);
+				  req->port->inline_data_size) / 16);
 	/* Max response capsule size is cqe */
 	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
 
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d3f3b3e..2e556f6 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -218,6 +218,35 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
 
+static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
+}
+
+static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	int ret;
+
+	if (port->enabled) {
+		pr_err("Cannot modify inline_data_size enabled\n");
+		pr_err("Disable the port before modifying\n");
+		return -EACCES;
+	}
+	ret = kstrtoint(page, 0, &port->inline_data_size);
+	if (ret) {
+		pr_err("Invalid value '%s' for inline_data_size\n", page);
+		return -EINVAL;
+	}
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+
 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
 		char *page)
 {
@@ -874,6 +903,7 @@ static void nvmet_port_release(struct config_item *item)
 	&nvmet_attr_addr_traddr,
 	&nvmet_attr_addr_trsvcid,
 	&nvmet_attr_addr_trtype,
+	&nvmet_attr_param_inline_data_size,
 	NULL,
 };
 
@@ -903,6 +933,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
 	INIT_LIST_HEAD(&port->entry);
 	INIT_LIST_HEAD(&port->subsystems);
 	INIT_LIST_HEAD(&port->referrals);
+	port->inline_data_size = -1;	/* < 0 == let the transport choose */
 
 	port->disc_addr.portid = cpu_to_le16(portid);
 	config_group_init_type_name(&port->group, name, &nvmet_port_type);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a03da76..0b73c7c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -241,6 +241,10 @@ int nvmet_enable_port(struct nvmet_port *port)
 		return ret;
 	}
 
+	/* If the transport didn't set inline_data_size, then disable it. */
+	if (port->inline_data_size < 0)
+		port->inline_data_size = 0;
+
 	port->enabled = true;
 	return 0;
 }
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 08656b8..eae29f4 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -171,7 +171,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
 	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
 	if (ctrl->ops->has_keyed_sgls)
 		id->sgls |= cpu_to_le32(1 << 2);
-	if (ctrl->ops->sqe_inline_size)
+	if (req->port->inline_data_size)
 		id->sgls |= cpu_to_le32(1 << 20);
 
 	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 480dfe1..8085679 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -116,6 +116,7 @@ struct nvmet_port {
 	struct list_head		referrals;
 	void				*priv;
 	bool				enabled;
+	int				inline_data_size;
 };
 
 static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -225,7 +226,6 @@ struct nvmet_subsys_link {
 struct nvmet_fabrics_ops {
 	struct module *owner;
 	unsigned int type;
-	unsigned int sqe_inline_size;
 	unsigned int msdbd;
 	bool has_keyed_sgls : 1;
 	void (*queue_response)(struct nvmet_req *req);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 0d7f3d6..b465b9c 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -33,16 +33,17 @@
 #include "nvmet.h"
 
 /*
- * We allow up to a page of inline data to go with the SQE
+ * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
  */
-#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE
+#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
+#define NVMET_RDMA_MAX_INLINE_SGE		4
+#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)
 
 struct nvmet_rdma_cmd {
-	struct ib_sge		sge[2];
+	struct ib_sge		sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
 	struct ib_cqe		cqe;
 	struct ib_recv_wr	wr;
-	struct scatterlist	inline_sg;
-	struct page		*inline_page;
+	struct scatterlist	inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
 	struct nvme_command     *nvme_cmd;
 	struct nvmet_rdma_queue	*queue;
 };
@@ -116,6 +117,8 @@ struct nvmet_rdma_device {
 	size_t			srq_size;
 	struct kref		ref;
 	struct list_head	entry;
+	int			inline_data_size;
+	int			inline_page_count;
 };
 
 static bool nvmet_rdma_use_srq;
@@ -138,6 +141,11 @@ struct nvmet_rdma_device {
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
+static int num_pages(int len)
+{
+	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
+}
+
 /* XXX: really should move to a generic header sooner or later.. */
 static inline u32 get_unaligned_le24(const u8 *p)
 {
@@ -184,6 +192,71 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
 }
 
+static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_cmd *c)
+{
+	struct scatterlist *sg;
+	struct ib_sge *sge;
+	int i;
+
+	if (!ndev->inline_data_size)
+		return;
+
+	sg = c->inline_sg;
+	sge = &c->sge[1];
+
+	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+		if (sge->length)
+			ib_dma_unmap_page(ndev->device, sge->addr,
+					sge->length, DMA_FROM_DEVICE);
+		if (sg_page(sg))
+			__free_page(sg_page(sg));
+	}
+}
+
+static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_cmd *c)
+{
+	struct scatterlist *sg;
+	struct ib_sge *sge;
+	struct page *pg;
+	int len;
+	int i;
+
+	if (!ndev->inline_data_size)
+		return 0;
+
+	sg = c->inline_sg;
+	sg_init_table(sg, ndev->inline_page_count);
+	sge = &c->sge[1];
+	len = ndev->inline_data_size;
+
+	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+		pg = alloc_page(GFP_KERNEL);
+		if (!pg)
+			goto out_err;
+		sg_assign_page(sg, pg);
+		sge->addr = ib_dma_map_page(ndev->device,
+			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (ib_dma_mapping_error(ndev->device, sge->addr))
+			goto out_err;
+		sge->length = min_t(int, len, PAGE_SIZE);
+		sge->lkey = ndev->pd->local_dma_lkey;
+		len -= sge->length;
+	}
+
+	return 0;
+out_err:
+	for (; i >= 0; i--, sg--, sge--) {
+		if (sge->length)
+			ib_dma_unmap_page(ndev->device, sge->addr,
+					sge->length, DMA_FROM_DEVICE);
+		if (sg_page(sg))
+			__free_page(sg_page(sg));
+	}
+	return -ENOMEM;
+}
+
 static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
 			struct nvmet_rdma_cmd *c, bool admin)
 {
@@ -200,33 +273,17 @@ static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
 	c->sge[0].length = sizeof(*c->nvme_cmd);
 	c->sge[0].lkey = ndev->pd->local_dma_lkey;
 
-	if (!admin) {
-		c->inline_page = alloc_pages(GFP_KERNEL,
-				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
-		if (!c->inline_page)
-			goto out_unmap_cmd;
-		c->sge[1].addr = ib_dma_map_page(ndev->device,
-				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
-				DMA_FROM_DEVICE);
-		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
-			goto out_free_inline_page;
-		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
-		c->sge[1].lkey = ndev->pd->local_dma_lkey;
-	}
+	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
+		goto out_unmap_cmd;
 
 	c->cqe.done = nvmet_rdma_recv_done;
 
 	c->wr.wr_cqe = &c->cqe;
 	c->wr.sg_list = c->sge;
-	c->wr.num_sge = admin ? 1 : 2;
+	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
 
 	return 0;
 
-out_free_inline_page:
-	if (!admin) {
-		__free_pages(c->inline_page,
-				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
-	}
 out_unmap_cmd:
 	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
 			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
@@ -240,12 +297,8 @@ static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
 static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
 		struct nvmet_rdma_cmd *c, bool admin)
 {
-	if (!admin) {
-		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
-				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
-		__free_pages(c->inline_page,
-				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
-	}
+	if (!admin)
+		nvmet_rdma_free_inline_pages(ndev, c);
 	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
 				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
 	kfree(c->nvme_cmd);
@@ -429,7 +482,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
 	}
 
-	if (rsp->req.sg != &rsp->cmd->inline_sg)
+	if (rsp->req.sg != rsp->cmd->inline_sg)
 		sgl_free(rsp->req.sg);
 
 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
@@ -529,10 +582,25 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
 		u64 off)
 {
-	sg_init_table(&rsp->cmd->inline_sg, 1);
-	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
-	rsp->req.sg = &rsp->cmd->inline_sg;
-	rsp->req.sg_cnt = 1;
+	int sg_count = num_pages(len);
+	struct scatterlist *sg;
+	int i;
+
+	sg = rsp->cmd->inline_sg;
+	for (i = 0; i < sg_count; i++, sg++) {
+		if (i < sg_count - 1)
+			sg_unmark_end(sg);
+		else
+			sg_mark_end(sg);
+		sg->offset = off;
+		sg->length = min_t(int, len, PAGE_SIZE - off);
+		len -= sg->length;
+		if (!i)
+			off = 0;
+	}
+
+	rsp->req.sg = rsp->cmd->inline_sg;
+	rsp->req.sg_cnt = sg_count;
 }
 
 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
@@ -544,7 +612,7 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
 	if (!nvme_is_write(rsp->req.cmd))
 		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 
-	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
+	if (off + len > rsp->queue->dev->inline_data_size) {
 		pr_err("invalid inline data offset!\n");
 		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
 	}
@@ -743,7 +811,7 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
 	srq_size = 4095;	/* XXX: tune */
 
 	srq_attr.attr.max_wr = srq_size;
-	srq_attr.attr.max_sge = 2;
+	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
 	srq_attr.attr.srq_limit = 0;
 	srq_attr.srq_type = IB_SRQT_BASIC;
 	srq = ib_create_srq(ndev->pd, &srq_attr);
@@ -793,7 +861,10 @@ static void nvmet_rdma_free_dev(struct kref *ref)
 static struct nvmet_rdma_device *
 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
 {
+	struct nvmet_port *port = cm_id->context;
 	struct nvmet_rdma_device *ndev;
+	int inline_page_count;
+	int inline_sge_count;
 	int ret;
 
 	mutex_lock(&device_list_mutex);
@@ -807,6 +878,18 @@ static void nvmet_rdma_free_dev(struct kref *ref)
 	if (!ndev)
 		goto out_err;
 
+	inline_page_count = num_pages(port->inline_data_size);
+	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
+				cm_id->device->attrs.max_recv_sge) - 1;
+	if (inline_page_count > inline_sge_count) {
+		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
+			port->inline_data_size, cm_id->device->name,
+			inline_sge_count * PAGE_SIZE);
+		port->inline_data_size = inline_sge_count * PAGE_SIZE;
+		inline_page_count = inline_sge_count;
+	}
+	ndev->inline_data_size = port->inline_data_size;
+	ndev->inline_page_count = inline_page_count;
 	ndev->device = cm_id->device;
 	kref_init(&ndev->ref);
 
@@ -881,7 +964,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
 	} else {
 		/* +1 for drain */
 		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
-		qp_attr.cap.max_recv_sge = 2;
+		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
 	}
 
 	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
@@ -1379,6 +1462,15 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
 		return -EINVAL;
 	}
 
+	if (port->inline_data_size < 0) {
+		port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+	} else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+		pr_warn("inline_data_size %u is too large, reducing to %u\n",
+			port->inline_data_size,
+			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+		port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+	}
+
 	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
 			port->disc_addr.trsvcid, &addr);
 	if (ret) {
@@ -1418,8 +1510,9 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
 		goto out_destroy_id;
 	}
 
-	pr_info("enabling port %d (%pISpcs)\n",
-		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
+	pr_info("enabling port %d (%pISpcs) inline_data_size %d\n",
+		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr,
+		port->inline_data_size);
 	port->priv = cm_id;
 	return 0;
 
@@ -1456,7 +1549,6 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
 static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
 	.owner			= THIS_MODULE,
 	.type			= NVMF_TRTYPE_RDMA,
-	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
 	.msdbd			= 1,
 	.has_keyed_sgls		= 1,
 	.add_port		= nvmet_rdma_add_port,
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH v5 0/2] NVMF/RDMA 16K Inline Support
@ 2018-06-19 19:09 Steve Wise
  2018-06-18 18:19 ` [PATCH v5 1/2] nvme-rdma: support up to 4 segments of inline data Steve Wise
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Steve Wise @ 2018-06-19 19:09 UTC (permalink / raw)


Hey,

For small nvmf write IO over the rdma transport, it is advantageous to
make use of inline mode to avoid the latency of the target issuing an
rdma read to fetch the data.  Currently inline is used for <= 4K writes;
8K, though, requires the rdma read.  For iWARP transports, additional
latency is incurred because the target MR of the read must be registered
with remote write access.  By allowing 2 pages worth of inline payload,
I see a reduction in 8K nvmf write latency of anywhere from 2-7 usecs,
depending on the RDMA transport.

This series is a respin of a series floated last year by Parav and Max
[1].  I'm continuing it now and have addressed some of the comments from
their submission [2].

The performance improvements below were achieved.  Applications doing
8K or 16K WRITEs will benefit most from this enhancement.

WRITE IOPS:
8 nullb devices, 16 connections/device,
16 cores, 1 host, 1 target,
fio randwrite, direct io, ioqdepth=256, jobs=16

             %CPU Idle                   KIOPS        
inline size  4K        8K        16K     4K        8K        16K
io size 
4K            9.36     10.47     10.44   1707      1662      1704
8K           39.07     43.66     46.84    894      1000      1002
16K          64.15     64.79     71.1     566      569        607
32K          78.84     79.5      79.89    326      329        327

WRITE Latency:
1 nullb device, 1 connection/device,
fio randwrite, direct io, ioqdepth=1, jobs=1

             Usecs
inline size  4K        8K        16K
io size
4K           12.4      12.4      12.5
8K           18.3      13        13.1
16K          20.3      20.2      14.2
32K          23.2      23.2      23.4

Changes since v4:

- rebased on 4.18-rc1.

- add perf results to cover letter.

- removed patch 1 - it has been merged.

Changes since v3:

- nvme-rdma: remove pr_debug.

- nvme-rdma: add Sagi's reviewed-by tag.

- nvmet-rdma: avoid order > 0 page allocations for inline data buffers
by using multiple sges.  If the device cannot support the required sge
depth, then reduce the inline data size to fit.

- nvmet-rdma: set max_recv_sge correctly

- nvmet-rdma: if the configured inline data size exceeds the max
supported by the rdma transport, a warning is logged and the size
is reduced.

Changes since RFC v2:

- Removed RFC tag

- prefix the inline_data_size configfs attribute with param_

- implementation/formatting tweaks suggested by Christoph

- support inline_data_size of 0, which disables inline data use

- added a new patch to fix the check for keyed sgls (bit 2 instead of 20).

- check the inline_data bit (bit 20 in the ctrl.sgls field) when
connecting and only use inline if it was set for that device.

- added Christoph's Reviewed-by tag for patch 1

[1] Original submissions:
http://lists.infradead.org/pipermail/linux-nvme/2017-February/008057.html
http://lists.infradead.org/pipermail/linux-nvme/2017-February/008059.html


[2] These comments from [1] have been addressed:

- nvme-rdma: Support up to 4 segments of inline data.

- nvme-rdma: Cap the number of inline segments to not exceed device limitations.

- nvmet-rdma: Make the inline data size configurable in nvmet-rdma via configfs.

- nvmet-rdma: avoid > 0 order page allocations

Other issues from [1] that I don't plan to incorporate into the series:

- nvme-rdma: make the sge array for inline segments dynamic based on the
target's advertised inline_data_size.  Since we're limiting the max count
to 4, I'm not sure this is worth the complexity of allocating the sge array
vs just embedding the max.

- nvmet-rdma: reduce the qp depth if the inline size greatly increases
the memory footprint.  I'm not sure how to do this in a reasonable manner.
Since the inline data size is now configurable, do we still need this?

- nvmet-rdma: make the qp depth configurable so the admin can reduce it
manually to lower the memory footprint.

Steve Wise (2):
  nvme-rdma: support up to 4 segments of inline data
  nvmet-rdma: support max(16KB, PAGE_SIZE) inline data

 drivers/nvme/host/rdma.c        |  38 ++++++---
 drivers/nvme/target/admin-cmd.c |   4 +-
 drivers/nvme/target/configfs.c  |  31 +++++++
 drivers/nvme/target/core.c      |   4 +
 drivers/nvme/target/discovery.c |   2 +-
 drivers/nvme/target/nvmet.h     |   2 +-
 drivers/nvme/target/rdma.c      | 174 ++++++++++++++++++++++++++++++----------
 7 files changed, 199 insertions(+), 56 deletions(-)

-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH v5 0/2] NVMF/RDMA 16K Inline Support
  2018-06-19 19:09 [PATCH v5 0/2] NVMF/RDMA 16K Inline Support Steve Wise
  2018-06-18 18:19 ` [PATCH v5 1/2] nvme-rdma: support up to 4 segments of inline data Steve Wise
  2018-06-18 18:22 ` [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) " Steve Wise
@ 2018-06-19 21:20 ` Max Gurtovoy
  2018-06-19 21:40   ` Steve Wise
  2 siblings, 1 reply; 9+ messages in thread
From: Max Gurtovoy @ 2018-06-19 21:20 UTC (permalink / raw)




On 6/19/2018 10:09 PM, Steve Wise wrote:
> Hey,
> 
> For small nvmf write IO over the rdma transport, it is advantagous to
> make use of inline mode to avoid the latency of the target issuing an
> rdma read to fetch the data.  Currently inline is used for <= 4K writes.
> 8K, though, requires the rdma read.  For iWARP transports additional
> latency is incurred because the target mr of the read must be registered
> with remote write access.  By allowing 2 pages worth of inline payload,
> I see a reduction in 8K nvmf write latency of anywhere from 2-7 usecs
> depending on the RDMA transport..
> 
> This series is a respin of a series floated last year by Parav and Max
> [1].  I'm continuing it now and have addressed some of the comments from
> their submission [2].
> 
> The below performance improvements are achieved.  Applications doing
> 8K or 16K WRITEs will benefit most from this enhancement.
> 
> WRITE IOPS:
> 8 nullb devices, 16 connections/device,
> 16 cores, 1 host, 1 target,
> fio randwrite, direct io, ioqdepth=256, jobs=16
> 
>               %CPU Idle                   KIOPS
> inline size  4K        8K        16K     4K        8K        16K
> io size
> 4K            9.36     10.47     10.44   1707      1662      1704
> 8K           39.07     43.66     46.84    894      1000      1002
> 16K          64.15     64.79     71.1     566      569        607
> 32K          78.84     79.5      79.89    326      329        327
> 
> WRITE Latency:
> 1 nullb device, 1 connection/device,
> fio randwrite, direct io, ioqdepth=1, jobs=1
> 
>               Usecs
> inline size  4K        8K        16K
> io size
> 4K           12.4      12.4      12.5
> 8K           18.3      13        13.1
> 16K          20.3      20.2      14.2
> 32K          23.2      23.2      23.4

The code looks good to me.
I'll run some benchmarks tomorrow, hopefully using Mellanox adapters, and
share results before/after the patches.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH v5 0/2] NVMF/RDMA 16K Inline Support
  2018-06-19 21:20 ` [PATCH v5 0/2] NVMF/RDMA 16K Inline Support Max Gurtovoy
@ 2018-06-19 21:40   ` Steve Wise
  0 siblings, 0 replies; 9+ messages in thread
From: Steve Wise @ 2018-06-19 21:40 UTC (permalink / raw)




On 6/19/2018 4:20 PM, Max Gurtovoy wrote:
>
>
> The code looks good to me.
> I'll run some benchmarks tomorrow hopefully using Mellanox adapters
> and share result before/after the patches.

That would be great!

Thanks,

Steve.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
  2018-06-18 18:22 ` [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) " Steve Wise
@ 2018-06-19 22:28   ` Max Gurtovoy
  2018-06-20 14:00     ` Steve Wise
  0 siblings, 1 reply; 9+ messages in thread
From: Max Gurtovoy @ 2018-06-19 22:28 UTC (permalink / raw)




On 6/18/2018 9:22 PM, Steve Wise wrote:
> diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
> index d3f3b3e..2e556f6 100644
> --- a/drivers/nvme/target/configfs.c
> +++ b/drivers/nvme/target/configfs.c
> @@ -218,6 +218,35 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
>   
>   CONFIGFS_ATTR(nvmet_, addr_trsvcid);
>   
> +static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
> +		char *page)
> +{
> +	struct nvmet_port *port = to_nvmet_port(item);
> +
> +	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
> +}
> +
> +static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
> +		const char *page, size_t count)
> +{
> +	struct nvmet_port *port = to_nvmet_port(item);
> +	int ret;
> +
> +	if (port->enabled) {
> +		pr_err("Cannot modify inline_data_size enabled\n");

minor fix for the error print:
"Cannot modify inline_data_size while enabled" or "Cannot modify 
inline_data_size while port enabled"

> +		pr_err("Disable the port before modifying\n");
> +		return -EACCES;
> +	}
> +	ret = kstrtoint(page, 0, &port->inline_data_size);
> +	if (ret) {
> +		pr_err("Invalid value '%s' for inline_data_size\n", page);
> +		return -EINVAL;
> +	}
> +	return count;
> +}
> +
> +CONFIGFS_ATTR(nvmet_, param_inline_data_size);
> +
>   static ssize_t nvmet_addr_trtype_show(struct config_item *item,
>   		char *page)
>   {
> @@ -874,6 +903,7 @@ static void nvmet_port_release(struct config_item *item)
>   	&nvmet_attr_addr_traddr,
>   	&nvmet_attr_addr_trsvcid,
>   	&nvmet_attr_addr_trtype,
> +	&nvmet_attr_param_inline_data_size,
>   	NULL,
>   };
>   
> @@ -903,6 +933,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
>   	INIT_LIST_HEAD(&port->entry);
>   	INIT_LIST_HEAD(&port->subsystems);
>   	INIT_LIST_HEAD(&port->referrals);
> +	port->inline_data_size = -1;	/* < 0 == let the transport choose */
>   
>   	port->disc_addr.portid = cpu_to_le16(portid);
>   	config_group_init_type_name(&port->group, name, &nvmet_port_type);
> diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
> index a03da76..0b73c7c 100644
> --- a/drivers/nvme/target/core.c
> +++ b/drivers/nvme/target/core.c
> @@ -241,6 +241,10 @@ int nvmet_enable_port(struct nvmet_port *port)
>   		return ret;
>   	}
>   
> +	/* If the transport didn't set inline_data_size, then disable it. */
> +	if (port->inline_data_size < 0)
> +		port->inline_data_size = 0;
> +
>   	port->enabled = true;
>   	return 0;
>   }
> diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
> index 08656b8..eae29f4 100644
> --- a/drivers/nvme/target/discovery.c
> +++ b/drivers/nvme/target/discovery.c
> @@ -171,7 +171,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
>   	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
>   	if (ctrl->ops->has_keyed_sgls)
>   		id->sgls |= cpu_to_le32(1 << 2);
> -	if (ctrl->ops->sqe_inline_size)
> +	if (req->port->inline_data_size)
>   		id->sgls |= cpu_to_le32(1 << 20);
>   
>   	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
> diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
> index 480dfe1..8085679 100644
> --- a/drivers/nvme/target/nvmet.h
> +++ b/drivers/nvme/target/nvmet.h
> @@ -116,6 +116,7 @@ struct nvmet_port {
>   	struct list_head		referrals;
>   	void				*priv;
>   	bool				enabled;
> +	int				inline_data_size;
>   };
>   
>   static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
> @@ -225,7 +226,6 @@ struct nvmet_subsys_link {
>   struct nvmet_fabrics_ops {
>   	struct module *owner;
>   	unsigned int type;
> -	unsigned int sqe_inline_size;
>   	unsigned int msdbd;
>   	bool has_keyed_sgls : 1;
>   	void (*queue_response)(struct nvmet_req *req);
> diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
> index 0d7f3d6..b465b9c 100644
> --- a/drivers/nvme/target/rdma.c
> +++ b/drivers/nvme/target/rdma.c
> @@ -33,16 +33,17 @@
>   #include "nvmet.h"
>   
>   /*
> - * We allow up to a page of inline data to go with the SQE
> + * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
>    */
> -#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE
> +#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
> +#define NVMET_RDMA_MAX_INLINE_SGE		4
> +#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)
>   
>   struct nvmet_rdma_cmd {
> -	struct ib_sge		sge[2];
> +	struct ib_sge		sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
>   	struct ib_cqe		cqe;
>   	struct ib_recv_wr	wr;
> -	struct scatterlist	inline_sg;
> -	struct page		*inline_page;
> +	struct scatterlist	inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
>   	struct nvme_command     *nvme_cmd;
>   	struct nvmet_rdma_queue	*queue;
>   };
> @@ -116,6 +117,8 @@ struct nvmet_rdma_device {
>   	size_t			srq_size;
>   	struct kref		ref;
>   	struct list_head	entry;
> +	int			inline_data_size;
> +	int			inline_page_count;
>   };
>   
>   static bool nvmet_rdma_use_srq;
> @@ -138,6 +141,11 @@ struct nvmet_rdma_device {
>   
>   static const struct nvmet_fabrics_ops nvmet_rdma_ops;
>   
> +static int num_pages(int len)
> +{
> +	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
> +}
> +
>   /* XXX: really should move to a generic header sooner or later.. */
>   static inline u32 get_unaligned_le24(const u8 *p)
>   {
> @@ -184,6 +192,71 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
>   	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
>   }
>   
> +static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
> +				struct nvmet_rdma_cmd *c)
> +{
> +	struct scatterlist *sg;
> +	struct ib_sge *sge;
> +	int i;
> +
> +	if (!ndev->inline_data_size)
> +		return;
> +
> +	sg = c->inline_sg;
> +	sge = &c->sge[1];
> +
> +	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
> +		if (sge->length)
> +			ib_dma_unmap_page(ndev->device, sge->addr,
> +					sge->length, DMA_FROM_DEVICE);
> +		if (sg_page(sg))
> +			__free_page(sg_page(sg));
> +	}
> +}
> +
> +static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
> +				struct nvmet_rdma_cmd *c)
> +{
> +	struct scatterlist *sg;
> +	struct ib_sge *sge;
> +	struct page *pg;
> +	int len;
> +	int i;
> +
> +	if (!ndev->inline_data_size)
> +		return 0;
> +
> +	sg = c->inline_sg;
> +	sg_init_table(sg, ndev->inline_page_count);
> +	sge = &c->sge[1];
> +	len = ndev->inline_data_size;
> +
> +	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
> +		pg = alloc_page(GFP_KERNEL);
> +		if (!pg)
> +			goto out_err;
> +		sg_assign_page(sg, pg);
> +		sge->addr = ib_dma_map_page(ndev->device,
> +			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
> +		if (ib_dma_mapping_error(ndev->device, sge->addr))
> +			goto out_err;
> +		sge->length = min_t(int, len, PAGE_SIZE);
> +		sge->lkey = ndev->pd->local_dma_lkey;
> +		len -= sge->length;
> +	}
> +
> +	return 0;
> +out_err:
> +	for (; i >= 0; i--, sg--, sge--) {
> +		if (sge->length)
> +			ib_dma_unmap_page(ndev->device, sge->addr,
> +					sge->length, DMA_FROM_DEVICE);
> +		if (sg_page(sg))
> +			__free_page(sg_page(sg));
> +	}
> +	return -ENOMEM;
> +}
> +
>   static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
>   			struct nvmet_rdma_cmd *c, bool admin)
>   {
> @@ -200,33 +273,17 @@ static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
>   	c->sge[0].length = sizeof(*c->nvme_cmd);
>   	c->sge[0].lkey = ndev->pd->local_dma_lkey;
>   
> -	if (!admin) {
> -		c->inline_page = alloc_pages(GFP_KERNEL,
> -				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
> -		if (!c->inline_page)
> -			goto out_unmap_cmd;
> -		c->sge[1].addr = ib_dma_map_page(ndev->device,
> -				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
> -				DMA_FROM_DEVICE);
> -		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
> -			goto out_free_inline_page;
> -		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
> -		c->sge[1].lkey = ndev->pd->local_dma_lkey;
> -	}
> +	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
> +		goto out_unmap_cmd;
>   
>   	c->cqe.done = nvmet_rdma_recv_done;
>   
>   	c->wr.wr_cqe = &c->cqe;
>   	c->wr.sg_list = c->sge;
> -	c->wr.num_sge = admin ? 1 : 2;
> +	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
>   
>   	return 0;
>   
> -out_free_inline_page:
> -	if (!admin) {
> -		__free_pages(c->inline_page,
> -				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
> -	}
>   out_unmap_cmd:
>   	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
>   			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
> @@ -240,12 +297,8 @@ static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
>   static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
>   		struct nvmet_rdma_cmd *c, bool admin)
>   {
> -	if (!admin) {
> -		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
> -				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
> -		__free_pages(c->inline_page,
> -				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
> -	}
> +	if (!admin)
> +		nvmet_rdma_free_inline_pages(ndev, c);
>   	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
>   				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
>   	kfree(c->nvme_cmd);
> @@ -429,7 +482,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
>   				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
>   	}
>   
> -	if (rsp->req.sg != &rsp->cmd->inline_sg)
> +	if (rsp->req.sg != rsp->cmd->inline_sg)
>   		sgl_free(rsp->req.sg);
>   
>   	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
> @@ -529,10 +582,25 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
>   static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
>   		u64 off)
>   {
> -	sg_init_table(&rsp->cmd->inline_sg, 1);
> -	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
> -	rsp->req.sg = &rsp->cmd->inline_sg;
> -	rsp->req.sg_cnt = 1;
> +	int sg_count = num_pages(len);
> +	struct scatterlist *sg;
> +	int i;
> +
> +	sg = rsp->cmd->inline_sg;
> +	for (i = 0; i < sg_count; i++, sg++) {
> +		if (i < sg_count - 1)
> +			sg_unmark_end(sg);
> +		else
> +			sg_mark_end(sg);
> +		sg->offset = off;
> +		sg->length = min_t(int, len, PAGE_SIZE - off);
> +		len -= sg->length;
> +		if (!i)
> +			off = 0;
> +	}
> +
> +	rsp->req.sg = rsp->cmd->inline_sg;
> +	rsp->req.sg_cnt = sg_count;
>   }
>   
>   static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
> @@ -544,7 +612,7 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
>   	if (!nvme_is_write(rsp->req.cmd))
>   		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>   
> -	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
> +	if (off + len > rsp->queue->dev->inline_data_size) {
>   		pr_err("invalid inline data offset!\n");
>   		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
>   	}
> @@ -743,7 +811,7 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
>   	srq_size = 4095;	/* XXX: tune */
>   
>   	srq_attr.attr.max_wr = srq_size;
> -	srq_attr.attr.max_sge = 2;
> +	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
>   	srq_attr.attr.srq_limit = 0;
>   	srq_attr.srq_type = IB_SRQT_BASIC;
>   	srq = ib_create_srq(ndev->pd, &srq_attr);
> @@ -793,7 +861,10 @@ static void nvmet_rdma_free_dev(struct kref *ref)
>   static struct nvmet_rdma_device *
>   nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
>   {
> +	struct nvmet_port *port = cm_id->context;
>   	struct nvmet_rdma_device *ndev;
> +	int inline_page_count;
> +	int inline_sge_count;
>   	int ret;
>   
>   	mutex_lock(&device_list_mutex);
> @@ -807,6 +878,18 @@ static void nvmet_rdma_free_dev(struct kref *ref)
>   	if (!ndev)
>   		goto out_err;
>   
> +	inline_page_count = num_pages(port->inline_data_size);
> +	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
> +				cm_id->device->attrs.max_recv_sge) - 1;
> +	if (inline_page_count > inline_sge_count) {
> +		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
> +			port->inline_data_size, cm_id->device->name,
> +			inline_sge_count * PAGE_SIZE);
> +		port->inline_data_size = inline_sge_count * PAGE_SIZE;
> +		inline_page_count = inline_sge_count;
> +	}
> +	ndev->inline_data_size = port->inline_data_size;
> +	ndev->inline_page_count = inline_page_count;
>   	ndev->device = cm_id->device;
>   	kref_init(&ndev->ref);
>   
> @@ -881,7 +964,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
>   	} else {
>   		/* +1 for drain */
>   		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
> -		qp_attr.cap.max_recv_sge = 2;
> +		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
>   	}
>   
>   	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
> @@ -1379,6 +1462,15 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
>   		return -EINVAL;
>   	}
>   
> +	if (port->inline_data_size < 0) {
> +		port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
> +	} else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
> +		pr_warn("inline_data_size %u is too large, reducing to %u\n",
> +			port->inline_data_size,
> +			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
> +		port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
> +	}
> +
>   	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
>   			port->disc_addr.trsvcid, &addr);
>   	if (ret) {
> @@ -1418,8 +1510,9 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
>   		goto out_destroy_id;
>   	}
>   
> -	pr_info("enabling port %d (%pISpcs)\n",
> -		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
> +	pr_info("enabling port %d (%pISpcs) inline_data_size %d\n",
> +		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr,
> +		port->inline_data_size);
>   	port->priv = cm_id;
>   	return 0;
>   
> @@ -1456,7 +1549,6 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
>   static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
>   	.owner			= THIS_MODULE,
>   	.type			= NVMF_TRTYPE_RDMA,
> -	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
>   	.msdbd			= 1,
>   	.has_keyed_sgls		= 1,
>   	.add_port		= nvmet_rdma_add_port,
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
  2018-06-19 22:28   ` Max Gurtovoy
@ 2018-06-20 14:00     ` Steve Wise
  2018-06-21 14:37       ` Steve Wise
  0 siblings, 1 reply; 9+ messages in thread
From: Steve Wise @ 2018-06-20 14:00 UTC (permalink / raw)




On 6/19/2018 5:28 PM, Max Gurtovoy wrote:
>
>
> On 6/18/2018 9:22 PM, Steve Wise wrote:
>> +	if (port->enabled) {
>> +		pr_err("Cannot modify inline_data_size enabled\n");
>
> minor fix for the error print:
> "Cannot modify inline_data_size while enabled" or "Cannot modify
> inline_data_size while port enabled"
>

Thanks, I left out the "while".  But I'll use your second suggestion as
it is more clear.


Steve.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
  2018-06-20 14:00     ` Steve Wise
@ 2018-06-21 14:37       ` Steve Wise
  2018-06-24  9:31         ` Max Gurtovoy
  0 siblings, 1 reply; 9+ messages in thread
From: Steve Wise @ 2018-06-21 14:37 UTC (permalink / raw)




On 6/20/2018 9:00 AM, Steve Wise wrote:
>
> On 6/19/2018 5:28 PM, Max Gurtovoy wrote:
>>
>> On 6/18/2018 9:22 PM, Steve Wise wrote:
>>> +	if (port->enabled) {
>>> +		pr_err("Cannot modify inline_data_size enabled\n");
>> minor fix for the error print:
>> "Cannot modify inline_data_size while enabled" or "Cannot modify
>> inline_data_size while port enabled"
>>
> Thanks, I left out the "while".  But I'll use your second suggestion as
> it is more clear.
>

By the way, may I add your Reviewed-by tag for these 2 patches?

Thanks,

Steve.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) inline data
  2018-06-21 14:37       ` Steve Wise
@ 2018-06-24  9:31         ` Max Gurtovoy
  0 siblings, 0 replies; 9+ messages in thread
From: Max Gurtovoy @ 2018-06-24  9:31 UTC (permalink / raw)




On 6/21/2018 5:37 PM, Steve Wise wrote:
> 
> 
> On 6/20/2018 9:00 AM, Steve Wise wrote:
>>
>> On 6/19/2018 5:28 PM, Max Gurtovoy wrote:
>>>
>>> On 6/18/2018 9:22 PM, Steve Wise wrote:
>>>> +	if (port->enabled) {
>>>> +		pr_err("Cannot modify inline_data_size enabled\n");
>>> minor fix for the error print:
>>> "Cannot modify inline_data_size while enabled" or "Cannot modify
>>> inline_data_size while port enabled"
>>>
>> Thanks, I left out the "while".  But I'll use your second suggestion as
>> it is more clear.
>>
> 
> By the way, may I add your Reviewed-by tag for these 2 patches?

Yes,

Reviewed-by: Max Gurtovoy <maxg at mellanox.com>


I think for the future we can add another dir under <port> and push
param_inline_data_size there, along with other configurable params that
can be associated with a port (such as mdts, for example).

> 
> Thanks,
> 
> Steve.

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread

Thread overview: 9+ messages
2018-06-19 19:09 [PATCH v5 0/2] NVMF/RDMA 16K Inline Support Steve Wise
2018-06-18 18:19 ` [PATCH v5 1/2] nvme-rdma: support up to 4 segments of inline data Steve Wise
2018-06-18 18:22 ` [PATCH v5 2/2] nvmet-rdma: support max(16KB, PAGE_SIZE) " Steve Wise
2018-06-19 22:28   ` Max Gurtovoy
2018-06-20 14:00     ` Steve Wise
2018-06-21 14:37       ` Steve Wise
2018-06-24  9:31         ` Max Gurtovoy
2018-06-19 21:20 ` [PATCH v5 0/2] NVMF/RDMA 16K Inline Support Max Gurtovoy
2018-06-19 21:40   ` Steve Wise
