Linux-NVME Archive on lore.kernel.org
 help / color / Atom feed
* [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
@ 2019-10-01 23:13 Chaitanya Kulkarni
  2019-10-02  5:47 ` Javier González
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Chaitanya Kulkarni @ 2019-10-01 23:13 UTC (permalink / raw)
  To: linux-nvme; +Cc: Chaitanya Kulkarni

Over the period of time the __nvme_submit_sync_cmd() function has grown to
accept a large number of parameters. The function __nvme_submit_sync_cmd()
now takes 10 parameters. This patch consolidates all the parameters into
one defined structure.

This makes calls to the same function easy to read and improves overall
code readability.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
Hi,

	I've compile tested this patch. Once we agree on this change
	I'll send out a tested version.

Regards,
Chaitanya
---
 drivers/nvme/host/core.c    | 78 +++++++++++++++++++++---------
 drivers/nvme/host/fabrics.c | 96 ++++++++++++++++++++++++++++---------
 drivers/nvme/host/nvme.h    | 18 +++++--
 3 files changed, 142 insertions(+), 50 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e35615940365..9be51df8fb2f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -769,32 +769,30 @@ static void nvme_execute_rq_polled(struct request_queue *q,
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		union nvme_result *result, void *buffer, unsigned bufflen,
-		unsigned timeout, int qid, int at_head,
-		blk_mq_req_flags_t flags, bool poll)
+int __nvme_submit_sync_cmd(struct nvme_submit_sync_data *d)
 {
 	struct request *req;
 	int ret;
 
-	req = nvme_alloc_request(q, cmd, flags, qid);
+	req = nvme_alloc_request(d->q, d->cmd, d->flags, d->qid);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+	req->timeout = d->timeout ? d->timeout : ADMIN_TIMEOUT;
 
-	if (buffer && bufflen) {
-		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
+	if (d->buffer && d->bufflen) {
+		ret = blk_rq_map_kern(d->q, req, d->buffer, d->bufflen,
+				      GFP_KERNEL);
 		if (ret)
 			goto out;
 	}
 
-	if (poll)
-		nvme_execute_rq_polled(req->q, NULL, req, at_head);
+	if (d->poll)
+		nvme_execute_rq_polled(req->q, NULL, req, d->at_head);
 	else
-		blk_execute_rq(req->q, NULL, req, at_head);
-	if (result)
-		*result = nvme_req(req)->result;
+		blk_execute_rq(req->q, NULL, req, d->at_head);
+	if (d->result)
+		*(d->result) = nvme_req(req)->result;
 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
 		ret = -EINTR;
 	else
@@ -808,8 +806,20 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buffer, unsigned bufflen)
 {
-	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
-			NVME_QID_ANY, 0, 0, false);
+	struct nvme_submit_sync_data d = {
+		.q = q,
+		.cmd = cmd,
+		.result = NULL,
+		.buffer = buffer,
+		.bufflen = bufflen,
+		.timeout = 0,
+		.qid = NVME_QID_ANY,
+		.at_head = 0,
+		.flags = 0,
+		.poll = false
+	};
+
+	return __nvme_submit_sync_cmd(&d);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -1114,19 +1124,30 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
 }
 
 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
-		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
+			 unsigned int dword11, void *buffer, size_t buflen,
+			 u32 *result)
 {
-	struct nvme_command c;
+	struct nvme_command c = {};
 	union nvme_result res;
 	int ret;
+	struct nvme_submit_sync_data d = {
+		.q = dev->admin_q,
+		.cmd = &c,
+		.result = &res,
+		.buffer = buffer,
+		.bufflen = buflen,
+		.timeout = 0,
+		.qid = NVME_QID_ANY,
+		.at_head = 0,
+		.flags = 0,
+		.poll = false,
+	};
 
-	memset(&c, 0, sizeof(c));
 	c.features.opcode = op;
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
-			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (ret >= 0 && result)
 		*result = le32_to_cpu(res.u32);
 	return ret;
@@ -1954,10 +1975,22 @@ static const struct pr_ops nvme_pr_ops = {
 
 #ifdef CONFIG_BLK_SED_OPAL
 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
-		bool send)
+		    bool send)
 {
 	struct nvme_ctrl *ctrl = data;
 	struct nvme_command cmd;
+	struct nvme_submit_sync_data d = {
+		.q = ctrl->admin_q,
+		.cmd = &cmd,
+		.result = NULL,
+		.buffer = buffer,
+		.bufflen = len,
+		.timeout = ADMIN_TIMEOUT,
+		.qid = NVME_QID_ANY,
+		.at_head = 1,
+		.flags = 0,
+		.poll = false,
+	};
 
 	memset(&cmd, 0, sizeof(cmd));
 	if (send)
@@ -1968,8 +2001,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
 	cmd.common.cdw11 = cpu_to_le32(len);
 
-	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
-				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
+	return __nvme_submit_sync_cmd(&d);
 }
 EXPORT_SYMBOL_GPL(nvme_sec_submit);
 #endif /* CONFIG_BLK_SED_OPAL */
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 74b8818ac9a1..c5a8aec5f046 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -144,14 +144,25 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	struct nvme_command cmd;
 	union nvme_result res;
 	int ret;
+	struct nvme_submit_sync_data d = {
+		.q = ctrl->fabrics_q,
+		.cmd = &cmd,
+		.result = &res,
+		.buffer = NULL,
+		.bufflen = 0,
+		.timeout = 0,
+		.qid = NVME_QID_ANY,
+		.at_head = 0,
+		.flags = 0,
+		.poll = false
+	};
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.prop_get.opcode = nvme_fabrics_command;
 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
-			NVME_QID_ANY, 0, 0, false);
+	ret = __nvme_submit_sync_cmd(&d);
 
 	if (ret >= 0)
 		*val = le64_to_cpu(res.u64);
@@ -190,6 +201,18 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	struct nvme_command cmd;
 	union nvme_result res;
 	int ret;
+	struct nvme_submit_sync_data d = {
+		.q = ctrl->fabrics_q,
+		.cmd = &cmd,
+		.result = &res,
+		.buffer = NULL,
+		.bufflen = 0,
+		.timeout = 0,
+		.qid = NVME_QID_ANY,
+		.at_head = 0,
+		.flags = 0,
+		.poll = false
+	};
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.prop_get.opcode = nvme_fabrics_command;
@@ -197,9 +220,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	cmd.prop_get.attrib = 1;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
-			NVME_QID_ANY, 0, 0, false);
-
+	ret = __nvme_submit_sync_cmd(&d);
 	if (ret >= 0)
 		*val = le64_to_cpu(res.u64);
 	if (unlikely(ret != 0))
@@ -235,6 +256,18 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 {
 	struct nvme_command cmd;
 	int ret;
+	struct nvme_submit_sync_data d = {
+		.q = ctrl->fabrics_q,
+		.cmd = &cmd,
+		.result = NULL,
+		.buffer = NULL,
+		.bufflen = 0,
+		.timeout = 0,
+		.qid = NVME_QID_ANY,
+		.at_head = 0,
+		.flags = 0,
+		.poll = false
+	};
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.prop_set.opcode = nvme_fabrics_command;
@@ -243,8 +276,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 	cmd.prop_set.offset = cpu_to_le32(off);
 	cmd.prop_set.value = cpu_to_le64(val);
 
-	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
-			NVME_QID_ANY, 0, 0, false);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (unlikely(ret))
 		dev_err(ctrl->device,
 			"Property Set error: %d, offset %#x\n",
@@ -366,10 +398,25 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
  */
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 {
+	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 	struct nvme_command cmd;
 	union nvme_result res;
-	struct nvmf_connect_data *data;
 	int ret;
+	struct nvme_submit_sync_data d = {
+		.q = ctrl->fabrics_q,
+		.cmd = &cmd,
+		.result = &res,
+		.buffer = data,
+		.bufflen = sizeof(*data),
+		.timeout = 0,
+		.qid = NVME_QID_ANY,
+		.at_head = 1,
+		.flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT,
+		.poll = false
+	};
+
+	if (!data)
+		return -ENOMEM;
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.connect.opcode = nvme_fabrics_command;
@@ -387,18 +434,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	if (ctrl->opts->disable_sqflow)
 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
 	data->cntlid = cpu_to_le16(0xffff);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
-			data, sizeof(*data), 0, NVME_QID_ANY, 1,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (ret) {
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
@@ -436,10 +477,25 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
  */
 int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
 {
+	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 	struct nvme_command cmd;
-	struct nvmf_connect_data *data;
 	union nvme_result res;
 	int ret;
+	struct nvme_submit_sync_data d = {
+		.q = ctrl->connect_q,
+		.cmd = &cmd,
+		.result = &res,
+		.buffer = data,
+		.bufflen = sizeof(*data),
+		.timeout = 0,
+		.qid = qid,
+		.at_head = 1,
+		.flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT,
+		.poll = poll
+	};
+
+	if (!data)
+		return -ENOMEM;
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.connect.opcode = nvme_fabrics_command;
@@ -450,18 +506,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
 	if (ctrl->opts->disable_sqflow)
 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
 	data->cntlid = cpu_to_le16(ctrl->cntlid);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
-			data, sizeof(*data), 0, qid, 1,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (ret) {
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 38a83ef5bcd3..f172d8e02fc6 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -395,6 +395,19 @@ struct nvme_ctrl_ops {
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 };
 
+struct nvme_submit_sync_data {
+	struct request_queue *q;
+	struct nvme_command *cmd;
+	union nvme_result *result;
+	void *buffer;
+	unsigned bufflen;
+	unsigned timeout;
+	int qid;
+	int at_head;
+	blk_mq_req_flags_t flags;
+	bool poll;
+};
+
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
 			    const char *dev_name);
@@ -485,10 +498,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
-int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		union nvme_result *result, void *buffer, unsigned bufflen,
-		unsigned timeout, int qid, int at_head,
-		blk_mq_req_flags_t flags, bool poll);
+int __nvme_submit_sync_cmd(struct nvme_submit_sync_data *d);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
 		      unsigned int dword11, void *buffer, size_t buflen,
 		      u32 *result);
-- 
2.22.1


_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
  2019-10-01 23:13 [PATCH] nvme: define struct for __nvme_submit_sync_cmd() Chaitanya Kulkarni
@ 2019-10-02  5:47 ` Javier González
  2019-10-02  6:10   ` Chaitanya Kulkarni
  2019-10-05  0:09 ` Sagi Grimberg
  2019-10-06 10:12 ` Christoph Hellwig
  2 siblings, 1 reply; 8+ messages in thread
From: Javier González @ 2019-10-02  5:47 UTC (permalink / raw)
  To: Chaitanya Kulkarni; +Cc: linux-nvme

[-- Attachment #1.1: Type: text/plain, Size: 13271 bytes --]

> On 2 Oct 2019, at 01.13, Chaitanya Kulkarni <Chaitanya.Kulkarni@wdc.com> wrote:
> 
> Over the period of time __nvme_submit_sync_cmd() function has grown to
> accept a large number of parameters. The function __nvme_submit_sync_cmd()
> now takes 10 parameters. This patch consolidates all the parameters into
> one defined structure.
> 
> This makes calls to the same function easy to read and improves overall
> code readability.
> 
> Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
> ---
> Hi,
> 
> 	I've compile tested this patch. Once we agree on this change
> 	I'll send out a tested version.
> 
> Regards,
> Chaitanya
> ---
> drivers/nvme/host/core.c    | 78 +++++++++++++++++++++---------
> drivers/nvme/host/fabrics.c | 96 ++++++++++++++++++++++++++++---------
> drivers/nvme/host/nvme.h    | 18 +++++--
> 3 files changed, 142 insertions(+), 50 deletions(-)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index e35615940365..9be51df8fb2f 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -769,32 +769,30 @@ static void nvme_execute_rq_polled(struct request_queue *q,
>  * Returns 0 on success.  If the result is negative, it's a Linux error code;
>  * if the result is positive, it's an NVM Express status code
>  */
> -int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
> -		union nvme_result *result, void *buffer, unsigned bufflen,
> -		unsigned timeout, int qid, int at_head,
> -		blk_mq_req_flags_t flags, bool poll)
> +int __nvme_submit_sync_cmd(struct nvme_submit_sync_data *d)
> {
> 	struct request *req;
> 	int ret;
> 
> -	req = nvme_alloc_request(q, cmd, flags, qid);
> +	req = nvme_alloc_request(d->q, d->cmd, d->flags, d->qid);
> 	if (IS_ERR(req))
> 		return PTR_ERR(req);
> 
> -	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
> +	req->timeout = d->timeout ? d->timeout : ADMIN_TIMEOUT;
> 
> -	if (buffer && bufflen) {
> -		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
> +	if (d->buffer && d->bufflen) {
> +		ret = blk_rq_map_kern(d->q, req, d->buffer, d->bufflen,
> +				      GFP_KERNEL);
> 		if (ret)
> 			goto out;
> 	}
> 
> -	if (poll)
> -		nvme_execute_rq_polled(req->q, NULL, req, at_head);
> +	if (d->poll)
> +		nvme_execute_rq_polled(req->q, NULL, req, d->at_head);
> 	else
> -		blk_execute_rq(req->q, NULL, req, at_head);
> -	if (result)
> -		*result = nvme_req(req)->result;
> +		blk_execute_rq(req->q, NULL, req, d->at_head);
> +	if (d->result)
> +		*(d->result) = nvme_req(req)->result;
> 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
> 		ret = -EINTR;
> 	else
> @@ -808,8 +806,20 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
> int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
> 		void *buffer, unsigned bufflen)
> {
> -	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
> -			NVME_QID_ANY, 0, 0, false);
> +	struct nvme_submit_sync_data d = {
> +		.q = q,
> +		.cmd = cmd,
> +		.result = NULL,
> +		.buffer = buffer,
> +		.bufflen = bufflen,
> +		.timeout= 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false
> +	};
> +
> +	return __nvme_submit_sync_cmd(&d);
> }
> EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
> 
> @@ -1114,19 +1124,30 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
> }
> 
> static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
> -		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
> +			 unsigned int dword11, void *buffer, size_t buflen,
> +			 u32 *result)
> {
> -	struct nvme_command c;
> +	struct nvme_command c = {};
> 	union nvme_result res;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = dev->admin_q,
> +		.cmd = &c,
> +		.result = &res,
> +		.buffer = buffer,
> +		.bufflen = buflen,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false,
> +	};
> 
> -	memset(&c, 0, sizeof(c));
> 	c.features.opcode = op;
> 	c.features.fid = cpu_to_le32(fid);
> 	c.features.dword11 = cpu_to_le32(dword11);
> 
> -	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
> -			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (ret >= 0 && result)
> 		*result = le32_to_cpu(res.u32);
> 	return ret;
> @@ -1954,10 +1975,22 @@ static const struct pr_ops nvme_pr_ops = {
> 
> #ifdef CONFIG_BLK_SED_OPAL
> int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
> -		bool send)
> +		    bool send)
> {
> 	struct nvme_ctrl *ctrl = data;
> 	struct nvme_command cmd;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->admin_q,
> +		.cmd = &cmd,
> +		.result = NULL,
> +		.buffer = buffer,
> +		.bufflen = len,
> +		.timeout = ADMIN_TIMEOUT,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 1,
> +		.flags = 0,
> +		.poll = false,
> +	};
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	if (send)
> @@ -1968,8 +2001,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
> 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
> 	cmd.common.cdw11 = cpu_to_le32(len);
> 
> -	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
> -				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
> +	return __nvme_submit_sync_cmd(&d);
> }
> EXPORT_SYMBOL_GPL(nvme_sec_submit);
> #endif /* CONFIG_BLK_SED_OPAL */
> diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
> index 74b8818ac9a1..c5a8aec5f046 100644
> --- a/drivers/nvme/host/fabrics.c
> +++ b/drivers/nvme/host/fabrics.c
> @@ -144,14 +144,25 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
> 	struct nvme_command cmd;
> 	union nvme_result res;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->fabrics_q,
> +		.cmd = &cmd,
> +		.result = &res,
> +		.buffer = NULL,
> +		.bufflen = 0,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false
> +	};
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.prop_get.opcode = nvme_fabrics_command;
> 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
> 	cmd.prop_get.offset = cpu_to_le32(off);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
> -			NVME_QID_ANY, 0, 0, false);
> +	ret = __nvme_submit_sync_cmd(&d);
> 
> 	if (ret >= 0)
> 		*val = le64_to_cpu(res.u64);
> @@ -190,6 +201,18 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
> 	struct nvme_command cmd;
> 	union nvme_result res;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->fabrics_q,
> +		.cmd = &cmd,
> +		.result = &res,
> +		.buffer = NULL,
> +		.bufflen = 0,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false
> +	};
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.prop_get.opcode = nvme_fabrics_command;
> @@ -197,9 +220,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
> 	cmd.prop_get.attrib = 1;
> 	cmd.prop_get.offset = cpu_to_le32(off);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
> -			NVME_QID_ANY, 0, 0, false);
> -
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (ret >= 0)
> 		*val = le64_to_cpu(res.u64);
> 	if (unlikely(ret != 0))
> @@ -235,6 +256,18 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
> {
> 	struct nvme_command cmd;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->fabrics_q,
> +		.cmd = &cmd,
> +		.result = NULL,
> +		.buffer = NULL,
> +		.bufflen = 0,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false
> +	};
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.prop_set.opcode = nvme_fabrics_command;
> @@ -243,8 +276,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
> 	cmd.prop_set.offset = cpu_to_le32(off);
> 	cmd.prop_set.value = cpu_to_le64(val);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
> -			NVME_QID_ANY, 0, 0, false);
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (unlikely(ret))
> 		dev_err(ctrl->device,
> 			"Property Set error: %d, offset %#x\n",
> @@ -366,10 +398,25 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
>  */
> int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
> {
> +	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
> 	struct nvme_command cmd;
> 	union nvme_result res;
> -	struct nvmf_connect_data *data;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->fabrics_q,
> +		.cmd = &cmd,
> +		.result = &res,
> +		.buffer = data,
> +		.bufflen = sizeof(*data),
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 1,
> +		.flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT,
> +		.poll = false
> +	};
> +
> +	if (!data)
> +		return -ENOMEM;
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.connect.opcode = nvme_fabrics_command;
> @@ -387,18 +434,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
> 	if (ctrl->opts->disable_sqflow)
> 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
> 
> -	data = kzalloc(sizeof(*data), GFP_KERNEL);
> -	if (!data)
> -		return -ENOMEM;
> -
> 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
> 	data->cntlid = cpu_to_le16(0xffff);
> 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
> 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
> -			data, sizeof(*data), 0, NVME_QID_ANY, 1,
> -			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (ret) {
> 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
> 				       &cmd, data);
> @@ -436,10 +477,25 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
>  */
> int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
> {
> +	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
> 	struct nvme_command cmd;
> -	struct nvmf_connect_data *data;
> 	union nvme_result res;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->connect_q,
> +		.cmd = &cmd,
> +		.result = &res,
> +		.buffer = data,
> +		.bufflen = sizeof(*data),
> +		.timeout = 0,
> +		.qid = qid,
> +		.at_head = 1,
> +		.flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT,
> +		.poll = poll
> +	};
> +
> +	if (!data)
> +		return -ENOMEM;
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.connect.opcode = nvme_fabrics_command;
> @@ -450,18 +506,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
> 	if (ctrl->opts->disable_sqflow)
> 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
> 
> -	data = kzalloc(sizeof(*data), GFP_KERNEL);
> -	if (!data)
> -		return -ENOMEM;
> -
> 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
> 	data->cntlid = cpu_to_le16(ctrl->cntlid);
> 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
> 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
> -			data, sizeof(*data), 0, qid, 1,
> -			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (ret) {
> 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
> 				       &cmd, data);
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index 38a83ef5bcd3..f172d8e02fc6 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -395,6 +395,19 @@ struct nvme_ctrl_ops {
> 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
> };
> 
> +struct nvme_submit_sync_data {
> +	struct request_queue *q;
> +	struct nvme_command *cmd;
> +	union nvme_result *result;
> +	void *buffer;
> +	unsigned bufflen;
> +	unsigned timeout;
> +	int qid;
> +	int at_head;
> +	blk_mq_req_flags_t flags;
> +	bool poll;
> +};
> +
> #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
> void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
> 			    const char *dev_name);
> @@ -485,10 +498,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
> 		struct nvme_command *cmd);
> int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
> 		void *buf, unsigned bufflen);
> -int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
> -		union nvme_result *result, void *buffer, unsigned bufflen,
> -		unsigned timeout, int qid, int at_head,
> -		blk_mq_req_flags_t flags, bool poll);
> +int __nvme_submit_sync_cmd(struct nvme_submit_sync_data *d);
> int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
> 		      unsigned int dword11, void *buffer, size_t buflen,
> 		      u32 *result);
> --
> 2.22.1
> 
> 
> _______________________________________________
> Linux-nvme mailing list
> Linux-nvme@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-nvme

Makes sense to me. We have other places with similar arguments (e.g.,
nvme_submit_user_cmd). Would it make sense to unify this too if we move
in this direction?


Javier

[-- Attachment #1.2: Message signed with OpenPGP --]
[-- Type: application/pgp-signature, Size: 833 bytes --]

[-- Attachment #2: Type: text/plain, Size: 158 bytes --]

_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
  2019-10-02  5:47 ` Javier González
@ 2019-10-02  6:10   ` Chaitanya Kulkarni
  2019-10-05 14:48     ` Keith Busch
  0 siblings, 1 reply; 8+ messages in thread
From: Chaitanya Kulkarni @ 2019-10-02  6:10 UTC (permalink / raw)
  To: Javier González; +Cc: linux-nvme

On 10/1/19 10:47 PM, Javier González wrote:
> Makes sense to me. We have other places with similar arguments (e.g.,
> nvme_submit_user_cmd). Would it make sense to unify this too if we move
> in this direction?
> 
Thanks for the feedback, I'll add that too in the next version.


_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
  2019-10-01 23:13 [PATCH] nvme: define struct for __nvme_submit_sync_cmd() Chaitanya Kulkarni
  2019-10-02  5:47 ` Javier González
@ 2019-10-05  0:09 ` Sagi Grimberg
  2019-10-05  2:09   ` Balbir Singh
  2019-10-06 10:12 ` Christoph Hellwig
  2 siblings, 1 reply; 8+ messages in thread
From: Sagi Grimberg @ 2019-10-05  0:09 UTC (permalink / raw)
  To: Chaitanya Kulkarni, linux-nvme


> Over the period of time __nvme_submit_sync_cmd() function has grown to
> accept a large number of parameters. The function __nvme_submit_sync_cmd()
> now takes 10 parameters. This patch consolidates all the parameters into
> one defined structure.
> 
> This makes calls to the same function easy to read and improves overall
> code readability.

Personally I'm not a big fan... But I'm not going to reject it either...

What do others think?

[...]

> @@ -366,10 +398,25 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
>    */
>   int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
>   {
> +	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

Please don't. I'm allergic to allocation on declaration.
Just set d.buffer after the initializer.

_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
  2019-10-05  0:09 ` Sagi Grimberg
@ 2019-10-05  2:09   ` Balbir Singh
  0 siblings, 0 replies; 8+ messages in thread
From: Balbir Singh @ 2019-10-05  2:09 UTC (permalink / raw)
  To: Sagi Grimberg, Chaitanya Kulkarni, linux-nvme

On Fri, 2019-10-04 at 17:09 -0700, Sagi Grimberg wrote:
> > Over the period of time __nvme_submit_sync_cmd() function has grown to
> > accept large number of paratements. The function __nvme_submit_sync_cmd()
> > now takes 10 parameters. This patch consolidates all the parameters into
> > one defined structure.
> > 
> > This makes calls to the same function easy to read and improves overall
> > code readability.
> 
> Personally I'm not a big fan... But I'm not going to reject it either...
> 
> What do others think?
> 
> [...]
> 

The patch itself is not the cleanest, smaller well defined refactoring would
be much better - at first glance

Balbir Singh.

> > @@ -366,10 +398,25 @@ static void nvmf_log_connect_error(struct nvme_ctrl
> > *ctrl,
> >    */
> >   int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
> >   {
> > +	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
> 
> Please don't. I'm allergic to allocation on declaration.
> Just set d.buffer after the initializer.
> 
> _______________________________________________
> Linux-nvme mailing list
> Linux-nvme@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-nvme


_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
  2019-10-02  6:10   ` Chaitanya Kulkarni
@ 2019-10-05 14:48     ` Keith Busch
  2019-10-06 10:13       ` Christoph Hellwig
  0 siblings, 1 reply; 8+ messages in thread
From: Keith Busch @ 2019-10-05 14:48 UTC (permalink / raw)
  To: Chaitanya Kulkarni; +Cc: Javier González, linux-nvme

On Wed, Oct 02, 2019 at 06:10:09AM +0000, Chaitanya Kulkarni wrote:
> On 10/1/19 10:47 PM, Javier González wrote:
> > Makes sense to me. We have other places with similar arguments (e.g.,
> > nvme_submit_user_cmd). Would it make sense to unify this too if we move
> > in this direction?
> > 
> Thanks for the feedback, I'll add that too in the next version.

It also looks like much of the callers can use the much simpler
nvme_submit_user_cmd() that already provides defaults to the numerous
parameters.

_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
  2019-10-01 23:13 [PATCH] nvme: define struct for __nvme_submit_sync_cmd() Chaitanya Kulkarni
  2019-10-02  5:47 ` Javier González
  2019-10-05  0:09 ` Sagi Grimberg
@ 2019-10-06 10:12 ` Christoph Hellwig
  2 siblings, 0 replies; 8+ messages in thread
From: Christoph Hellwig @ 2019-10-06 10:12 UTC (permalink / raw)
  To: Chaitanya Kulkarni; +Cc: linux-nvme

On Tue, Oct 01, 2019 at 04:13:46PM -0700, Chaitanya Kulkarni wrote:
> Over the period of time __nvme_submit_sync_cmd() function has grown to
> accept a large number of parameters. The function __nvme_submit_sync_cmd()
> now takes 10 parameters. This patch consolidates all the parameters into
> one defined structure.
> 
> This makes calls to the same function easy to read and improves overall
> code readability.

This makes the code both a lot more unreadable and a lot larger.  What
is the point?

_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
  2019-10-05 14:48     ` Keith Busch
@ 2019-10-06 10:13       ` Christoph Hellwig
  0 siblings, 0 replies; 8+ messages in thread
From: Christoph Hellwig @ 2019-10-06 10:13 UTC (permalink / raw)
  To: Keith Busch; +Cc: Javier González, linux-nvme, Chaitanya Kulkarni

On Sat, Oct 05, 2019 at 08:48:12AM -0600, Keith Busch wrote:
> On Wed, Oct 02, 2019 at 06:10:09AM +0000, Chaitanya Kulkarni wrote:
> > On 10/1/19 10:47 PM, Javier González wrote:
> > > Makes sense to me. We have other places with similar arguments (e.g.,
> > > nvme_submit_user_cmd). Would it make sense to unify this too if we move
> > > in this direction?
> > > 
> > Thanks for the feedback, I'll add that too in the next version.
> 
> It also looks like much of the callers can use the much simpler
> nvme_submit_user_cmd() that already provides defaults to the numerous
> parameters.

That on the other hand actually seems like a useful change.

_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, back to index

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-01 23:13 [PATCH] nvme: define struct for __nvme_submit_sync_cmd() Chaitanya Kulkarni
2019-10-02  5:47 ` Javier González
2019-10-02  6:10   ` Chaitanya Kulkarni
2019-10-05 14:48     ` Keith Busch
2019-10-06 10:13       ` Christoph Hellwig
2019-10-05  0:09 ` Sagi Grimberg
2019-10-05  2:09   ` Balbir Singh
2019-10-06 10:12 ` Christoph Hellwig

Linux-NVME Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/linux-nvme/0 linux-nvme/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 linux-nvme linux-nvme/ https://lore.kernel.org/linux-nvme \
		linux-nvme@lists.infradead.org linux-nvme@archiver.kernel.org
	public-inbox-index linux-nvme

Example config snippet for mirrors

Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.infradead.lists.linux-nvme


AGPL code for this site: git clone https://public-inbox.org/ public-inbox