From: "Javier González" <javier@javigon.com>
To: Chaitanya Kulkarni <Chaitanya.Kulkarni@wdc.com>
Cc: linux-nvme@lists.infradead.org
Subject: Re: [PATCH] nvme: define struct for __nvme_submit_sync_cmd()
Date: Wed, 2 Oct 2019 07:47:06 +0200	[thread overview]
Message-ID: <58CA8A6F-46B6-4728-9EF4-ED3EAEEEEE2E@javigon.com> (raw)
In-Reply-To: <20191001231346.18518-1-chaitanya.kulkarni@wdc.com>


> On 2 Oct 2019, at 01.13, Chaitanya Kulkarni <Chaitanya.Kulkarni@wdc.com> wrote:
> 
> Over time, __nvme_submit_sync_cmd() has grown to accept a large
> number of parameters; it now takes 10. This patch consolidates all of
> them into a single structure.
> 
> This makes call sites easier to read and improves overall code
> readability.
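> 
> For illustration, a minimal sketch of the call-site change (names as
> introduced below). Fields omitted from a designated initializer
> default to zero/NULL/false, so simple callers only name what they
> actually set:
> 
> 	/* before: ten positional arguments */
> 	ret = __nvme_submit_sync_cmd(q, cmd, &res, buffer, bufflen, 0,
> 			NVME_QID_ANY, 0, 0, false);
> 
> 	/* after: one struct, self-describing fields */
> 	struct nvme_submit_sync_data d = {
> 		.q = q,
> 		.cmd = cmd,
> 		.result = &res,
> 		.buffer = buffer,
> 		.bufflen = bufflen,
> 		.qid = NVME_QID_ANY,
> 	};
> 
> 	ret = __nvme_submit_sync_cmd(&d);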
> 
> Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
> ---
> Hi,
> 
> 	I've compile-tested this patch. Once we agree on this change,
> 	I'll send out a fully tested version.
> 
> Regards,
> Chaitanya
> ---
> drivers/nvme/host/core.c    | 78 +++++++++++++++++++++---------
> drivers/nvme/host/fabrics.c | 96 ++++++++++++++++++++++++++++---------
> drivers/nvme/host/nvme.h    | 18 +++++--
> 3 files changed, 142 insertions(+), 50 deletions(-)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index e35615940365..9be51df8fb2f 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -769,32 +769,30 @@ static void nvme_execute_rq_polled(struct request_queue *q,
>  * Returns 0 on success.  If the result is negative, it's a Linux error code;
>  * if the result is positive, it's an NVM Express status code
>  */
> -int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
> -		union nvme_result *result, void *buffer, unsigned bufflen,
> -		unsigned timeout, int qid, int at_head,
> -		blk_mq_req_flags_t flags, bool poll)
> +int __nvme_submit_sync_cmd(struct nvme_submit_sync_data *d)
> {
> 	struct request *req;
> 	int ret;
> 
> -	req = nvme_alloc_request(q, cmd, flags, qid);
> +	req = nvme_alloc_request(d->q, d->cmd, d->flags, d->qid);
> 	if (IS_ERR(req))
> 		return PTR_ERR(req);
> 
> -	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
> +	req->timeout = d->timeout ? d->timeout : ADMIN_TIMEOUT;
> 
> -	if (buffer && bufflen) {
> -		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
> +	if (d->buffer && d->bufflen) {
> +		ret = blk_rq_map_kern(d->q, req, d->buffer, d->bufflen,
> +				      GFP_KERNEL);
> 		if (ret)
> 			goto out;
> 	}
> 
> -	if (poll)
> -		nvme_execute_rq_polled(req->q, NULL, req, at_head);
> +	if (d->poll)
> +		nvme_execute_rq_polled(req->q, NULL, req, d->at_head);
> 	else
> -		blk_execute_rq(req->q, NULL, req, at_head);
> -	if (result)
> -		*result = nvme_req(req)->result;
> +		blk_execute_rq(req->q, NULL, req, d->at_head);
> +	if (d->result)
> +		*(d->result) = nvme_req(req)->result;
> 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
> 		ret = -EINTR;
> 	else
> @@ -808,8 +806,20 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
> int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
> 		void *buffer, unsigned bufflen)
> {
> -	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
> -			NVME_QID_ANY, 0, 0, false);
> +	struct nvme_submit_sync_data d = {
> +		.q = q,
> +		.cmd = cmd,
> +		.result = NULL,
> +		.buffer = buffer,
> +		.bufflen = bufflen,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false,
> +	};
> +
> +	return __nvme_submit_sync_cmd(&d);
> }
> EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
> 
> @@ -1114,19 +1124,30 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
> }
> 
> static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
> -		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
> +			 unsigned int dword11, void *buffer, size_t buflen,
> +			 u32 *result)
> {
> -	struct nvme_command c;
> +	struct nvme_command c = {};
> 	union nvme_result res;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = dev->admin_q,
> +		.cmd = &c,
> +		.result = &res,
> +		.buffer = buffer,
> +		.bufflen = buflen,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false,
> +	};
> 
> -	memset(&c, 0, sizeof(c));
> 	c.features.opcode = op;
> 	c.features.fid = cpu_to_le32(fid);
> 	c.features.dword11 = cpu_to_le32(dword11);
> 
> -	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
> -			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (ret >= 0 && result)
> 		*result = le32_to_cpu(res.u32);
> 	return ret;
> @@ -1954,10 +1975,22 @@ static const struct pr_ops nvme_pr_ops = {
> 
> #ifdef CONFIG_BLK_SED_OPAL
> int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
> -		bool send)
> +		    bool send)
> {
> 	struct nvme_ctrl *ctrl = data;
> 	struct nvme_command cmd;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->admin_q,
> +		.cmd = &cmd,
> +		.result = NULL,
> +		.buffer = buffer,
> +		.bufflen = len,
> +		.timeout = ADMIN_TIMEOUT,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 1,
> +		.flags = 0,
> +		.poll = false,
> +	};
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	if (send)
> @@ -1968,8 +2001,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
> 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
> 	cmd.common.cdw11 = cpu_to_le32(len);
> 
> -	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
> -				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
> +	return __nvme_submit_sync_cmd(&d);
> }
> EXPORT_SYMBOL_GPL(nvme_sec_submit);
> #endif /* CONFIG_BLK_SED_OPAL */
> diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
> index 74b8818ac9a1..c5a8aec5f046 100644
> --- a/drivers/nvme/host/fabrics.c
> +++ b/drivers/nvme/host/fabrics.c
> @@ -144,14 +144,25 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
> 	struct nvme_command cmd;
> 	union nvme_result res;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->fabrics_q,
> +		.cmd = &cmd,
> +		.result = &res,
> +		.buffer = NULL,
> +		.bufflen = 0,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false,
> +	};
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.prop_get.opcode = nvme_fabrics_command;
> 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
> 	cmd.prop_get.offset = cpu_to_le32(off);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
> -			NVME_QID_ANY, 0, 0, false);
> +	ret = __nvme_submit_sync_cmd(&d);
> 
> 	if (ret >= 0)
> 		*val = le64_to_cpu(res.u64);
> @@ -190,6 +201,18 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
> 	struct nvme_command cmd;
> 	union nvme_result res;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->fabrics_q,
> +		.cmd = &cmd,
> +		.result = &res,
> +		.buffer = NULL,
> +		.bufflen = 0,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false,
> +	};
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.prop_get.opcode = nvme_fabrics_command;
> @@ -197,9 +220,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
> 	cmd.prop_get.attrib = 1;
> 	cmd.prop_get.offset = cpu_to_le32(off);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
> -			NVME_QID_ANY, 0, 0, false);
> -
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (ret >= 0)
> 		*val = le64_to_cpu(res.u64);
> 	if (unlikely(ret != 0))
> @@ -235,6 +256,18 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
> {
> 	struct nvme_command cmd;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->fabrics_q,
> +		.cmd = &cmd,
> +		.result = NULL,
> +		.buffer = NULL,
> +		.bufflen = 0,
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 0,
> +		.flags = 0,
> +		.poll = false,
> +	};
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.prop_set.opcode = nvme_fabrics_command;
> @@ -243,8 +276,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
> 	cmd.prop_set.offset = cpu_to_le32(off);
> 	cmd.prop_set.value = cpu_to_le64(val);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
> -			NVME_QID_ANY, 0, 0, false);
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (unlikely(ret))
> 		dev_err(ctrl->device,
> 			"Property Set error: %d, offset %#x\n",
> @@ -366,10 +398,25 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
>  */
> int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
> {
> +	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
> 	struct nvme_command cmd;
> 	union nvme_result res;
> -	struct nvmf_connect_data *data;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->fabrics_q,
> +		.cmd = &cmd,
> +		.result = &res,
> +		.buffer = data,
> +		.bufflen = sizeof(*data),
> +		.timeout = 0,
> +		.qid = NVME_QID_ANY,
> +		.at_head = 1,
> +		.flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT,
> +		.poll = false,
> +	};
> +
> +	if (!data)
> +		return -ENOMEM;
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.connect.opcode = nvme_fabrics_command;
> @@ -387,18 +434,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
> 	if (ctrl->opts->disable_sqflow)
> 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
> 
> -	data = kzalloc(sizeof(*data), GFP_KERNEL);
> -	if (!data)
> -		return -ENOMEM;
> -
> 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
> 	data->cntlid = cpu_to_le16(0xffff);
> 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
> 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
> -			data, sizeof(*data), 0, NVME_QID_ANY, 1,
> -			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (ret) {
> 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
> 				       &cmd, data);
> @@ -436,10 +477,25 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
>  */
> int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
> {
> +	struct nvmf_connect_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
> 	struct nvme_command cmd;
> -	struct nvmf_connect_data *data;
> 	union nvme_result res;
> 	int ret;
> +	struct nvme_submit_sync_data d = {
> +		.q = ctrl->connect_q,
> +		.cmd = &cmd,
> +		.result = &res,
> +		.buffer = data,
> +		.bufflen = sizeof(*data),
> +		.timeout = 0,
> +		.qid = qid,
> +		.at_head = 1,
> +		.flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT,
> +		.poll = poll,
> +	};
> +
> +	if (!data)
> +		return -ENOMEM;
> 
> 	memset(&cmd, 0, sizeof(cmd));
> 	cmd.connect.opcode = nvme_fabrics_command;
> @@ -450,18 +506,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
> 	if (ctrl->opts->disable_sqflow)
> 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
> 
> -	data = kzalloc(sizeof(*data), GFP_KERNEL);
> -	if (!data)
> -		return -ENOMEM;
> -
> 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
> 	data->cntlid = cpu_to_le16(ctrl->cntlid);
> 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
> 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
> 
> -	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
> -			data, sizeof(*data), 0, qid, 1,
> -			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
> +	ret = __nvme_submit_sync_cmd(&d);
> 	if (ret) {
> 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
> 				       &cmd, data);
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index 38a83ef5bcd3..f172d8e02fc6 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -395,6 +395,19 @@ struct nvme_ctrl_ops {
> 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
> };
> 
> +struct nvme_submit_sync_data {
> +	struct request_queue *q;
> +	struct nvme_command *cmd;
> +	union nvme_result *result;
> +	void *buffer;
> +	unsigned bufflen;
> +	unsigned timeout;
> +	int qid;
> +	int at_head;
> +	blk_mq_req_flags_t flags;
> +	bool poll;
> +};
> +
> #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
> void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
> 			    const char *dev_name);
> @@ -485,10 +498,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
> 		struct nvme_command *cmd);
> int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
> 		void *buf, unsigned bufflen);
> -int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
> -		union nvme_result *result, void *buffer, unsigned bufflen,
> -		unsigned timeout, int qid, int at_head,
> -		blk_mq_req_flags_t flags, bool poll);
> +int __nvme_submit_sync_cmd(struct nvme_submit_sync_data *d);
> int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
> 		      unsigned int dword11, void *buffer, size_t buflen,
> 		      u32 *result);
> --
> 2.22.1

Makes sense to me. We have other places with a similarly long argument
list (e.g., nvme_submit_user_cmd()). Would it make sense to unify those
too if we move in this direction?
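
A rough sketch of what that could look like (hypothetical; this struct
and its fields are illustrative, not part of the patch):

	/* Hypothetical sketch only: nvme_submit_user_cmd() also takes a
	 * long positional parameter list today; the same idiom would
	 * collapse it into one struct with designated initializers.
	 */
	struct nvme_submit_user_data {
		struct request_queue *q;
		struct nvme_command *cmd;
		void __user *ubuffer;
		unsigned bufflen;
		void __user *meta_buffer;
		unsigned meta_len;
		u32 meta_seed;
		u32 *result;
		unsigned timeout;
	};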


Javier
