* [PATCH v2] nvme-fc: clean up error messages
@ 2019-06-24 22:12 Ewan D. Milne
  2019-06-25 16:37 ` James Smart
  0 siblings, 1 reply; 5+ messages in thread
From: Ewan D. Milne @ 2019-06-24 22:12 UTC (permalink / raw)


Some of the error messages are inconsistent, and one of them is wrong
(i.e. "queue_size 128 > ctrl maxcmd 32, reducing to queue_size").
Make them clearer and easier to distinguish in log analysis.
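
For example, with queue_size 128 and maxcmd 32 the maxcmd warning
changes from:

    queue_size 128 > ctrl maxcmd 32, reducing to queue_size

to:

    queue_size 128 > ctrl maxcmd 32, reducing queue_size to 32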

Signed-off-by: Ewan D. Milne <emilne@redhat.com>
---
 drivers/nvme/host/fc.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9b497d785ed7..ab162c57ddc2 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1259,7 +1259,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 	if (fcret) {
 		ret = -EBADF;
 		dev_err(ctrl->dev,
-			"q %d connect failed: %s\n",
+			"queue %d connect admin queue failed: %s\n",
 			queue->qnum, validation_errors[fcret]);
 	} else {
 		ctrl->association_id =
@@ -1358,7 +1358,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	if (fcret) {
 		ret = -EBADF;
 		dev_err(ctrl->dev,
-			"q %d connect failed: %s\n",
+			"queue %d connect queue failed: %s\n",
 			queue->qnum, validation_errors[fcret]);
 	} else {
 		queue->connection_id =
@@ -1371,7 +1371,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 out_no_memory:
 	if (ret)
 		dev_err(ctrl->dev,
-			"queue %d connect command failed (%d).\n",
+			"queue %d connect queue failed (%d).\n",
 			queue->qnum, ret);
 	return ret;
 }
@@ -2678,16 +2678,18 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 		/* warn if maxcmd is lower than queue_size */
 		dev_warn(ctrl->ctrl.device,
 			"queue_size %zu > ctrl maxcmd %u, reducing "
-			"to queue_size\n",
-			opts->queue_size, ctrl->ctrl.maxcmd);
+			"queue_size to %u\n",
+			opts->queue_size, ctrl->ctrl.maxcmd, ctrl->ctrl.maxcmd);
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
 
 	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
 		/* warn if sqsize is lower than queue_size */
 		dev_warn(ctrl->ctrl.device,
-			"queue_size %zu > ctrl sqsize %u, clamping down\n",
-			opts->queue_size, ctrl->ctrl.sqsize + 1);
+			"queue_size %zu > ctrl sqsize %u, reducing "
+			"queue_size to %u\n",
+			opts->queue_size, ctrl->ctrl.sqsize + 1,
+			ctrl->ctrl.sqsize + 1);
 		opts->queue_size = ctrl->ctrl.sqsize + 1;
 	}
 
-- 
2.18.1


* [PATCH v2] nvme-fc: clean up error messages
  2019-06-24 22:12 [PATCH v2] nvme-fc: clean up error messages Ewan D. Milne
@ 2019-06-25 16:37 ` James Smart
  2019-06-25 19:05   ` Ewan D. Milne
  0 siblings, 1 reply; 5+ messages in thread
From: James Smart @ 2019-06-25 16:37 UTC (permalink / raw)

On 6/24/2019 3:12 PM, Ewan D. Milne wrote:
> Some of the error messages are inconsistent, and one of them is wrong
> (i.e. "queue_size 128 > ctrl maxcmd 32, reducing to queue_size").
> Make them clearer and easier to distinguish in log analysis.
>
> Signed-off-by: Ewan D. Milne <emilne@redhat.com>
> ---
>   drivers/nvme/host/fc.c | 16 +++++++++-------
>   1 file changed, 9 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
> index 9b497d785ed7..ab162c57ddc2 100644
> --- a/drivers/nvme/host/fc.c
> +++ b/drivers/nvme/host/fc.c
> @@ -1259,7 +1259,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
>   	if (fcret) {
>   		ret = -EBADF;
>   		dev_err(ctrl->dev,
> -			"q %d connect failed: %s\n",
> +			"queue %d connect admin queue failed: %s\n",
>   			queue->qnum, validation_errors[fcret]);
>   	} else {
>   		ctrl->association_id =

Since we're changing them...

how about "queue %d Create Association LS failed: %s"
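
In code that would read something like this (just a sketch of the
message string; the arguments stay the same):

	dev_err(ctrl->dev,
		"queue %d Create Association LS failed: %s\n",
		queue->qnum, validation_errors[fcret]);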

> @@ -1358,7 +1358,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
>   	if (fcret) {
>   		ret = -EBADF;
>   		dev_err(ctrl->dev,
> -			"q %d connect failed: %s\n",
> +			"queue %d connect queue failed: %s\n",

how about "queue %d Create Connection LS failed: %s"
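
i.e. the same shape for the io queue case:

	dev_err(ctrl->dev,
		"queue %d Create Connection LS failed: %s\n",
		queue->qnum, validation_errors[fcret]);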

>   			queue->qnum, validation_errors[fcret]);
>   	} else {
>   		queue->connection_id =
> @@ -1371,7 +1371,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
>   out_no_memory:
>   	if (ret)
>   		dev_err(ctrl->dev,
> -			"queue %d connect command failed (%d).\n",
> +			"queue %d connect queue failed (%d).\n",
>   			queue->qnum, ret);
>   	return ret;
>   }
> @@ -2678,16 +2678,18 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
>   		/* warn if maxcmd is lower than queue_size */
>   		dev_warn(ctrl->ctrl.device,
>   			"queue_size %zu > ctrl maxcmd %u, reducing "
> -			"to queue_size\n",
> -			opts->queue_size, ctrl->ctrl.maxcmd);
> +			"queue_size to %u\n",
> +			opts->queue_size, ctrl->ctrl.maxcmd, ctrl->ctrl.maxcmd);
>   		opts->queue_size = ctrl->ctrl.maxcmd;
>   	}
>   
>   	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
>   		/* warn if sqsize is lower than queue_size */
>   		dev_warn(ctrl->ctrl.device,
> -			"queue_size %zu > ctrl sqsize %u, clamping down\n",
> -			opts->queue_size, ctrl->ctrl.sqsize + 1);
> +			"queue_size %zu > ctrl sqsize %u, reducing "
> +			"queue_size to %u\n",
> +			opts->queue_size, ctrl->ctrl.sqsize + 1,
> +			ctrl->ctrl.sqsize + 1);
>   		opts->queue_size = ctrl->ctrl.sqsize + 1;
>   	}
>   

Given these last two came from a template in the rdma transport, you
may want to consider using the same wording in all the transports.

-- james


* [PATCH v2] nvme-fc: clean up error messages
  2019-06-25 16:37 ` James Smart
@ 2019-06-25 19:05   ` Ewan D. Milne
  2019-06-25 19:55     ` James Smart
  0 siblings, 1 reply; 5+ messages in thread
From: Ewan D. Milne @ 2019-06-25 19:05 UTC (permalink / raw)


On Tue, 2019-06-25 at 09:37 -0700, James Smart wrote:
> 
> > @@ -1259,7 +1259,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
> >   	if (fcret) {
> >   		ret = -EBADF;
> >   		dev_err(ctrl->dev,
> > -			"q %d connect failed: %s\n",
> > +			"queue %d connect admin queue failed: %s\n",
> >   			queue->qnum, validation_errors[fcret]);
> >   	} else {
> >   		ctrl->association_id =
> 
> Since we're changing them...
> 
> how about "queue %d Create Association LS failed: %s"
> 
> > @@ -1358,7 +1358,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
> >   	if (fcret) {
> >   		ret = -EBADF;
> >   		dev_err(ctrl->dev,
> > -			"q %d connect failed: %s\n",
> > +			"queue %d connect queue failed: %s\n",
> 
> how about "queue %d Create Connection LS failed: %s"

Sure.

> 
> > @@ -2678,16 +2678,18 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
> >   		/* warn if maxcmd is lower than queue_size */
> >   		dev_warn(ctrl->ctrl.device,
> >   			"queue_size %zu > ctrl maxcmd %u, reducing "
> > -			"to queue_size\n",
> > -			opts->queue_size, ctrl->ctrl.maxcmd);
> > +			"queue_size to %u\n",
> > +			opts->queue_size, ctrl->ctrl.maxcmd, ctrl->ctrl.maxcmd);
> >   		opts->queue_size = ctrl->ctrl.maxcmd;
> >   	}
> >   
> >   	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
> >   		/* warn if sqsize is lower than queue_size */
> >   		dev_warn(ctrl->ctrl.device,
> > -			"queue_size %zu > ctrl sqsize %u, clamping down\n",
> > -			opts->queue_size, ctrl->ctrl.sqsize + 1);
> > +			"queue_size %zu > ctrl sqsize %u, reducing "
> > +			"queue_size to %u\n",
> > +			opts->queue_size, ctrl->ctrl.sqsize + 1,
> > +			ctrl->ctrl.sqsize + 1);
> >   		opts->queue_size = ctrl->ctrl.sqsize + 1;
> >   	}
> >   
> 
> Given these last two came from a template in the rdma transport - you 
> may want to consider using the same wording in all the transports.

Hmm.  Despite the warning message, the RDMA and TCP transports do not
appear to actually change opts->queue_size here when it is > maxcmd;
they use sqsize when allocating the tagset.

And they both reduce sqsize when it is > maxcmd, rather than reducing
queue_size when queue_size > sqsize as FC does (and FC does not change
sqsize at all).

commit 5e77d61cbc7e766778037127dab69e6410a8fc48
Author: Sagi Grimberg <sagi@grimberg.me>
Date:   Tue Jun 19 15:34:13 2018 +0300

    nvme-rdma: don't override opts->queue_size
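
After that commit the rdma logic looks roughly like this (paraphrasing
from memory, not a verbatim copy of nvme-rdma):

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size,
		 * but leave opts->queue_size alone
		 */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
	}

	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
		/* clamp sqsize to maxcmd instead of touching queue_size */
		dev_warn(ctrl->ctrl.device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
		ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
	}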

So should FC be using the same logic?

-Ewan

> 
> -- james
> 
> 


* [PATCH v2] nvme-fc: clean up error messages
  2019-06-25 19:05   ` Ewan D. Milne
@ 2019-06-25 19:55     ` James Smart
  2019-06-25 21:18       ` Sagi Grimberg
  0 siblings, 1 reply; 5+ messages in thread
From: James Smart @ 2019-06-25 19:55 UTC (permalink / raw)


On 6/25/2019 12:05 PM, Ewan D. Milne wrote:
> commit 5e77d61cbc7e766778037127dab69e6410a8fc48
> Author: Sagi Grimberg <sagi@grimberg.me>
> Date:   Tue Jun 19 15:34:13 2018 +0300
>
>      nvme-rdma: don't override opts->queue_size
>
> So should FC be using the same logic?

not sure - I'll have to look at why Sagi reverted it.

-- james


* [PATCH v2] nvme-fc: clean up error messages
  2019-06-25 19:55     ` James Smart
@ 2019-06-25 21:18       ` Sagi Grimberg
  0 siblings, 0 replies; 5+ messages in thread
From: Sagi Grimberg @ 2019-06-25 21:18 UTC (permalink / raw)

>> commit 5e77d61cbc7e766778037127dab69e6410a8fc48
>> Author: Sagi Grimberg <sagi@grimberg.me>
>> Date:   Tue Jun 19 15:34:13 2018 +0300
>>
>>     nvme-rdma: don't override opts->queue_size
>>
>> So should FC be using the same logic?
> 
> not sure - I'll have to look why Sagi reverted it.

I reverted it because it's a user parameter which we should
try to avoid modifying.

And maxcmd or other limits can theoretically change over resets.

