All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH RFC] nvme/fc: sq flow control
@ 2020-02-25 23:59 Hannes Reinecke
  2020-02-26  0:08 ` Sagi Grimberg
                   ` (3 more replies)
  0 siblings, 4 replies; 16+ messages in thread
From: Hannes Reinecke @ 2020-02-25 23:59 UTC (permalink / raw)
  To: Keith Busch
  Cc: Sagi Grimberg, Chaitanya Kulkarni, James Smart, linux-nvme,
	Hannes Reinecke, John Meneghini

As per the NVMe-oF spec, sq flow control is actually mandatory, and we
should implement it to prevent the controller from returning a fatal
status error, and to play nicely with controllers that use sq flow
control to implement QoS.

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/nvme/host/fc.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index a19ddb61039d..628397bd5065 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -12,6 +12,7 @@
 
 #include "nvme.h"
 #include "fabrics.h"
+#include "trace.h"
 #include <linux/nvme-fc-driver.h>
 #include <linux/nvme-fc.h>
 #include <scsi/scsi_transport_fc.h>
@@ -34,7 +35,8 @@ struct nvme_fc_queue {
 	size_t			cmnd_capsule_len;
 	u32			qnum;
 	u32			seqno;
-
+	int			sq_head;
+	int			sq_tail;
 	u64			connection_id;
 	atomic_t		csn;
 
@@ -1671,6 +1673,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 				cqe->command_id);
 			goto done;
 		}
+		WRITE_ONCE(queue->sq_head, cpu_to_le16(cqe->sq_head));
+		trace_nvme_sq(rq, cqe->sq_head, queue->sq_tail);
 		result = cqe->result;
 		status = cqe->status;
 		break;
@@ -2177,6 +2181,18 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	freq->sg_cnt = 0;
 }
 
+static int nvme_fc_update_sq_tail(struct nvme_fc_queue *queue, int incr)
+{
+	int old_sqtl, new_sqtl;
+
+	do {
+		old_sqtl = queue->sq_tail;
+		new_sqtl = (old_sqtl + incr) % queue->ctrl->ctrl.sqsize;
+	} while (cmpxchg(&queue->sq_tail, old_sqtl, new_sqtl) !=
+		 old_sqtl);
+	return new_sqtl;
+}
+
 /*
  * In FC, the queue is a logical thing. At transport connect, the target
  * creates its "queue" and returns a handle that is to be given to the
@@ -2219,6 +2235,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	if (!nvme_fc_ctrl_get(ctrl))
 		return BLK_STS_IOERR;
 
+	if (!ctrl->ctrl.opts->disable_sqflow) {
+		if (nvme_fc_update_sq_tail(queue, 1) ==
+		    READ_ONCE(queue->sq_head)) {
+			nvme_fc_update_sq_tail(queue, -1);
+			return BLK_STS_RESOURCE;
+		}
+	}
+
 	/* format the FC-NVME CMD IU and fcp_req */
 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
 	cmdiu->data_len = cpu_to_be32(data_len);
@@ -2284,6 +2308,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 					queue->lldd_handle, &op->fcp_req);
 
 	if (ret) {
+		if (ctrl->ctrl.opts->disable_sqflow)
+			nvme_fc_update_sq_tail(queue, -1);
+
 		/*
 		 * If the lld fails to send the command is there an issue with
 		 * the csn value?  If the command that fails is the Connect,
-- 
2.16.4


_______________________________________________
linux-nvme mailing list
linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply related	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2020-03-10 16:44 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-02-25 23:59 [PATCH RFC] nvme/fc: sq flow control Hannes Reinecke
2020-02-26  0:08 ` Sagi Grimberg
2020-02-26  0:14   ` Hannes Reinecke
2020-02-26  0:38     ` Sagi Grimberg
2020-02-27 11:27       ` Hannes Reinecke
2020-02-26 10:44 ` Martin Wilck
2020-02-26 15:47   ` Hannes Reinecke
2020-02-26 23:45 ` Sagi Grimberg
2020-02-27  1:46   ` James Smart
2020-02-27  3:52     ` Sagi Grimberg
2020-02-27 21:46       ` Meneghini, John
2020-02-28 16:35       ` James Smart
2020-02-28 10:39   ` Hannes Reinecke
2020-03-09 21:59 ` James Smart
2020-03-10  6:55   ` Hannes Reinecke
2020-03-10 16:44     ` James Smart

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.