From: Ming Lei <ming.lei@redhat.com>
To: Jens Axboe <axboe@kernel.dk>
Cc: Sagi Grimberg <sagi@grimberg.me>,
	James Smart <james.smart@broadcom.com>,
	linux-nvme@lists.infradead.org, Ming Lei <ming.lei@redhat.com>,
	linux-block@vger.kernel.org, Keith Busch <kbusch@kernel.org>,
	Christoph Hellwig <hch@lst.de>
Subject: [PATCH RFC 2/3] nvme: don't use blk_mq_alloc_request_hctx() for allocating connect request
Date: Fri, 15 Nov 2019 18:42:37 +0800
Message-ID: <20191115104238.15107-3-ming.lei@redhat.com>
In-Reply-To: <20191115104238.15107-1-ming.lei@redhat.com>

Use an NVMe-specific approach to choose the transport queue for the connect
request (a sketch of the pattern follows the list):

- the IO connect request uses the unique reserved tag 0
- store the queue id in req->private_rq_data in nvme_alloc_request()
- inside .queue_rq(), select the transport queue via the stored qid
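
A minimal sketch of that pattern, assuming a simplified controller layout;
queue_cookie() is only a placeholder helper here, and the concrete versions
live in the fc/rdma/tcp/loop hunks below:

	/* connect requests hold the reserved tag 0 on a non-admin tagset */
	if (rq->tag == 0 && ctrl->admin_q != hctx->queue) {
		unsigned qid = rq->private_rq_data;	/* qid saved at allocation */

		WARN_ON_ONCE(!blk_rq_is_private(rq));

		/* redirect the command to the queue being connected */
		queue = &ctrl->queues[qid];

		/* leave a per-queue cookie so the completion path can find us */
		rq->private_rq_data = (unsigned long)queue_cookie(queue);
	}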

The request's completion handler also needs to retrieve the block request via
.command_id and the transport queue instance. So store a transport-specific
cookie for the connect request in rq->private_rq_data before submitting, and
make sure that the cookie is available in the completion handler; then we can
look up the request via the cookie and the transport queue.
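
A minimal sketch of the completion-side lookup, again with placeholder names
(my_ctrl/my_tagset); each transport's real helper, such as
nvme_rdma_lookup_rq() in the diff below, follows the same shape:

	static struct request *lookup_connect_rq(struct my_ctrl *ctrl, void *cookie)
	{
		int i;

		/* IO connect requests always own the reserved tag 0 */
		for (i = 1; i < ctrl->queue_count; i++) {
			struct request *rq = blk_mq_tag_to_rq(my_tagset(&ctrl->queues[i]), 0);

			if (rq && rq->private_rq_data == (unsigned long)cookie)
				return rq;
		}

		return NULL;
	}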

With this approach, we no longer need the weird blk_mq_alloc_request_hctx()
API.

Cc: James Smart <james.smart@broadcom.com>
Cc: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 drivers/nvme/host/core.c   |  9 +++-----
 drivers/nvme/host/fc.c     | 10 +++++++++
 drivers/nvme/host/rdma.c   | 40 +++++++++++++++++++++++++++++++++---
 drivers/nvme/host/tcp.c    | 41 ++++++++++++++++++++++++++++++++++---
 drivers/nvme/target/loop.c | 42 +++++++++++++++++++++++++++++++++++---
 5 files changed, 127 insertions(+), 15 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fa7ba09dca77..e96e3997389b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -481,15 +481,12 @@ struct request *nvme_alloc_request(struct request_queue *q,
 	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
 	struct request *req;
 
-	if (qid == NVME_QID_ANY) {
-		req = blk_mq_alloc_request(q, op, flags);
-	} else {
-		req = blk_mq_alloc_request_hctx(q, op, flags,
-				qid ? qid - 1 : 0);
-	}
+	req = blk_mq_alloc_request(q, op, flags);
 	if (IS_ERR(req))
 		return req;
 
+	if (qid != NVME_QID_ANY)
+		req->private_rq_data = (unsigned long)qid;
 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
 	nvme_clear_nvme_request(req);
 	nvme_req(req)->cmd = cmd;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 265f89e11d8b..6c836e83e59c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2333,6 +2333,16 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	u32 data_len;
 	blk_status_t ret;
 
+	/* the connect request must use the specified queue */
+	if (rq->tag == 0 && ctrl->ctrl.admin_q != hctx->queue) {
+		unsigned qid = rq->private_rq_data;
+
+		WARN_ON_ONCE(!blk_rq_is_private(rq));
+
+		queue = &ctrl->queues[qid];
+		op->queue = queue;
+	}
+
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index cb4c3000a57e..6056679e8c77 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1436,13 +1436,35 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 	WARN_ON_ONCE(ret);
 }
 
+static struct request *nvme_rdma_lookup_rq(struct nvme_rdma_queue *queue,
+		struct nvme_rdma_qe *qe, struct nvme_completion *cqe)
+{
+	int i;
+
+	if (cqe->command_id || !nvme_rdma_queue_idx(queue))
+		return blk_mq_tag_to_rq(nvme_rdma_tagset(queue),
+				cqe->command_id);
+
+	/* lookup request for connect IO */
+	for (i = 1; i < queue->ctrl->ctrl.queue_count; i++) {
+		struct request *rq = blk_mq_tag_to_rq(nvme_rdma_tagset(
+					&queue->ctrl->queues[i]), 0);
+
+		if (rq && rq->private_rq_data == (unsigned long)qe)
+			return rq;
+	}
+
+	return NULL;
+}
+
 static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
-		struct nvme_completion *cqe, struct ib_wc *wc)
+		struct nvme_rdma_qe *qe, struct ib_wc *wc)
 {
 	struct request *rq;
 	struct nvme_rdma_request *req;
+	struct nvme_completion *cqe = qe->data;
 
-	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
+	rq = nvme_rdma_lookup_rq(queue, qe, cqe);
 	if (!rq) {
 		dev_err(queue->ctrl->ctrl.device,
 			"tag 0x%x on QP %#x not found\n",
@@ -1506,7 +1528,7 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	else
-		nvme_rdma_process_nvme_rsp(queue, cqe, wc);
+		nvme_rdma_process_nvme_rsp(queue, qe, wc);
 	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
 
 	nvme_rdma_post_recv(queue, qe);
@@ -1743,6 +1765,18 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	WARN_ON_ONCE(rq->tag < 0);
 
+	/* the connect request must use the specified queue */
+	if (rq->tag == 0 && queue->ctrl->ctrl.admin_q != hctx->queue) {
+		unsigned qid = rq->private_rq_data;
+
+		WARN_ON_ONCE(!blk_rq_is_private(rq));
+
+		queue = &queue->ctrl->queues[qid];
+		req->queue = queue;
+
+		rq->private_rq_data = (unsigned long)sqe;
+	}
+
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 770dbcbc999e..5087e2d168f1 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -425,12 +425,35 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
 	queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
 }
 
+static struct request *nvme_tcp_lookup_rq(struct nvme_tcp_queue *queue,
+		struct nvme_tcp_rsp_pdu *pdu, struct nvme_completion *cqe)
+{
+	int i;
+
+	if (cqe->command_id || !nvme_tcp_queue_id(queue))
+		return blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
+				cqe->command_id);
+
+	/* lookup request for connect IO */
+	for (i = 1; i < queue->ctrl->ctrl.queue_count; i++) {
+		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(
+					&queue->ctrl->queues[i]), 0);
+
+		if (rq && rq->private_rq_data == (unsigned long)pdu)
+			return rq;
+	}
+
+	return NULL;
+}
+
+
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
-		struct nvme_completion *cqe)
+		struct nvme_tcp_rsp_pdu *pdu)
 {
+	struct nvme_completion *cqe = &pdu->cqe;
 	struct request *rq;
 
-	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
+	rq = nvme_tcp_lookup_rq(queue, pdu, cqe);
 	if (!rq) {
 		dev_err(queue->ctrl->ctrl.device,
 			"queue %d tag 0x%x not found\n",
@@ -496,7 +519,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	else
-		ret = nvme_tcp_process_nvme_cqe(queue, cqe);
+		ret = nvme_tcp_process_nvme_cqe(queue, pdu);
 
 	return ret;
 }
@@ -2155,6 +2178,18 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 
+	/* the connect request must use the specified queue */
+	if (rq->tag == 0 && queue->ctrl->ctrl.admin_q != hctx->queue) {
+		unsigned qid = rq->private_rq_data;
+
+		WARN_ON_ONCE(!blk_rq_is_private(rq));
+
+		queue = &queue->ctrl->queues[qid];
+		req->queue = queue;
+
+		rq->private_rq_data = (unsigned long)queue->pdu;
+	}
+
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 11f5aea97d1b..d97c3155d86a 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -90,6 +90,28 @@ static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
 	return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
+static struct request *nvme_loop_lookup_rq(struct nvme_loop_queue *queue,
+		struct nvmet_req *req)
+{
+	struct nvme_completion *cqe = req->cqe;
+	int i;
+
+	if (cqe->command_id || !nvme_loop_queue_idx(queue))
+		return blk_mq_tag_to_rq(nvme_loop_tagset(queue),
+				cqe->command_id);
+
+	/* lookup request for connect IO */
+	for (i = 1; i < queue->ctrl->ctrl.queue_count; i++) {
+		struct request *rq = blk_mq_tag_to_rq(nvme_loop_tagset(
+					&queue->ctrl->queues[i]), 0);
+
+		if (rq && rq->private_rq_data == (unsigned long)req)
+			return rq;
+	}
+
+	return NULL;
+}
+
 static void nvme_loop_queue_response(struct nvmet_req *req)
 {
 	struct nvme_loop_queue *queue =
@@ -107,9 +129,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	} else {
-		struct request *rq;
-
-		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+		struct request *rq = nvme_loop_lookup_rq(queue, req);
 		if (!rq) {
 			dev_err(queue->ctrl->ctrl.device,
 				"tag 0x%x on queue %d not found\n",
@@ -139,6 +159,22 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 
+	/* the connect request must use the specified queue */
+	if (req->tag == 0) {
+		struct nvme_loop_ctrl *ctrl = hctx->queue->tag_set->driver_data;
+
+		if (ctrl->ctrl.admin_q != hctx->queue) {
+			unsigned qid = req->private_rq_data;
+
+			WARN_ON_ONCE(!blk_rq_is_private(req));
+
+			queue = &ctrl->queues[qid];
+			iod->queue = queue;
+
+			req->private_rq_data = (unsigned long)&iod->req;
+		}
+	}
+
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
-- 
2.20.1



Thread overview: 16+ messages
2019-11-15 10:42 [PATCH RFC 0/3] blk-mq/nvme: use blk_mq_alloc_request() for NVMe's connect request Ming Lei
2019-11-15 10:42 ` [PATCH RFC 1/3] block: reuse one scheduler/flush field for private request's data Ming Lei
2019-11-15 10:42 ` Ming Lei [this message]
2019-11-15 10:42 ` [PATCH RFC 3/3] blk-mq: kill blk_mq_alloc_request_hctx() Ming Lei
2019-11-15 22:38 ` [PATCH RFC 0/3] blk-mq/nvme: use blk_mq_alloc_request() for NVMe's connect request Sagi Grimberg
2019-11-16  7:17   ` Ming Lei
2019-11-17  1:24     ` Bart Van Assche
2019-11-17  4:12       ` Ming Lei
2019-11-18 23:27         ` Bart Van Assche
2019-11-19  0:05     ` Sagi Grimberg
2019-11-19  0:34       ` Keith Busch
2019-11-19  1:43         ` Sagi Grimberg
2019-11-19  2:38         ` Ming Lei
2019-11-19  2:33       ` Ming Lei
2019-11-19 17:56       ` James Smart
2019-11-20  6:35         ` Ming Lei
