* [PATCH v1 0/3] Move active IO termination to the core
@ 2016-01-10  8:59 Sagi Grimberg
  2016-01-10  8:59 ` [PATCH v1 1/3] blk-mq: Export tagset iter function Sagi Grimberg
                   ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Sagi Grimberg @ 2016-01-10  8:59 UTC (permalink / raw)


This patchset adds a helper for active IO termination to the nvme
core, based on the PCI driver's nvme_clear_queue() functionality.
This is needed for live shutdowns and resets during active IO; it
is not pci specific and will be needed by other transports as well.

This patchset applies on top of Keith's "NVMe fixes and updates for 4.5"
patchset.

Changes from v0:
- Removed the nvme core helpers for IO termination and have pci
  use blk-mq helper directly
- Rebased on top of Keith and Christoph's patches

Sagi Grimberg (3):
  blk-mq: Export tagset iter function
  nvme: Use blk-mq helper for IO termination
  blk-mq: Make blk_mq_all_tag_busy_iter static

 block/blk-mq-tag.c      | 15 ++++++++++++---
 drivers/nvme/host/pci.c | 19 +++++--------------
 include/linux/blk-mq.h  |  4 ++--
 3 files changed, 19 insertions(+), 19 deletions(-)

-- 
1.8.4.3

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 1/3] blk-mq: Export tagset iter function
  2016-01-10  8:59 [PATCH v1 0/3] Move active IO termination to the core Sagi Grimberg
@ 2016-01-10  8:59 ` Sagi Grimberg
  2016-01-10  8:59 ` [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination Sagi Grimberg
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 15+ messages in thread
From: Sagi Grimberg @ 2016-01-10  8:59 UTC (permalink / raw)


It's useful to iterate over all the active tags in cases
where we need to fail all of the queues' IO.
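
As an illustration, a transport driver could fail all of its started
requests during a reset roughly like this (a minimal sketch; the foo_*
names are hypothetical and not part of this series):

struct foo_ctrl {
	struct blk_mq_tag_set tag_set;
	/* ... */
};

static void foo_cancel_request(struct request *req, void *data, bool reserved)
{
	if (!blk_mq_request_started(req))
		return;		/* still owned by blk-mq, nothing to cancel */

	/* fail the started request back to the block layer */
	blk_mq_complete_request(req, -EIO);
}

static void foo_fail_inflight_io(struct foo_ctrl *ctrl)
{
	/* walk every started request in every hw queue of the tagset */
	blk_mq_tagset_busy_iter(&ctrl->tag_set, foo_cancel_request, ctrl);
}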

Signed-off-by: Sagi Grimberg <sagig at mellanox.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
---
 block/blk-mq-tag.c     | 10 ++++++++++
 include/linux/blk-mq.h |  2 ++
 2 files changed, 12 insertions(+)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index abdbb47405cb..39660ca8945b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -474,6 +474,16 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv)
+{
+	int i;
+
+	for (i = 0; i < tagset->nr_hw_queues; i++)
+		blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 7fc9296b5742..ecab7b2b6a06 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -240,6 +240,8 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 		void *priv);
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_mq_freeze_queue_start(struct request_queue *q);
-- 
1.8.4.3

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-01-10  8:59 [PATCH v1 0/3] Move active IO termination to the core Sagi Grimberg
  2016-01-10  8:59 ` [PATCH v1 1/3] blk-mq: Export tagset iter function Sagi Grimberg
@ 2016-01-10  8:59 ` Sagi Grimberg
  2016-01-15 16:48   ` Keith Busch
  2016-01-10  8:59 ` [PATCH v1 3/3] blk-mq: Make blk_mq_all_tag_busy_iter static Sagi Grimberg
  2016-01-13  7:56 ` [PATCH v1 0/3] Move active IO termination to the core Sagi Grimberg
  3 siblings, 1 reply; 15+ messages in thread
From: Sagi Grimberg @ 2016-01-10  8:59 UTC (permalink / raw)


blk-mq offers a tagset iterator, so let's use that instead of
nvme_clear_queue.

Note, nvme_cancel_queue_ios is renamed to nvme_cancel_io as the
function no longer operates on a single queue (the QID is also
dropped from the cancellation warning).

Signed-off-by: Sagi Grimberg <sagig at mellanox.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
---
 drivers/nvme/host/pci.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6e80390d8bd5..7872dd884c6b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -991,16 +991,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	return BLK_EH_RESET_TIMER;
 }
 
-static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
+static void nvme_cancel_io(struct request *req, void *data, bool reserved)
 {
-	struct nvme_queue *nvmeq = data;
+	struct nvme_dev *dev = data;
 	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	dev_warn(nvmeq->q_dmadev,
-		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
+	dev_warn(dev->dev, "Cancelling I/O %d\n", req->tag);
 
 	status = NVME_SC_ABORT_REQ;
 	if (blk_queue_dying(req->q))
@@ -1057,14 +1056,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	return 0;
 }
 
-static void nvme_clear_queue(struct nvme_queue *nvmeq)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	if (nvmeq->tags && *nvmeq->tags)
-		blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = dev->queues[0];
@@ -1836,8 +1827,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	}
 	nvme_dev_unmap(dev);
 
-	for (i = dev->queue_count - 1; i >= 0; i--)
-		nvme_clear_queue(dev->queues[i]);
+	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
+	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
 	mutex_unlock(&dev->shutdown_lock);
 }
 
-- 
1.8.4.3

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v1 3/3] blk-mq: Make blk_mq_all_tag_busy_iter static
  2016-01-10  8:59 [PATCH v1 0/3] Move active IO termination to the core Sagi Grimberg
  2016-01-10  8:59 ` [PATCH v1 1/3] blk-mq: Export tagset iter function Sagi Grimberg
  2016-01-10  8:59 ` [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination Sagi Grimberg
@ 2016-01-10  8:59 ` Sagi Grimberg
  2016-01-11  4:16   ` Christoph Hellwig
  2016-01-13  7:56 ` [PATCH v1 0/3] Move active IO termination to the core Sagi Grimberg
  3 siblings, 1 reply; 15+ messages in thread
From: Sagi Grimberg @ 2016-01-10  8:59 UTC (permalink / raw)


There are no callers outside the blk-mq code,
so we can make it static.

Signed-off-by: Sagi Grimberg <sagig at mellanox.com>
---
 block/blk-mq-tag.c     | 5 ++---
 include/linux/blk-mq.h | 2 --
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 39660ca8945b..be635faaeae8 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -464,15 +464,14 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
 	}
 }
 
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
-		void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+		busy_tag_iter_fn *fn, void *priv)
 {
 	if (tags->nr_reserved_tags)
 		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
 	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
 			false);
 }
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ecab7b2b6a06..60a19aaf51ad 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -238,8 +238,6 @@ void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
-		void *priv);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
-- 
1.8.4.3

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v1 3/3] blk-mq: Make blk_mq_all_tag_busy_iter static
  2016-01-10  8:59 ` [PATCH v1 3/3] blk-mq: Make blk_mq_all_tag_busy_iter static Sagi Grimberg
@ 2016-01-11  4:16   ` Christoph Hellwig
  0 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2016-01-11  4:16 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 0/3] Move active IO termination to the core
  2016-01-10  8:59 [PATCH v1 0/3] Move active IO termination to the core Sagi Grimberg
                   ` (2 preceding siblings ...)
  2016-01-10  8:59 ` [PATCH v1 3/3] blk-mq: Make blk_mq_all_tag_busy_iter static Sagi Grimberg
@ 2016-01-13  7:56 ` Sagi Grimberg
  3 siblings, 0 replies; 15+ messages in thread
From: Sagi Grimberg @ 2016-01-13  7:56 UTC (permalink / raw)


Keith, can you have a look?

It would help to get your ack.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-01-10  8:59 ` [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination Sagi Grimberg
@ 2016-01-15 16:48   ` Keith Busch
  0 siblings, 0 replies; 15+ messages in thread
From: Keith Busch @ 2016-01-15 16:48 UTC (permalink / raw)


On Sun, Jan 10, 2016 at 10:59:48AM +0200, Sagi Grimberg wrote:
> blk-mq offers a tagset iterator, so let's use that instead of
> nvme_clear_queue.
> 
> Note, nvme_cancel_queue_ios is renamed to nvme_cancel_io as the
> function no longer operates on a single queue (the QID is also
> dropped from the cancellation warning).
> 
> Signed-off-by: Sagi Grimberg <sagig at mellanox.com>
> Reviewed-by: Christoph Hellwig <hch at lst.de>

Looks good to me.

Acked-by: Keith Busch <keith.busch at intel.com>

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-02-04 15:44             ` Keith Busch
@ 2016-02-05  6:53               ` Wenbo Wang
  0 siblings, 0 replies; 15+ messages in thread
From: Wenbo Wang @ 2016-02-05  6:53 UTC (permalink / raw)


Thanks.

The problem is that just after nvme_cancel_queue_ios checks that the request is started, someone else may move the request to q->requeue_list.
The patch I proposed below has this issue: queue_rq returns BLK_MQ_RQ_QUEUE_BUSY to __blk_mq_run_hw_queue, which re-queues the request to q->queue_list.
The solution may be to call blk_mq_start_request after checking cq_vector; however, I suspect some other code may break nvme_cancel_queue_ios too. A rough sketch of that reordering follows the quoted patch below.

Would adding a per-request bit lock, and using that lock to synchronize these paths, be a general solution?

Subject: [PATCH v2] NVMe: do not touch sq door bell if nvmeq has been suspended
>@@ -678,6 +678,11 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
> 	blk_mq_start_request(req);
> 
> 	spin_lock_irq(&nvmeq->q_lock);
>+	if (unlikely(nvmeq->cq_vector == -1)) {
>+		ret = BLK_MQ_RQ_QUEUE_BUSY;
>+		spin_unlock_irq(&nvmeq->q_lock);
>+		goto out;
>+	}
> 	__nvme_submit_cmd(nvmeq, &cmnd);
> 	nvme_process_cq(nvmeq);
> 	spin_unlock_irq(&nvmeq->q_lock);
>--
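
For clarity, the reordering mentioned above would look roughly like this
(an untested sketch against the same nvme_queue_rq context; details are
illustrative only):

	spin_lock_irq(&nvmeq->q_lock);
	if (unlikely(nvmeq->cq_vector == -1)) {
		/* queue already suspended: let blk-mq keep/requeue the request */
		ret = BLK_MQ_RQ_QUEUE_BUSY;
		spin_unlock_irq(&nvmeq->q_lock);
		goto out;
	}
	/* only mark the request started once we know it will be submitted */
	blk_mq_start_request(req);
	__nvme_submit_cmd(nvmeq, &cmnd);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);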



-----Original Message-----
From: Keith Busch [mailto:keith.busch@intel.com] 
Sent: Thursday, February 4, 2016 11:45 PM
To: Wenbo Wang
Cc: Sagi Grimberg; Jens Axboe; linux-nvme at lists.infradead.org
Subject: Re: [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination

On Thu, Feb 04, 2016 at 02:28:24PM +0000, Wenbo Wang wrote:
> Is the following execution valid?
> 
> 1. request A is linked in q->request_list

q->request_list? Do you mean q->requeue_list?

> 2. device reset
> 3. all requests (including request A) are cancelled by 
> nvme_dev_disable()->nvme_clear_queue()

If the request is on some list owned by the request_queue, it was not started by the driver, and nvme_clear_queue doesn't do anything to it.

> 4. device restarted
> 5. flush q->request_list, and request A is again running in queue_rq.         <-- since request A has been cancelled, it shall not be running again

Request A was not cancelled if it was never started. It will be submitted when the driver restarts the queues h/w contexts.

Cancelling a command with the controller does not mean ending the request in the block layer.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-02-04 14:28           ` Wenbo Wang
@ 2016-02-04 15:44             ` Keith Busch
  2016-02-05  6:53               ` Wenbo Wang
  0 siblings, 1 reply; 15+ messages in thread
From: Keith Busch @ 2016-02-04 15:44 UTC (permalink / raw)


On Thu, Feb 04, 2016 at 02:28:24PM +0000, Wenbo Wang wrote:
> Is the following execution valid?
> 
> 1. request A is linked in q->request_list

q->request_list? Do you mean q->requeue_list?

> 2. device reset
> 3. all requests (including request A) are cancelled by nvme_dev_disable()->nvme_clear_queue()

If the request is on some list owned by the request_queue, it was not
started by the driver, and nvme_clear_queue doesn't do anything to it.

> 4. device restarted
> 5. flush q->request_list, and request A is again running in queue_rq.         <-- since request A has been cancelled, it shall not be running again

Request A was not cancelled if it was never started. It will be submitted
when the driver restarts the queues h/w contexts.

Cancelling a command with the controller does not mean ending the request
in the block layer.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-02-03  1:26         ` Keith Busch
@ 2016-02-04 14:28           ` Wenbo Wang
  2016-02-04 15:44             ` Keith Busch
  0 siblings, 1 reply; 15+ messages in thread
From: Wenbo Wang @ 2016-02-04 14:28 UTC (permalink / raw)


Is the following execution valid?

1. request A is linked in q->request_list
2. device reset
3. all requests (including request A) are cancelled by nvme_dev_disable()->nvme_clear_queue()
4. device restarted
5. flush q->request_list, and request A is again running in queue_rq.         <-- since request A has been cancelled, it shall not be running again

-----Original Message-----
From: Keith Busch [mailto:keith.busch@intel.com] 
Sent: Wednesday, February 3, 2016 9:27 AM
To: Wenbo Wang
Cc: Sagi Grimberg; Jens Axboe; linux-nvme at lists.infradead.org
Subject: Re: [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination

On Wed, Feb 03, 2016 at 12:35:55AM +0000, Wenbo Wang wrote:
> After canceling these requests, the requests and tags are freed. If the queues are restarted shortly after, will these already-freed requests go through queue_rq again? This does not seem correct.

That's not true. We only set DNR if the queue is dead. Otherwise the requests are eligible for requeuing if they haven't exceeded total time.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-02-03  0:35       ` Wenbo Wang
@ 2016-02-03  1:26         ` Keith Busch
  2016-02-04 14:28           ` Wenbo Wang
  0 siblings, 1 reply; 15+ messages in thread
From: Keith Busch @ 2016-02-03  1:26 UTC (permalink / raw)


On Wed, Feb 03, 2016 at 12:35:55AM +0000, Wenbo Wang wrote:
> After canceling these requests, the requests and tags are freed. If the queues are restarted shortly after, will these already-freed requests go through queue_rq again? This does not seem correct.

That's not true. We only set DNR if the queue is dead. Otherwise the
requests are eligible for requeuing if they haven't exceeded total time.
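
For reference, the relevant part of the cancel callback looks roughly
like this (paraphrased sketch; the warning print is omitted):

static void nvme_cancel_io(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;		/* never handed to the driver, nothing to cancel */

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;	/* queue is dead: do not retry/requeue */
	blk_mq_complete_request(req, status);
}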

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-02-02 16:49     ` Keith Busch
@ 2016-02-03  0:35       ` Wenbo Wang
  2016-02-03  1:26         ` Keith Busch
  0 siblings, 1 reply; 15+ messages in thread
From: Wenbo Wang @ 2016-02-03  0:35 UTC (permalink / raw)


After canceling these requests, the requests and tags are freed. If the queues are restarted shortly after, will these already-freed requests go through queue_rq again? This does not seem correct.

-----Original Message-----
From: Keith Busch [mailto:keith.busch@intel.com] 
Sent: Wednesday, February 3, 2016 12:49 AM
To: Wenbo Wang
Cc: Sagi Grimberg; Jens Axboe; linux-nvme at lists.infradead.org
Subject: Re: [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination

On Tue, Feb 02, 2016 at 04:32:33PM +0000, Wenbo Wang wrote:
> If some requests are in q->request_list or ctx->rq_list, does nvme_cancel_io remove them from these lists? If not, will it cause any issue?

It's fine to let them queue there if we expect to restart the controller. We're going to restart the queues shortly.

If we're about to kill the queue, blk-mq's timeout handler kills unstarted requests.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-02-02 16:32   ` Wenbo Wang
@ 2016-02-02 16:49     ` Keith Busch
  2016-02-03  0:35       ` Wenbo Wang
  0 siblings, 1 reply; 15+ messages in thread
From: Keith Busch @ 2016-02-02 16:49 UTC (permalink / raw)


On Tue, Feb 02, 2016 at 04:32:33PM +0000, Wenbo Wang wrote:
> If some requests are in q->request_list or ctx->rq_list, does nvme_cancel_io remove them from these lists? If not, will it cause any issue?

It's fine to let them queue there if we expect to restart the
controller. We're going to restart the queues shortly.

If we're about to kill the queue, blk-mq's timeout handler kills
unstarted requests.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-02-02 12:44 ` [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination Sagi Grimberg
@ 2016-02-02 16:32   ` Wenbo Wang
  2016-02-02 16:49     ` Keith Busch
  0 siblings, 1 reply; 15+ messages in thread
From: Wenbo Wang @ 2016-02-02 16:32 UTC (permalink / raw)


If some requests are in q->request_list or ctx->rq_list, does nvme_cancel_io remove them from these lists? If not, will it cause any issue?

-----Original Message-----
From: Linux-nvme [mailto:linux-nvme-bounces@lists.infradead.org] On Behalf Of Sagi Grimberg
Sent: Tuesday, February 2, 2016 8:45 PM
To: Jens Axboe
Cc: linux-nvme at lists.infradead.org
Subject: [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination

blk-mq offers a tagset iterator, so let's use that instead of nvme_clear_queue.

Note, nvme_cancel_queue_ios is renamed to nvme_cancel_io as the function no longer operates on a single queue (the QID is also dropped from the cancellation warning).

Signed-off-by: Sagi Grimberg <sagig at mellanox.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
Acked-by: Keith Busch <keith.busch at intel.com>
---
 drivers/nvme/host/pci.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6e80390d8bd5..7872dd884c6b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -991,16 +991,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	return BLK_EH_RESET_TIMER;
 }
 
-static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
+static void nvme_cancel_io(struct request *req, void *data, bool reserved)
 {
-	struct nvme_queue *nvmeq = data;
+	struct nvme_dev *dev = data;
 	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	dev_warn(nvmeq->q_dmadev,
-		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
+	dev_warn(dev->dev, "Cancelling I/O %d\n", req->tag);
 
 	status = NVME_SC_ABORT_REQ;
 	if (blk_queue_dying(req->q))
@@ -1057,14 +1056,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	return 0;
 }
 
-static void nvme_clear_queue(struct nvme_queue *nvmeq)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	if (nvmeq->tags && *nvmeq->tags)
-		blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = dev->queues[0];
@@ -1836,8 +1827,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	}
 	nvme_dev_unmap(dev);
 
-	for (i = dev->queue_count - 1; i >= 0; i--)
-		nvme_clear_queue(dev->queues[i]);
+	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
+	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
 	mutex_unlock(&dev->shutdown_lock);
 }
 
--
1.8.4.3


_______________________________________________
Linux-nvme mailing list
Linux-nvme at lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination
  2016-02-02 12:44 [PATCH RESEND " Sagi Grimberg
@ 2016-02-02 12:44 ` Sagi Grimberg
  2016-02-02 16:32   ` Wenbo Wang
  0 siblings, 1 reply; 15+ messages in thread
From: Sagi Grimberg @ 2016-02-02 12:44 UTC (permalink / raw)


blk-mq offers a tagset iterator, so let's use that instead of
nvme_clear_queue.

Note, nvme_cancel_queue_ios is renamed to nvme_cancel_io as the
function no longer operates on a single queue (the QID is also
dropped from the cancellation warning).

Signed-off-by: Sagi Grimberg <sagig at mellanox.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
Acked-by: Keith Busch <keith.busch at intel.com>
---
 drivers/nvme/host/pci.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6e80390d8bd5..7872dd884c6b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -991,16 +991,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	return BLK_EH_RESET_TIMER;
 }
 
-static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
+static void nvme_cancel_io(struct request *req, void *data, bool reserved)
 {
-	struct nvme_queue *nvmeq = data;
+	struct nvme_dev *dev = data;
 	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	dev_warn(nvmeq->q_dmadev,
-		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
+	dev_warn(dev->dev, "Cancelling I/O %d\n", req->tag);
 
 	status = NVME_SC_ABORT_REQ;
 	if (blk_queue_dying(req->q))
@@ -1057,14 +1056,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	return 0;
 }
 
-static void nvme_clear_queue(struct nvme_queue *nvmeq)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	if (nvmeq->tags && *nvmeq->tags)
-		blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = dev->queues[0];
@@ -1836,8 +1827,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	}
 	nvme_dev_unmap(dev);
 
-	for (i = dev->queue_count - 1; i >= 0; i--)
-		nvme_clear_queue(dev->queues[i]);
+	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
+	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
 	mutex_unlock(&dev->shutdown_lock);
 }
 
-- 
1.8.4.3

^ permalink raw reply related	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2016-02-05  6:53 UTC | newest]

Thread overview: 15+ messages
2016-01-10  8:59 [PATCH v1 0/3] Move active IO termination to the core Sagi Grimberg
2016-01-10  8:59 ` [PATCH v1 1/3] blk-mq: Export tagset iter function Sagi Grimberg
2016-01-10  8:59 ` [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination Sagi Grimberg
2016-01-15 16:48   ` Keith Busch
2016-01-10  8:59 ` [PATCH v1 3/3] blk-mq: Make blk_mq_all_tag_busy_iter static Sagi Grimberg
2016-01-11  4:16   ` Christoph Hellwig
2016-01-13  7:56 ` [PATCH v1 0/3] Move active IO termination to the core Sagi Grimberg
2016-02-02 12:44 [PATCH RESEND " Sagi Grimberg
2016-02-02 12:44 ` [PATCH v1 2/3] nvme: Use blk-mq helper for IO termination Sagi Grimberg
2016-02-02 16:32   ` Wenbo Wang
2016-02-02 16:49     ` Keith Busch
2016-02-03  0:35       ` Wenbo Wang
2016-02-03  1:26         ` Keith Busch
2016-02-04 14:28           ` Wenbo Wang
2016-02-04 15:44             ` Keith Busch
2016-02-05  6:53               ` Wenbo Wang
