From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@fb.com>, Keith Busch <keith.busch@intel.com>,
	Sagi Grimberg <sagi@grimberg.me>
Cc: Max Gurtovoy <maxg@mellanox.com>,
	linux-nvme@lists.infradead.org, linux-block@vger.kernel.org
Subject: [PATCH 02/13] nvme-pci: use atomic bitops to mark a queue enabled
Date: Thu, 29 Nov 2018 20:12:59 +0100
Message-ID: <20181129191310.9795-3-hch@lst.de>
In-Reply-To: <20181129191310.9795-1-hch@lst.de>

This gets rid of all the messing with cq_vector and the ->polled field
by using an atomic bitop to mark whether the queue is enabled.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
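For reference, a minimal sketch of the enable/disable pattern the diff
below introduces, built only on the generic <linux/bitops.h> helpers;
the struct and function names here are illustrative, not the driver's:

#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct example_queue {
	unsigned long flags;
#define EXQ_ENABLED	0	/* bit number, analogous to NVMEQ_ENABLED */
};

/* lockless fast-path check, replacing the cq_vector/->polled tests */
static bool exq_may_submit(struct example_queue *q)
{
	return test_bit(EXQ_ENABLED, &q->flags);
}

/* atomic test-and-clear: only one caller wins and performs the teardown */
static int exq_disable(struct example_queue *q)
{
	if (!test_and_clear_bit(EXQ_ENABLED, &q->flags))
		return 1;
	mb();	/* make sure submitters see the bit cleared before teardown */
	/* ... actual teardown (free the IRQ, etc.) would go here ... */
	return 0;
}

/* re-arm only once the queue has been successfully (re)created */
static void exq_enable(struct example_queue *q)
{
	set_bit(EXQ_ENABLED, &q->flags);
}
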
 drivers/nvme/host/pci.c | 43 ++++++++++++++---------------------------
 1 file changed, 15 insertions(+), 28 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a1bb4bb92e7f..022395a319f4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -201,7 +201,8 @@ struct nvme_queue {
 	u16 last_cq_head;
 	u16 qid;
 	u8 cq_phase;
-	u8 polled;
+	unsigned long flags;
+#define NVMEQ_ENABLED		0
 	u32 *dbbuf_sq_db;
 	u32 *dbbuf_cq_db;
 	u32 *dbbuf_sq_ei;
@@ -927,7 +928,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * We should not need to do this, but we're still using this to
 	 * ensure we can drain requests on a dying queue.
 	 */
-	if (unlikely(nvmeq->cq_vector < 0 && !nvmeq->polled))
+	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
 		return BLK_STS_IOERR;
 
 	ret = nvme_setup_cmd(ns, req, &cmnd);
@@ -1395,31 +1396,19 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
  */
 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-	int vector;
-
-	spin_lock_irq(&nvmeq->cq_lock);
-	if (nvmeq->cq_vector == -1 && !nvmeq->polled) {
-		spin_unlock_irq(&nvmeq->cq_lock);
+	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
 		return 1;
-	}
-	vector = nvmeq->cq_vector;
-	nvmeq->dev->online_queues--;
-	nvmeq->cq_vector = -1;
-	nvmeq->polled = false;
-	spin_unlock_irq(&nvmeq->cq_lock);
 
-	/*
-	 * Ensure that nvme_queue_rq() sees it ->cq_vector == -1 without
-	 * having to grab the lock.
-	 */
+	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
 	mb();
 
+	nvmeq->dev->online_queues--;
 	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
 		blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
-
-	if (vector != -1)
-		pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
-
+	if (nvmeq->cq_vector == -1)
+		return 0;
+	pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
+	nvmeq->cq_vector = -1;
 	return 0;
 }
 
@@ -1578,13 +1567,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 	else if (result)
 		goto release_cq;
 
-	/*
-	 * Set cq_vector after alloc cq/sq, otherwise nvme_suspend_queue will
-	 * invoke free_irq for it and cause a 'Trying to free already-free IRQ
-	 * xxx' warning if the create CQ/SQ command times out.
-	 */
 	nvmeq->cq_vector = vector;
-	nvmeq->polled = polled;
 	nvme_init_queue(nvmeq, qid);
 
 	if (vector != -1) {
@@ -1593,11 +1576,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 			goto release_sq;
 	}
 
+	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
 	return result;
 
 release_sq:
 	nvmeq->cq_vector = -1;
-	nvmeq->polled = false;
 	dev->online_queues--;
 	adapter_delete_sq(dev, qid);
 release_cq:
@@ -1751,6 +1734,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 		return result;
 	}
 
+	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
 	return result;
 }
 
@@ -2173,6 +2157,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 
 	if (nr_io_queues == 0)
 		return 0;
+
+	clear_bit(NVMEQ_ENABLED, &adminq->flags);
 
 	if (dev->cmb_use_sqes) {
 		result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -2227,6 +2213,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 		adminq->cq_vector = -1;
 		return result;
 	}
+	set_bit(NVMEQ_ENABLED, &adminq->flags);
 	return nvme_create_io_queues(dev);
 }
 
-- 
2.19.1

