* nvme: remove the global device list and kthread
@ 2016-02-17 18:03 Christoph Hellwig
  2016-02-17 18:03 ` [PATCH 1/3] nvme: use a work item to submit async event requests Christoph Hellwig
                   ` (2 more replies)
  0 siblings, 3 replies; 13+ messages in thread
From: Christoph Hellwig @ 2016-02-17 18:03 UTC


This series drops the global nvme thread and the device list that was only
needed for it.  Note that I haven't been able to trigger an AER with
my test controller, but the code is based on work for the NVMe over
Fabrics driver, where it has been properly tested.


* [PATCH 1/3] nvme: use a work item to submit async event requests
  2016-02-17 18:03 nvme: remove the global device list and kthread Christoph Hellwig
@ 2016-02-17 18:03 ` Christoph Hellwig
  2016-02-17 18:37   ` Keith Busch
                     ` (2 more replies)
  2016-02-17 18:03 ` [PATCH 2/3] nvme: don't poll the CQ from the kthread Christoph Hellwig
  2016-02-17 18:03 ` [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer Christoph Hellwig
  2 siblings, 3 replies; 13+ messages in thread
From: Christoph Hellwig @ 2016-02-17 18:03 UTC


Use a dedicated work item to submit async event requests instead of the
global kthread.  This simplifies the code and reduces the latency of
resubmitting a request once an event notification arrives.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/pci.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fec7479..21b0be4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -100,6 +100,7 @@ struct nvme_dev {
 	struct work_struct reset_work;
 	struct work_struct scan_work;
 	struct work_struct remove_work;
+	struct work_struct async_work;
 	struct mutex shutdown_lock;
 	bool subsystem;
 	void __iomem *cmb;
@@ -281,8 +282,11 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
 	u16 status = le16_to_cpu(cqe->status) >> 1;
 	u32 result = le32_to_cpu(cqe->result);
 
-	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
+	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
 		++dev->ctrl.event_limit;
+		queue_work(nvme_workq, &dev->async_work);
+	}
+
 	if (status != NVME_SC_SUCCESS)
 		return;
 
@@ -816,15 +820,22 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 	return 0;
 }
 
-static void nvme_submit_async_event(struct nvme_dev *dev)
+static void nvme_async_event_work(struct work_struct *work)
 {
+	struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
+	struct nvme_queue *nvmeq = dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
-	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + --dev->ctrl.event_limit;
 
-	__nvme_submit_cmd(dev->queues[0], &c);
+	spin_lock_irq(&nvmeq->q_lock);
+	while (dev->ctrl.event_limit > 0) {
+		c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
+			--dev->ctrl.event_limit;
+		__nvme_submit_cmd(nvmeq, &c);
+	}
+	spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1358,9 +1369,6 @@ static int nvme_kthread(void *data)
 					continue;
 				spin_lock_irq(&nvmeq->q_lock);
 				nvme_process_cq(nvmeq);
-
-				while (i == 0 && dev->ctrl.event_limit > 0)
-					nvme_submit_async_event(dev);
 				spin_unlock_irq(&nvmeq->q_lock);
 			}
 		}
@@ -1929,6 +1937,7 @@ static void nvme_reset_work(struct work_struct *work)
 		goto free_tags;
 
 	dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+	queue_work(nvme_workq, &dev->async_work);
 
 	result = nvme_dev_list_add(dev);
 	if (result)
@@ -2062,6 +2071,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_WORK(&dev->scan_work, nvme_dev_scan);
 	INIT_WORK(&dev->reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
+	INIT_WORK(&dev->async_work, nvme_async_event_work);
 	mutex_init(&dev->shutdown_lock);
 	init_completion(&dev->ioq_wait);
 
@@ -2115,6 +2125,7 @@ static void nvme_remove(struct pci_dev *pdev)
 	spin_unlock(&dev_list_lock);
 
 	pci_set_drvdata(pdev, NULL);
+	flush_work(&dev->async_work);
 	flush_work(&dev->reset_work);
 	flush_work(&dev->scan_work);
 	nvme_remove_namespaces(&dev->ctrl);
-- 
2.1.4
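
The mechanism the patch relies on is the standard workqueue deferred-work
idiom: embed a work_struct in the per-device structure, bind it to a handler
with INIT_WORK(), kick it with queue_work() from the completion path, and
recover the containing device in the handler via container_of().  A minimal
sketch of that idiom, detached from the driver (my_dev, my_wq and the
function names below are placeholders, not NVMe symbols):

	#include <linux/workqueue.h>

	struct my_dev {
		struct work_struct event_work;
		/* ... other per-device state ... */
	};

	static struct workqueue_struct *my_wq;

	/* Runs in process context on my_wq, so it may sleep. */
	static void my_handle_events(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev,
						  event_work);

		/* resubmit this device's async event requests here */
	}

	static void my_probe(struct my_dev *dev)
	{
		INIT_WORK(&dev->event_work, my_handle_events);
	}

	/* Completion path; queue_work() is safe even in hard-IRQ context. */
	static void my_event_done(struct my_dev *dev)
	{
		queue_work(my_wq, &dev->event_work);
	}

	static void my_remove(struct my_dev *dev)
	{
		flush_work(&dev->event_work);	/* wait out a running instance */
	}

Because queue_work() may be called from the interrupt-driven completion
handler, the driver no longer needs a polling thread to notice that an AER
slot has freed up; nvme_complete_async_event() simply requeues the work.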


* [PATCH 2/3] nvme: don't poll the CQ from the kthread
  2016-02-17 18:03 nvme: remove the global device list and kthread Christoph Hellwig
  2016-02-17 18:03 ` [PATCH 1/3] nvme: use a work item to submit async event requests Christoph Hellwig
@ 2016-02-17 18:03 ` Christoph Hellwig
  2016-02-17 18:44   ` Keith Busch
                     ` (2 more replies)
  2016-02-17 18:03 ` [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer Christoph Hellwig
  2 siblings, 3 replies; 13+ messages in thread
From: Christoph Hellwig @ 2016-02-17 18:03 UTC


Per the NVMe spec, there is no reason to poll the CQs unconditionally.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/pci.c | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 21b0be4..10839f7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1156,9 +1156,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
 	dev->queues[qid] = nvmeq;
-
-	/* make sure queue descriptor is set before queue count, for kthread */
-	mb();
 	dev->queue_count++;
 
 	return nvmeq;
@@ -1345,7 +1342,6 @@ static int nvme_kthread(void *data)
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
 		list_for_each_entry_safe(dev, next, &dev_list, node) {
-			int i;
 			u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
 			/*
@@ -1363,14 +1359,6 @@ static int nvme_kthread(void *data)
 				}
 				continue;
 			}
-			for (i = 0; i < dev->queue_count; i++) {
-				struct nvme_queue *nvmeq = dev->queues[i];
-				if (!nvmeq)
-					continue;
-				spin_lock_irq(&nvmeq->q_lock);
-				nvme_process_cq(nvmeq);
-				spin_unlock_irq(&nvmeq->q_lock);
-			}
 		}
 		spin_unlock(&dev_list_lock);
 		schedule_timeout(round_jiffies_relative(HZ));
-- 
2.1.4
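
The deleted sweep was redundant because every completion queue is already
reaped from interrupt context (and, for polled I/O, from nvme_poll()).  For
reference, the per-queue interrupt handler of this era looked essentially
like the following; this is paraphrased from memory of the contemporary
drivers/nvme/host/pci.c, so treat it as a sketch rather than a quote:

	static irqreturn_t nvme_irq(int irq, void *data)
	{
		irqreturn_t result;
		struct nvme_queue *nvmeq = data;

		spin_lock(&nvmeq->q_lock);
		nvme_process_cq(nvmeq);	/* reap completions under q_lock */
		result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
		nvmeq->cqe_seen = 0;
		spin_unlock(&nvmeq->q_lock);
		return result;
	}

In other words, the once-a-second pass from the kthread could only matter
when an interrupt was lost, and as Keith notes in his review below, it
mostly served to hide that kind of breakage.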


* [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer
  2016-02-17 18:03 nvme: remove the global device list and kthread Christoph Hellwig
  2016-02-17 18:03 ` [PATCH 1/3] nvme: use a work item to submit async event requests Christoph Hellwig
  2016-02-17 18:03 ` [PATCH 2/3] nvme: don't poll the CQ from the kthread Christoph Hellwig
@ 2016-02-17 18:03 ` Christoph Hellwig
  2016-02-17 18:44   ` Keith Busch
                     ` (2 more replies)
  2 siblings, 3 replies; 13+ messages in thread
From: Christoph Hellwig @ 2016-02-17 18:03 UTC


The only work left in the kthread is the periodic health check for each
controller.  There is no need to run this from process context or keep
a thread context around for it, so replace it with a simpler timer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/pci.c | 112 ++++++++++--------------------------------------
 1 file changed, 23 insertions(+), 89 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 10839f7..a623360 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -27,7 +27,6 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kdev_t.h>
-#include <linux/kthread.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -39,6 +38,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/t10-pi.h>
+#include <linux/timer.h>
 #include <linux/types.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <asm/unaligned.h>
@@ -64,11 +64,7 @@ static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0644);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
 
-static LIST_HEAD(dev_list);
-static DEFINE_SPINLOCK(dev_list_lock);
-static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
-static wait_queue_head_t nvme_kthread_wait;
 
 struct nvme_dev;
 struct nvme_queue;
@@ -82,7 +78,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
-	struct list_head node;
 	struct nvme_queue **queues;
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
@@ -101,6 +96,7 @@ struct nvme_dev {
 	struct work_struct scan_work;
 	struct work_struct remove_work;
 	struct work_struct async_work;
+	struct timer_list watchdog_timer;
 	struct mutex shutdown_lock;
 	bool subsystem;
 	void __iomem *cmb;
@@ -1334,36 +1330,26 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	return result;
 }
 
-static int nvme_kthread(void *data)
+static void nvme_watchdog_timer(unsigned long data)
 {
-	struct nvme_dev *dev, *next;
+	struct nvme_dev *dev = (struct nvme_dev *)data;
+	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		spin_lock(&dev_list_lock);
-		list_for_each_entry_safe(dev, next, &dev_list, node) {
-			u32 csts = readl(dev->bar + NVME_REG_CSTS);
-
-			/*
-			 * Skip controllers currently under reset.
-			 */
-			if (work_pending(&dev->reset_work) || work_busy(&dev->reset_work))
-				continue;
-
-			if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
-							csts & NVME_CSTS_CFS) {
-				if (queue_work(nvme_workq, &dev->reset_work)) {
-					dev_warn(dev->ctrl.device,
-						"Failed status: %x, reset controller\n",
-						readl(dev->bar + NVME_REG_CSTS));
-				}
-				continue;
-			}
+	/*
+	 * Skip controllers currently under reset.
+	 */
+	if (!work_pending(&dev->reset_work) && !work_busy(&dev->reset_work) &&
+	    ((csts & NVME_CSTS_CFS) ||
+	     (dev->subsystem && (csts & NVME_CSTS_NSSRO)))) {
+		if (queue_work(nvme_workq, &dev->reset_work)) {
+			dev_warn(dev->dev,
+				"Failed status: 0x%x, reset controller.\n",
+				csts);
 		}
-		spin_unlock(&dev_list_lock);
-		schedule_timeout(round_jiffies_relative(HZ));
+		return;
 	}
-	return 0;
+
+	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
 }
 
 static int nvme_create_io_queues(struct nvme_dev *dev)
@@ -1777,56 +1763,12 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
 	}
 }
 
-static int nvme_dev_list_add(struct nvme_dev *dev)
-{
-	bool start_thread = false;
-
-	spin_lock(&dev_list_lock);
-	if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
-		start_thread = true;
-		nvme_thread = NULL;
-	}
-	list_add(&dev->node, &dev_list);
-	spin_unlock(&dev_list_lock);
-
-	if (start_thread) {
-		nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-		wake_up_all(&nvme_kthread_wait);
-	} else
-		wait_event_killable(nvme_kthread_wait, nvme_thread);
-
-	if (IS_ERR_OR_NULL(nvme_thread))
-		return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
-
-	return 0;
-}
-
-/*
-* Remove the node from the device list and check
-* for whether or not we need to stop the nvme_thread.
-*/
-static void nvme_dev_list_remove(struct nvme_dev *dev)
-{
-	struct task_struct *tmp = NULL;
-
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
-		tmp = nvme_thread;
-		nvme_thread = NULL;
-	}
-	spin_unlock(&dev_list_lock);
-
-	if (tmp)
-		kthread_stop(tmp);
-}
-
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
 	int i;
 	u32 csts = -1;
 
-	nvme_dev_list_remove(dev);
+	del_timer_sync(&dev->watchdog_timer);
 
 	mutex_lock(&dev->shutdown_lock);
 	if (dev->bar) {
@@ -1927,9 +1869,7 @@ static void nvme_reset_work(struct work_struct *work)
 	dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
 	queue_work(nvme_workq, &dev->async_work);
 
-	result = nvme_dev_list_add(dev);
-	if (result)
-		goto remove;
+	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
 
 	/*
 	 * Keep the controller around but remove all namespaces if we don't have
@@ -1946,8 +1886,6 @@ static void nvme_reset_work(struct work_struct *work)
 	clear_bit(NVME_CTRL_RESETTING, &dev->flags);
 	return;
 
- remove:
-	nvme_dev_list_remove(dev);
  free_tags:
 	nvme_dev_remove_admin(dev);
 	blk_put_queue(dev->ctrl.admin_q);
@@ -2055,11 +1993,12 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev->dev = get_device(&pdev->dev);
 	pci_set_drvdata(pdev, dev);
 
-	INIT_LIST_HEAD(&dev->node);
 	INIT_WORK(&dev->scan_work, nvme_dev_scan);
 	INIT_WORK(&dev->reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
 	INIT_WORK(&dev->async_work, nvme_async_event_work);
+	setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
+		(unsigned long)dev);
 	mutex_init(&dev->shutdown_lock);
 	init_completion(&dev->ioq_wait);
 
@@ -2108,9 +2047,7 @@ static void nvme_remove(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
+	del_timer_sync(&dev->watchdog_timer);
 
 	pci_set_drvdata(pdev, NULL);
 	flush_work(&dev->async_work);
@@ -2223,8 +2160,6 @@ static int __init nvme_init(void)
 {
 	int result;
 
-	init_waitqueue_head(&nvme_kthread_wait);
-
 	nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 	if (!nvme_workq)
 		return -ENOMEM;
@@ -2239,7 +2174,6 @@ static void __exit nvme_exit(void)
 {
 	pci_unregister_driver(&nvme_driver);
 	destroy_workqueue(nvme_workq);
-	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 	_nvme_check_size();
 }
 
-- 
2.1.4
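
The timer API used here is the pre-4.15 one, where the callback receives an
unsigned long cookie that the driver casts back to its device pointer.
Condensed from the diff above, the lifecycle looks like this (a sketch, with
error paths omitted and the call sites indicated in comments):

	#include <linux/timer.h>

	static void nvme_watchdog_timer(unsigned long data)
	{
		struct nvme_dev *dev = (struct nvme_dev *)data;

		/* check CSTS and queue reset_work if the controller
		 * failed; otherwise re-arm for another pass in roughly
		 * one second:
		 */
		mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
	}

	/* probe: bind callback and cookie, but don't arm yet */
	setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
			(unsigned long)dev);

	/* reset_work: arm once the controller is up */
	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));

	/* disable/remove: wait out a concurrently running callback */
	del_timer_sync(&dev->watchdog_timer);

Note that the callback deliberately does not re-arm itself once it sees a
failed controller and queues a reset; the watchdog stays quiet until
nvme_reset_work() arms it again.  On kernels 4.15 and later the same
structure would be written with timer_setup() and a callback taking a
struct timer_list *, recovered via from_timer(); the unsigned long form
above matches the kernels this patch targets.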


* [PATCH 1/3] nvme: use a work item to submit async event requests
  2016-02-17 18:03 ` [PATCH 1/3] nvme: use a work item to submit async event requests Christoph Hellwig
@ 2016-02-17 18:37   ` Keith Busch
  2016-02-18  8:32   ` Johannes Thumshirn
  2016-02-22  8:37   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Keith Busch @ 2016-02-17 18:37 UTC


On Wed, Feb 17, 2016 at 07:03:12PM +0100, Christoph Hellwig wrote:
> Use a dedicated work item to submit async event requests instead of the
> global kthread.  This simplifies the code and reduces the latency of
> resubmitting a request once an event notification arrives.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Keith Busch <keith.busch@intel.com>


* [PATCH 2/3] nvme: don't poll the CQ from the kthread
  2016-02-17 18:03 ` [PATCH 2/3] nvme: don't poll the CQ from the kthread Christoph Hellwig
@ 2016-02-17 18:44   ` Keith Busch
  2016-02-18  8:34   ` Johannes Thumshirn
  2016-02-22  8:38   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Keith Busch @ 2016-02-17 18:44 UTC


On Wed, Feb 17, 2016 at 07:03:13PM +0100, Christoph Hellwig wrote:
> Per the NVMe spec, there is no reason to poll the CQs unconditionally.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good. Now we'll find out if anyone was unknowingly relying on
this. :)

The legacy IRQ used for the very first NVMe command after reset tripped
up some devices in the past, but the polling hid the breakage.

Reviewed-by: Keith Busch <keith.busch@intel.com>


* [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer
  2016-02-17 18:03 ` [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer Christoph Hellwig
@ 2016-02-17 18:44   ` Keith Busch
  2016-02-18  8:36   ` Johannes Thumshirn
  2016-02-22  8:43   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Keith Busch @ 2016-02-17 18:44 UTC


On Wed, Feb 17, 2016 at 07:03:14PM +0100, Christoph Hellwig wrote:
> The only work left in the kthread is the periodic health check for each
> controller.  There is no need to run this from process context or keep
> a thread context around for it, so replace it with a simpler timer.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Keith Busch <keith.busch@intel.com>


* [PATCH 1/3] nvme: use a work item to submit async event requests
  2016-02-17 18:03 ` [PATCH 1/3] nvme: use a work item to submit async event requests Christoph Hellwig
  2016-02-17 18:37   ` Keith Busch
@ 2016-02-18  8:32   ` Johannes Thumshirn
  2016-02-22  8:37   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Johannes Thumshirn @ 2016-02-18  8:32 UTC


On Wed, Feb 17, 2016 at 07:03:12PM +0100, Christoph Hellwig wrote:
> Use a dedicated work item to submit async event requests instead of the
> global kthread.  This simplifies the code and reduces the latency of
> resubmitting a request once an event notification arrives.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>

-- 
Johannes Thumshirn                                          Storage
jthumshirn@suse.de                                   +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850


* [PATCH 2/3] nvme: don't poll the CQ from the kthread
  2016-02-17 18:03 ` [PATCH 2/3] nvme: don't poll the CQ from the kthread Christoph Hellwig
  2016-02-17 18:44   ` Keith Busch
@ 2016-02-18  8:34   ` Johannes Thumshirn
  2016-02-22  8:38   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Johannes Thumshirn @ 2016-02-18  8:34 UTC


On Wed, Feb 17, 2016 at 07:03:13PM +0100, Christoph Hellwig wrote:
> Per the NVMe spec, there is no reason to poll the CQs unconditionally.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Oh yeah, I was hunting down a lockup in that loop ("fixable" with
touch_nmi_watchdog()), so I highly appreciate that it's gone.


Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>

-- 
Johannes Thumshirn                                          Storage
jthumshirn@suse.de                                   +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850


* [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer
  2016-02-17 18:03 ` [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer Christoph Hellwig
  2016-02-17 18:44   ` Keith Busch
@ 2016-02-18  8:36   ` Johannes Thumshirn
  2016-02-22  8:43   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Johannes Thumshirn @ 2016-02-18  8:36 UTC


On Wed, Feb 17, 2016 at 07:03:14PM +0100, Christoph Hellwig wrote:
> The only work left in the kthread is the periodic health check for each
> controller.  There is no need to run this from process context or keep
> a thread context around for it, so replace it with a simpler timer.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>

-- 
Johannes Thumshirn                                          Storage
jthumshirn@suse.de                                   +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850


* [PATCH 1/3] nvme: use a work item to submit async event requests
  2016-02-17 18:03 ` [PATCH 1/3] nvme: use a work item to submit async event requests Christoph Hellwig
  2016-02-17 18:37   ` Keith Busch
  2016-02-18  8:32   ` Johannes Thumshirn
@ 2016-02-22  8:37   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Sagi Grimberg @ 2016-02-22  8:37 UTC


Looks good,

Reviewed-by: Sagi Grimberg <sagig@mellanox.com>


* [PATCH 2/3] nvme: don't poll the CQ from the kthread
  2016-02-17 18:03 ` [PATCH 2/3] nvme: don't poll the CQ from the kthread Christoph Hellwig
  2016-02-17 18:44   ` Keith Busch
  2016-02-18  8:34   ` Johannes Thumshirn
@ 2016-02-22  8:38   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Sagi Grimberg @ 2016-02-22  8:38 UTC


Looks good,

Reviewed-by: Sagi Grimberg <sagig@mellanox.com>


* [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer
  2016-02-17 18:03 ` [PATCH 3/3] nvme: replace the kthread with a per-device watchdog timer Christoph Hellwig
  2016-02-17 18:44   ` Keith Busch
  2016-02-18  8:36   ` Johannes Thumshirn
@ 2016-02-22  8:43   ` Sagi Grimberg
  2 siblings, 0 replies; 13+ messages in thread
From: Sagi Grimberg @ 2016-02-22  8:43 UTC


Looks good,

Reviewed-by: Sagi Grimberg <sagig@mellanox.com>

