* nvme reset and probe updates V2
@ 2015-10-03 13:39 Christoph Hellwig
2015-10-03 13:39 ` [PATCH 1/8] NVMe: Reference count open namespaces Christoph Hellwig
` (7 more replies)
0 siblings, 8 replies; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
Hi Jens, hi Keith,
this series contains a couple of small updates and cleanups for the
reset and probe path in the nvme driver. I've included Keith's
previous 3 patches dealing with namespace attachment as I
depend on them and this allows the whole series to be applied
directly to Jens' tree.
Changes since V1:
- rebased on top of Jens' for-linus tree
- included Keith's patches in the series
- dropped two patches
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH 1/8] NVMe: Reference count open namespaces
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
@ 2015-10-03 13:39 ` Christoph Hellwig
2015-10-08 15:43 ` Sagi Grimberg
2015-10-03 13:39 ` [PATCH 2/8] NVMe: Namespace removal simplifications Christoph Hellwig
` (6 subsequent siblings)
7 siblings, 1 reply; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
From: Keith Busch <keith.busch@intel.com>
Dynamic namespace attachment means the namespace may be removed at any
time, so the namespace reference count can not be tied to the device
reference count. This fixes a NULL dereference if an opened namespace
is detached from a controller.
Signed-off-by: Keith Busch <keith.busch at intel.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
---
drivers/block/nvme-core.c | 29 ++++++++++++++++++++---------
include/linux/nvme.h | 1 +
2 files changed, 21 insertions(+), 9 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 6f04771..b02ae3d 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1943,6 +1943,18 @@ static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
#define nvme_compat_ioctl NULL
#endif
+static void nvme_free_ns(struct kref *kref)
+{
+ struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
+
+ spin_lock(&dev_list_lock);
+ ns->disk->private_data = NULL;
+ spin_unlock(&dev_list_lock);
+
+ put_disk(ns->disk);
+ kfree(ns);
+}
+
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
int ret = 0;
@@ -1952,21 +1964,25 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
ns = bdev->bd_disk->private_data;
if (!ns)
ret = -ENXIO;
- else if (!kref_get_unless_zero(&ns->dev->kref))
+ else if (!kref_get_unless_zero(&ns->kref))
ret = -ENXIO;
+ else if (!kref_get_unless_zero(&ns->dev->kref)) {
+ kref_put(&ns->kref, nvme_free_ns);
+ ret = -ENXIO;
+ }
spin_unlock(&dev_list_lock);
return ret;
}
static void nvme_free_dev(struct kref *kref);
-
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
struct nvme_ns *ns = disk->private_data;
struct nvme_dev *dev = ns->dev;
kref_put(&dev->kref, nvme_free_dev);
+ kref_put(&ns->kref, nvme_free_ns);
}
static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
@@ -2126,6 +2142,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
if (!disk)
goto out_free_queue;
+ kref_init(&ns->kref);
ns->ns_id = nsid;
ns->disk = disk;
ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
@@ -2360,13 +2377,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
static void nvme_free_namespace(struct nvme_ns *ns)
{
list_del(&ns->list);
-
- spin_lock(&dev_list_lock);
- ns->disk->private_data = NULL;
- spin_unlock(&dev_list_lock);
-
- put_disk(ns->disk);
- kfree(ns);
+ kref_put(&ns->kref, nvme_free_ns);
}
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index b5812c3..992b9c1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -135,6 +135,7 @@ struct nvme_ns {
struct nvme_dev *dev;
struct request_queue *queue;
struct gendisk *disk;
+ struct kref kref;
unsigned ns_id;
int lba_shift;
--
1.9.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH 2/8] NVMe: Namespace removal simplifications
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
2015-10-03 13:39 ` [PATCH 1/8] NVMe: Reference count open namespaces Christoph Hellwig
@ 2015-10-03 13:39 ` Christoph Hellwig
2015-10-08 15:47 ` Sagi Grimberg
2015-10-03 13:39 ` [PATCH 3/8] NVMe: Simplify device resume on io queue failure Christoph Hellwig
` (5 subsequent siblings)
7 siblings, 1 reply; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
From: Keith Busch <keith.busch@intel.com>
This liberates namespace removal from the device, allowing gendisk
references to be closed independent of the nvme controller reference
count.
Signed-off-by: Keith Busch <keith.busch at intel.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
---
drivers/block/nvme-core.c | 42 ++++++++++--------------------------------
1 file changed, 10 insertions(+), 32 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b02ae3d..904b54f 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1943,6 +1943,7 @@ static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
#define nvme_compat_ioctl NULL
#endif
+static void nvme_free_dev(struct kref *kref);
static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
@@ -1951,6 +1952,7 @@ static void nvme_free_ns(struct kref *kref)
ns->disk->private_data = NULL;
spin_unlock(&dev_list_lock);
+ kref_put(&ns->dev->kref, nvme_free_dev);
put_disk(ns->disk);
kfree(ns);
}
@@ -1966,22 +1968,14 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
ret = -ENXIO;
else if (!kref_get_unless_zero(&ns->kref))
ret = -ENXIO;
- else if (!kref_get_unless_zero(&ns->dev->kref)) {
- kref_put(&ns->kref, nvme_free_ns);
- ret = -ENXIO;
- }
spin_unlock(&dev_list_lock);
return ret;
}
-static void nvme_free_dev(struct kref *kref);
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
struct nvme_ns *ns = disk->private_data;
- struct nvme_dev *dev = ns->dev;
-
- kref_put(&dev->kref, nvme_free_dev);
kref_put(&ns->kref, nvme_free_ns);
}
@@ -2179,6 +2173,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
+ kref_get(&dev->kref);
add_disk(ns->disk);
if (ns->ms) {
struct block_device *bd = bdget_disk(ns->disk, 0);
@@ -2374,12 +2369,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return result;
}
-static void nvme_free_namespace(struct nvme_ns *ns)
-{
- list_del(&ns->list);
- kref_put(&ns->kref, nvme_free_ns);
-}
-
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
@@ -2421,7 +2410,9 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (kill || !blk_queue_dying(ns->queue)) {
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
- }
+ }
+ list_del_init(&ns->list);
+ kref_put(&ns->kref, nvme_free_ns);
}
static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
@@ -2432,18 +2423,14 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
for (i = 1; i <= nn; i++) {
ns = nvme_find_ns(dev, i);
if (ns) {
- if (revalidate_disk(ns->disk)) {
+ if (revalidate_disk(ns->disk))
nvme_ns_remove(ns);
- nvme_free_namespace(ns);
- }
} else
nvme_alloc_ns(dev, i);
}
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- if (ns->ns_id > nn) {
+ if (ns->ns_id > nn)
nvme_ns_remove(ns);
- nvme_free_namespace(ns);
- }
}
list_sort(NULL, &dev->namespaces, ns_cmp);
}
@@ -2833,9 +2820,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
static void nvme_dev_remove(struct nvme_dev *dev)
{
- struct nvme_ns *ns;
+ struct nvme_ns *ns, *next;
- list_for_each_entry(ns, &dev->namespaces, list)
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list)
nvme_ns_remove(ns);
}
@@ -2891,21 +2878,12 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
-static void nvme_free_namespaces(struct nvme_dev *dev)
-{
- struct nvme_ns *ns, *next;
-
- list_for_each_entry_safe(ns, next, &dev->namespaces, list)
- nvme_free_namespace(ns);
-}
-
static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
put_device(dev->dev);
put_device(dev->device);
- nvme_free_namespaces(dev);
nvme_release_instance(dev);
if (dev->tagset.tags)
blk_mq_free_tag_set(&dev->tagset);
--
1.9.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH 3/8] NVMe: Simplify device resume on io queue failure
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
2015-10-03 13:39 ` [PATCH 1/8] NVMe: Reference count open namespaces Christoph Hellwig
2015-10-03 13:39 ` [PATCH 2/8] NVMe: Namespace removal simplifications Christoph Hellwig
@ 2015-10-03 13:39 ` Christoph Hellwig
2015-10-03 13:39 ` [PATCH 4/8] nvme: delete dev from dev_list in nvme_reset Christoph Hellwig
` (4 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
From: Keith Busch <keith.busch@intel.com>
Releasing IO queues and disks was done in a work queue outside the
controller resume context to delete namespaces if the controller failed
after a resume from suspend. This is unnecessary since we can resume
a device asynchronously.
This patch makes resume use probe_work so it can directly remove
namespaces if the device is manageable but not IO capable. Since the
deleting disks was the only reason we had the convoluted "reset_workfn",
this patch removes that unnecessary indirection.
Signed-off-by: Keith Busch <keith.busch at intel.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
---
drivers/block/nvme-core.c | 34 ++++++----------------------------
include/linux/nvme.h | 1 -
2 files changed, 6 insertions(+), 29 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 904b54f..bf35846 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1285,7 +1285,6 @@ static void nvme_abort_req(struct request *req)
list_del_init(&dev->node);
dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
- dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
out:
spin_unlock_irqrestore(&dev_list_lock, flags);
@@ -2089,7 +2088,6 @@ static int nvme_kthread(void *data)
dev_warn(dev->dev,
"Failed status: %x, reset controller\n",
readl(&dev->bar->csts));
- dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
continue;
}
@@ -3025,14 +3023,6 @@ static int nvme_remove_dead_ctrl(void *arg)
return 0;
}
-static void nvme_remove_disks(struct work_struct *ws)
-{
- struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
-
- nvme_free_queues(dev, 1);
- nvme_dev_remove(dev);
-}
-
static int nvme_dev_resume(struct nvme_dev *dev)
{
int ret;
@@ -3041,10 +3031,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
if (ret)
return ret;
if (dev->online_queues < 2) {
- spin_lock(&dev_list_lock);
- dev->reset_workfn = nvme_remove_disks;
- queue_work(nvme_workq, &dev->reset_work);
- spin_unlock(&dev_list_lock);
+ dev_warn(dev->dev, "IO queues not created\n");
+ nvme_free_queues(dev, 1);
+ nvme_dev_remove(dev);
} else {
nvme_unfreeze_queues(dev);
nvme_dev_add(dev);
@@ -3091,12 +3080,6 @@ static void nvme_reset_failed_dev(struct work_struct *ws)
nvme_dev_reset(dev);
}
-static void nvme_reset_workfn(struct work_struct *work)
-{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
- dev->reset_workfn(work);
-}
-
static int nvme_reset(struct nvme_dev *dev)
{
int ret = -EBUSY;
@@ -3106,7 +3089,6 @@ static int nvme_reset(struct nvme_dev *dev)
spin_lock(&dev_list_lock);
if (!work_pending(&dev->reset_work)) {
- dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
ret = 0;
}
@@ -3159,8 +3141,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto free;
INIT_LIST_HEAD(&dev->namespaces);
- dev->reset_workfn = nvme_reset_failed_dev;
- INIT_WORK(&dev->reset_work, nvme_reset_workfn);
+ INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
@@ -3223,7 +3204,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
if (prepare)
nvme_dev_shutdown(dev);
else
- nvme_dev_resume(dev);
+ schedule_work(&dev->probe_work);
}
static void nvme_shutdown(struct pci_dev *pdev)
@@ -3277,10 +3258,7 @@ static int nvme_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
- ndev->reset_workfn = nvme_reset_failed_dev;
- queue_work(nvme_workq, &ndev->reset_work);
- }
+ schedule_work(&ndev->probe_work);
return 0;
}
#endif
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 992b9c1..7725b4c 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -104,7 +104,6 @@ struct nvme_dev {
struct list_head namespaces;
struct kref kref;
struct device *device;
- work_func_t reset_workfn;
struct work_struct reset_work;
struct work_struct probe_work;
struct work_struct scan_work;
--
1.9.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH 4/8] nvme: delete dev from dev_list in nvme_reset
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
` (2 preceding siblings ...)
2015-10-03 13:39 ` [PATCH 3/8] NVMe: Simplify device resume on io queue failure Christoph Hellwig
@ 2015-10-03 13:39 ` Christoph Hellwig
2015-10-03 13:39 ` [PATCH 5/8] nvme: merge nvme_dev_reset into nvme_reset_failed_dev Christoph Hellwig
` (3 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
Device resets need to delete the device from the device list before
kicking off the reset and re-probe, otherwise we get the device added
to the list twice. nvme_reset is the only site missing this deletion
at the moment, and this patch adds it.
Signed-off-by: Christoph Hellwig <hch at lst.de>
Reviewed-by: Keith Busch <keith.busch at intel.com>
---
drivers/block/nvme-core.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index bf35846..be35b1d 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -3089,6 +3089,7 @@ static int nvme_reset(struct nvme_dev *dev)
spin_lock(&dev_list_lock);
if (!work_pending(&dev->reset_work)) {
+ list_del_init(&dev->node);
queue_work(nvme_workq, &dev->reset_work);
ret = 0;
}
--
1.9.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH 5/8] nvme: merge nvme_dev_reset into nvme_reset_failed_dev
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
` (3 preceding siblings ...)
2015-10-03 13:39 ` [PATCH 4/8] nvme: delete dev from dev_list in nvme_reset Christoph Hellwig
@ 2015-10-03 13:39 ` Christoph Hellwig
2015-10-03 13:39 ` [PATCH 6/8] nvme: factor reset code into a common helper Christoph Hellwig
` (2 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
And give the resulting function a more descriptive name.
Signed-off-by: Christoph Hellwig <hch at lst.de>
Reviewed-by: Keith Busch <keith.busch at intel.com>
---
drivers/block/nvme-core.c | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index be35b1d..509ad4b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -84,7 +84,6 @@ static wait_queue_head_t nvme_kthread_wait;
static struct class *nvme_class;
-static void nvme_reset_failed_dev(struct work_struct *ws);
static int nvme_reset(struct nvme_dev *dev);
static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -3053,8 +3052,9 @@ static void nvme_dead_ctrl(struct nvme_dev *dev)
}
}
-static void nvme_dev_reset(struct nvme_dev *dev)
+static void nvme_reset_work(struct work_struct *ws)
{
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
bool in_probe = work_busy(&dev->probe_work);
nvme_dev_shutdown(dev);
@@ -3074,12 +3074,6 @@ static void nvme_dev_reset(struct nvme_dev *dev)
schedule_work(&dev->probe_work);
}
-static void nvme_reset_failed_dev(struct work_struct *ws)
-{
- struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
- nvme_dev_reset(dev);
-}
-
static int nvme_reset(struct nvme_dev *dev)
{
int ret = -EBUSY;
@@ -3142,7 +3136,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto free;
INIT_LIST_HEAD(&dev->namespaces);
- INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
+ INIT_WORK(&dev->reset_work, nvme_reset_work);
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
--
1.9.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH 6/8] nvme: factor reset code into a common helper
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
` (4 preceding siblings ...)
2015-10-03 13:39 ` [PATCH 5/8] nvme: merge nvme_dev_reset into nvme_reset_failed_dev Christoph Hellwig
@ 2015-10-03 13:39 ` Christoph Hellwig
2015-10-03 13:39 ` [PATCH 7/8] nvme: merge nvme_dev_start, nvme_dev_resume and nvme_async_probe Christoph Hellwig
2015-10-03 13:39 ` [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues Christoph Hellwig
7 siblings, 0 replies; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
Signed-off-by: Christoph Hellwig <hch at lst.de>
Reviewed-by: Keith Busch <keith.busch at intel.com>
---
drivers/block/nvme-core.c | 48 +++++++++++++++++++++++------------------------
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 509ad4b..e03a95b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -84,6 +84,7 @@ static wait_queue_head_t nvme_kthread_wait;
static struct class *nvme_class;
+static int __nvme_reset(struct nvme_dev *dev);
static int nvme_reset(struct nvme_dev *dev);
static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -1276,17 +1277,13 @@ static void nvme_abort_req(struct request *req)
struct nvme_command cmd;
if (!nvmeq->qid || cmd_rq->aborted) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev_list_lock, flags);
- if (work_busy(&dev->reset_work))
- goto out;
- list_del_init(&dev->node);
- dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
- req->tag, nvmeq->qid);
- queue_work(nvme_workq, &dev->reset_work);
- out:
- spin_unlock_irqrestore(&dev_list_lock, flags);
+ spin_lock(&dev_list_lock);
+ if (!__nvme_reset(dev)) {
+ dev_warn(dev->dev,
+ "I/O %d QID %d timeout, reset controller\n",
+ req->tag, nvmeq->qid);
+ }
+ spin_unlock(&dev_list_lock);
return;
}
@@ -2081,13 +2078,11 @@ static int nvme_kthread(void *data)
if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
csts & NVME_CSTS_CFS) {
- if (work_busy(&dev->reset_work))
- continue;
- list_del_init(&dev->node);
- dev_warn(dev->dev,
- "Failed status: %x, reset controller\n",
- readl(&dev->bar->csts));
- queue_work(nvme_workq, &dev->reset_work);
+ if (!__nvme_reset(dev)) {
+ dev_warn(dev->dev,
+ "Failed status: %x, reset controller\n",
+ readl(&dev->bar->csts));
+ }
continue;
}
for (i = 0; i < dev->queue_count; i++) {
@@ -3074,19 +3069,24 @@ static void nvme_reset_work(struct work_struct *ws)
schedule_work(&dev->probe_work);
}
+static int __nvme_reset(struct nvme_dev *dev)
+{
+ if (work_pending(&dev->reset_work))
+ return -EBUSY;
+ list_del_init(&dev->node);
+ queue_work(nvme_workq, &dev->reset_work);
+ return 0;
+}
+
static int nvme_reset(struct nvme_dev *dev)
{
- int ret = -EBUSY;
+ int ret;
if (!dev->admin_q || blk_queue_dying(dev->admin_q))
return -ENODEV;
spin_lock(&dev_list_lock);
- if (!work_pending(&dev->reset_work)) {
- list_del_init(&dev->node);
- queue_work(nvme_workq, &dev->reset_work);
- ret = 0;
- }
+ ret = __nvme_reset(dev);
spin_unlock(&dev_list_lock);
if (!ret) {
--
1.9.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH 7/8] nvme: merge nvme_dev_start, nvme_dev_resume and nvme_async_probe
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
` (5 preceding siblings ...)
2015-10-03 13:39 ` [PATCH 6/8] nvme: factor reset code into a common helper Christoph Hellwig
@ 2015-10-03 13:39 ` Christoph Hellwig
2015-10-03 13:39 ` [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues Christoph Hellwig
7 siblings, 0 replies; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
And give the resulting function a sensible name. This keeps all the
error handling in a single place and will allow for further improvements
to it.
Signed-off-by: Christoph Hellwig <hch at lst.de>
Reviewed-by: Keith Busch <keith.busch at intel.com>
---
drivers/block/nvme-core.c | 53 ++++++++++++++++++-----------------------------
1 file changed, 20 insertions(+), 33 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index e03a95b..61cfff3 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -87,6 +87,7 @@ static struct class *nvme_class;
static int __nvme_reset(struct nvme_dev *dev);
static int nvme_reset(struct nvme_dev *dev);
static int nvme_process_cq(struct nvme_queue *nvmeq);
+static void nvme_dead_ctrl(struct nvme_dev *dev);
struct async_cmd_info {
struct kthread_work work;
@@ -2949,14 +2950,15 @@ static const struct file_operations nvme_dev_fops = {
.compat_ioctl = nvme_dev_ioctl,
};
-static int nvme_dev_start(struct nvme_dev *dev)
+static void nvme_probe_work(struct work_struct *work)
{
- int result;
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
bool start_thread = false;
+ int result;
result = nvme_dev_map(dev);
if (result)
- return result;
+ goto out;
result = nvme_configure_admin_queue(dev);
if (result)
@@ -2991,7 +2993,17 @@ static int nvme_dev_start(struct nvme_dev *dev)
goto free_tags;
dev->event_limit = 1;
- return result;
+
+ if (dev->online_queues < 2) {
+ dev_warn(dev->dev, "IO queues not created\n");
+ nvme_free_queues(dev, 1);
+ nvme_dev_remove(dev);
+ } else {
+ nvme_unfreeze_queues(dev);
+ nvme_dev_add(dev);
+ }
+
+ return;
free_tags:
nvme_dev_remove_admin(dev);
@@ -3003,7 +3015,9 @@ static int nvme_dev_start(struct nvme_dev *dev)
nvme_dev_list_remove(dev);
unmap:
nvme_dev_unmap(dev);
- return result;
+ out:
+ if (!work_busy(&dev->reset_work))
+ nvme_dead_ctrl(dev);
}
static int nvme_remove_dead_ctrl(void *arg)
@@ -3017,24 +3031,6 @@ static int nvme_remove_dead_ctrl(void *arg)
return 0;
}
-static int nvme_dev_resume(struct nvme_dev *dev)
-{
- int ret;
-
- ret = nvme_dev_start(dev);
- if (ret)
- return ret;
- if (dev->online_queues < 2) {
- dev_warn(dev->dev, "IO queues not created\n");
- nvme_free_queues(dev, 1);
- nvme_dev_remove(dev);
- } else {
- nvme_unfreeze_queues(dev);
- nvme_dev_add(dev);
- }
- return 0;
-}
-
static void nvme_dead_ctrl(struct nvme_dev *dev)
{
dev_warn(dev->dev, "Device failed to resume\n");
@@ -3113,7 +3109,6 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
-static void nvme_async_probe(struct work_struct *work);
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int node, result = -ENOMEM;
@@ -3164,7 +3159,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->scan_work, nvme_dev_scan);
- INIT_WORK(&dev->probe_work, nvme_async_probe);
+ INIT_WORK(&dev->probe_work, nvme_probe_work);
schedule_work(&dev->probe_work);
return 0;
@@ -3184,14 +3179,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return result;
}
-static void nvme_async_probe(struct work_struct *work)
-{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
-
- if (nvme_dev_resume(dev) && !work_busy(&dev->reset_work))
- nvme_dead_ctrl(dev);
-}
-
static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
--
1.9.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
` (6 preceding siblings ...)
2015-10-03 13:39 ` [PATCH 7/8] nvme: merge nvme_dev_start, nvme_dev_resume and nvme_async_probe Christoph Hellwig
@ 2015-10-03 13:39 ` Christoph Hellwig
2015-10-08 15:52 ` Sagi Grimberg
7 siblings, 1 reply; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-03 13:39 UTC (permalink / raw)
This avoids having to clean up later in a seemingly unrelated place.
Signed-off-by: Christoph Hellwig <hch at lst.de>
Reviewed-by: Christoph Hellwig <hch at lst.de>
---
drivers/block/nvme-core.c | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 61cfff3..01a6d1b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -2189,6 +2189,13 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
kfree(ns);
}
+/*
+ * Create I/O queues. Failing to create an I/O queue is not an issue,
+ * we can continue with less than the desired amount of queues, and
+ * even a controller without I/O queues can still be used to issue
+ * admin commands. This might be useful to upgrade a buggy firmware
+ * for example.
+ */
static void nvme_create_io_queues(struct nvme_dev *dev)
{
unsigned i;
@@ -2198,8 +2205,10 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
break;
for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
- if (nvme_create_queue(dev->queues[i], i))
+ if (nvme_create_queue(dev->queues[i], i)) {
+ nvme_free_queues(dev, i);
break;
+ }
}
static int set_queue_count(struct nvme_dev *dev, int count)
@@ -2994,9 +3003,12 @@ static void nvme_probe_work(struct work_struct *work)
dev->event_limit = 1;
+ /*
+ * Keep the controller around but remove all namespaces if we don't have
+ * any working I/O queue.
+ */
if (dev->online_queues < 2) {
dev_warn(dev->dev, "IO queues not created\n");
- nvme_free_queues(dev, 1);
nvme_dev_remove(dev);
} else {
nvme_unfreeze_queues(dev);
--
1.9.1
^ permalink raw reply related [flat|nested] 17+ messages in thread
* [PATCH 1/8] NVMe: Reference count open namespaces
2015-10-03 13:39 ` [PATCH 1/8] NVMe: Reference count open namespaces Christoph Hellwig
@ 2015-10-08 15:43 ` Sagi Grimberg
2015-10-08 15:47 ` Keith Busch
0 siblings, 1 reply; 17+ messages in thread
From: Sagi Grimberg @ 2015-10-08 15:43 UTC (permalink / raw)
On 10/3/2015 4:39 PM, Christoph Hellwig wrote:
> From: Keith Busch <keith.busch at intel.com>
>
> Dynamic namespace attachment means the namespace may be removed at any
> time, so the namespace reference count can not be tied to the device
> reference count. This fixes a NULL dereference if an opened namespace
> is detached from a controller.
>
> Signed-off-by: Keith Busch <keith.busch at intel.com>
> Reviewed-by: Christoph Hellwig <hch at lst.de>
> ---
> drivers/block/nvme-core.c | 29 ++++++++++++++++++++---------
> include/linux/nvme.h | 1 +
> 2 files changed, 21 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
> index 6f04771..b02ae3d 100644
> --- a/drivers/block/nvme-core.c
> +++ b/drivers/block/nvme-core.c
> @@ -1943,6 +1943,18 @@ static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
> #define nvme_compat_ioctl NULL
> #endif
>
> +static void nvme_free_ns(struct kref *kref)
> +{
> + struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
> +
> + spin_lock(&dev_list_lock);
> + ns->disk->private_data = NULL;
> + spin_unlock(&dev_list_lock);
> +
> + put_disk(ns->disk);
> + kfree(ns);
> +}
> +
> static int nvme_open(struct block_device *bdev, fmode_t mode)
> {
> int ret = 0;
> @@ -1952,21 +1964,25 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
> ns = bdev->bd_disk->private_data;
> if (!ns)
> ret = -ENXIO;
> - else if (!kref_get_unless_zero(&ns->dev->kref))
> + else if (!kref_get_unless_zero(&ns->kref))
> ret = -ENXIO;
> + else if (!kref_get_unless_zero(&ns->dev->kref)) {
> + kref_put(&ns->kref, nvme_free_ns);
Is this calling nvme_free_ns with dev_list_lock taken?
> + ret = -ENXIO;
> + }
> spin_unlock(&dev_list_lock);
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH 2/8] NVMe: Namespace removal simplifications
2015-10-03 13:39 ` [PATCH 2/8] NVMe: Namespace removal simplifications Christoph Hellwig
@ 2015-10-08 15:47 ` Sagi Grimberg
0 siblings, 0 replies; 17+ messages in thread
From: Sagi Grimberg @ 2015-10-08 15:47 UTC (permalink / raw)
On 10/3/2015 4:39 PM, Christoph Hellwig wrote:
> From: Keith Busch <keith.busch at intel.com>
>
> This liberates namespace removal from the device, allowing gendisk
> references to be closed independent of the nvme controller reference
> count.
>
> Signed-off-by: Keith Busch <keith.busch at intel.com>
> Reviewed-by: Christoph Hellwig <hch at lst.de>
> ---
> drivers/block/nvme-core.c | 42 ++++++++++--------------------------------
> 1 file changed, 10 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
> index b02ae3d..904b54f 100644
> --- a/drivers/block/nvme-core.c
> +++ b/drivers/block/nvme-core.c
> @@ -1943,6 +1943,7 @@ static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
> #define nvme_compat_ioctl NULL
> #endif
>
> +static void nvme_free_dev(struct kref *kref);
> static void nvme_free_ns(struct kref *kref)
> {
> struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
> @@ -1951,6 +1952,7 @@ static void nvme_free_ns(struct kref *kref)
> ns->disk->private_data = NULL;
> spin_unlock(&dev_list_lock);
>
> + kref_put(&ns->dev->kref, nvme_free_dev);
> put_disk(ns->disk);
> kfree(ns);
> }
> @@ -1966,22 +1968,14 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
> ret = -ENXIO;
> else if (!kref_get_unless_zero(&ns->kref))
> ret = -ENXIO;
> - else if (!kref_get_unless_zero(&ns->dev->kref)) {
> - kref_put(&ns->kref, nvme_free_ns);
> - ret = -ENXIO;
> - }
And this fixes the last patch. I would suggest fixing the former though
for bisect-ability.
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH 1/8] NVMe: Reference count open namespaces
2015-10-08 15:43 ` Sagi Grimberg
@ 2015-10-08 15:47 ` Keith Busch
0 siblings, 0 replies; 17+ messages in thread
From: Keith Busch @ 2015-10-08 15:47 UTC (permalink / raw)
On Thu, 8 Oct 2015, Sagi Grimberg wrote:
> On 10/3/2015 4:39 PM, Christoph Hellwig wrote:
>> + else if (!kref_get_unless_zero(&ns->dev->kref)) {
>> + kref_put(&ns->kref, nvme_free_ns);
>
> Is this calling nvme_free_ns with dev_list_lock taken?
Yeah, patch 2/8 in this series fixes it. I should have squashed those two.
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues
2015-10-03 13:39 ` [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues Christoph Hellwig
@ 2015-10-08 15:52 ` Sagi Grimberg
2015-10-09 6:15 ` Christoph Hellwig
0 siblings, 1 reply; 17+ messages in thread
From: Sagi Grimberg @ 2015-10-08 15:52 UTC (permalink / raw)
On 10/3/2015 4:39 PM, Christoph Hellwig wrote:
> This avoids having to clean up later in a seemingly unrelated place.
>
> Signed-off-by: Christoph Hellwig <hch at lst.de>
> Reviewed-by: Christoph Hellwig <hch at lst.de>
I assume Keith reviewed this one correct? :)
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues
2015-10-08 15:52 ` Sagi Grimberg
@ 2015-10-09 6:15 ` Christoph Hellwig
2015-10-09 14:20 ` Keith Busch
0 siblings, 1 reply; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-09 6:15 UTC (permalink / raw)
On Thu, Oct 08, 2015@06:52:24PM +0300, Sagi Grimberg wrote:
> On 10/3/2015 4:39 PM, Christoph Hellwig wrote:
>> This avoids having to clean up later in a seemingly unrelated place.
>>
>> Signed-off-by: Christoph Hellwig <hch at lst.de>
>> Reviewed-by: Christoph Hellwig <hch at lst.de>
>
> I assume Keith reviewed this one correct? :)
He did reply with a reviewed-by tag to the previous posting of the series,
I think that qualifies.
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues
2015-10-09 6:15 ` Christoph Hellwig
@ 2015-10-09 14:20 ` Keith Busch
2015-10-09 14:39 ` Christoph Hellwig
0 siblings, 1 reply; 17+ messages in thread
From: Keith Busch @ 2015-10-09 14:20 UTC (permalink / raw)
On Thu, 8 Oct 2015, Christoph Hellwig wrote:
> On Thu, Oct 08, 2015@06:52:24PM +0300, Sagi Grimberg wrote:
>> On 10/3/2015 4:39 PM, Christoph Hellwig wrote:
>>> This avoids having to clean up later in a seemingly unrelated place.
>>>
>>> Signed-off-by: Christoph Hellwig <hch at lst.de>
>>> Reviewed-by: Christoph Hellwig <hch at lst.de>
>>
>> I assume Keith reviewed this one correct? :)
>
> He did reply with a reviewed-by tag to the previous posting of the series,
> I think that qualifies.
I think Sagi was just making light of using the wrong name (yours)
in the "Reviewed-by". :)
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues
2015-10-09 14:20 ` Keith Busch
@ 2015-10-09 14:39 ` Christoph Hellwig
2015-10-09 16:16 ` Jens Axboe
0 siblings, 1 reply; 17+ messages in thread
From: Christoph Hellwig @ 2015-10-09 14:39 UTC (permalink / raw)
On Fri, Oct 09, 2015@02:20:38PM +0000, Keith Busch wrote:
>> He did reply with a reviewed-by tag to the previous posting of the series,
>> I think that qualifies.
>
> I think Sagi was just making light of using the wrong name (yours)
> in the "Reviewed-by". :)
Oh, ok. Jens, do you want a resend, or can you fix it up on the fly?
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues
2015-10-09 14:39 ` Christoph Hellwig
@ 2015-10-09 16:16 ` Jens Axboe
0 siblings, 0 replies; 17+ messages in thread
From: Jens Axboe @ 2015-10-09 16:16 UTC (permalink / raw)
On 10/09/2015 08:39 AM, Christoph Hellwig wrote:
> On Fri, Oct 09, 2015@02:20:38PM +0000, Keith Busch wrote:
>>> He did reply with a reviewed-by tag to the previous posting of the series,
>>> I think that qualifies.
>>
>> I think Sagi was just making light of using the wrong name (yours)
>> in the "Reviewed-by". :)
>
> Oh, ok. Jens, do you want a resend, or can you fix it up on the fly?
I fixed it up.
--
Jens Axboe
^ permalink raw reply [flat|nested] 17+ messages in thread
end of thread, other threads:[~2015-10-09 16:16 UTC | newest]
Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-10-03 13:39 nvme reset and probe updates V2 Christoph Hellwig
2015-10-03 13:39 ` [PATCH 1/8] NVMe: Reference count open namespaces Christoph Hellwig
2015-10-08 15:43 ` Sagi Grimberg
2015-10-08 15:47 ` Keith Busch
2015-10-03 13:39 ` [PATCH 2/8] NVMe: Namespace removal simplifications Christoph Hellwig
2015-10-08 15:47 ` Sagi Grimberg
2015-10-03 13:39 ` [PATCH 3/8] NVMe: Simplify device resume on io queue failure Christoph Hellwig
2015-10-03 13:39 ` [PATCH 4/8] nvme: delete dev from dev_list in nvme_reset Christoph Hellwig
2015-10-03 13:39 ` [PATCH 5/8] nvme: merge nvme_dev_reset into nvme_reset_failed_dev Christoph Hellwig
2015-10-03 13:39 ` [PATCH 6/8] nvme: factor reset code into a common helper Christoph Hellwig
2015-10-03 13:39 ` [PATCH 7/8] nvme: merge nvme_dev_start, nvme_dev_resume and nvme_async_probe Christoph Hellwig
2015-10-03 13:39 ` [PATCH 8/8] nvme: properly handle partially initialized queues in nvme_create_io_queues Christoph Hellwig
2015-10-08 15:52 ` Sagi Grimberg
2015-10-09 6:15 ` Christoph Hellwig
2015-10-09 14:20 ` Keith Busch
2015-10-09 14:39 ` Christoph Hellwig
2015-10-09 16:16 ` Jens Axboe
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.