From: Sasha Levin <sashal@kernel.org>
To: linux-kernel@vger.kernel.org, stable@vger.kernel.org
Cc: Sagi Grimberg <sagi@grimberg.me>,
	Logan Gunthorpe <logang@deltatee.com>,
	Ming Lei <ming.lei@redhat.com>, Sasha Levin <sashal@kernel.org>,
	linux-nvme@lists.infradead.org
Subject: [PATCH AUTOSEL 5.2 16/44] nvme: fix controller removal race with scan work
Date: Tue, 20 Aug 2019 09:40:00 -0400
Message-ID: <20190820134028.10829-16-sashal@kernel.org>
In-Reply-To: <20190820134028.10829-1-sashal@kernel.org>

From: Sagi Grimberg <sagi@grimberg.me>

[ Upstream commit 0157ec8dad3c8fc9bc9790f76e0831ffdaf2e7f0 ]

With multipath enabled, nvme_scan_work() can read from the device
(through nvme_mpath_add_disk()) and hang [1]. With fabrics, once
ctrl->state is set to NVME_CTRL_DELETING, those reads will hang (see
nvmf_check_ready()), and the mpath stack device make_request will
block as long as head->list is not empty. However, when head->list
consists only of DELETING/DEAD controllers, we should not block but
rather fail the I/O immediately.
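
(As a condensed illustration of that decision: the helper added by the
multipath.c hunk below only lets a multipath head requeue I/O if at
least one controller on head->list might still become usable. This is
a sketch of the same logic, not a separate implementation.)

  static bool nvme_available_path(struct nvme_ns_head *head)
  {
  	struct nvme_ns *ns;

  	list_for_each_entry_rcu(ns, &head->list, siblings) {
  		switch (ns->ctrl->state) {
  		case NVME_CTRL_LIVE:
  		case NVME_CTRL_RESETTING:
  		case NVME_CTRL_CONNECTING:
  			/* this path may (again) become usable - requeue */
  			return true;
  		default:
  			/* DELETING/DEAD etc. cannot serve I/O */
  			break;
  		}
  	}
  	/* no path can come back - fail the bio instead of requeuing */
  	return false;
  }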

In addition, before we go ahead and remove the namespaces, make sure
to clear the current path and kick the requeue list so that requeued
requests fail fast instead of blocking.
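
(In code terms, controller removal now clears and kicks the multipath
requeue lists before waiting for the scanner; this is a condensed view
of the core.c hunk below, with the rest of nvme_remove_namespaces()
elided.)

  void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
  {
  	/* fail/requeue I/O issued by the scan itself so it can finish */
  	nvme_mpath_clear_ctrl_paths(ctrl);

  	/* prevent racing with ns scanning */
  	flush_work(&ctrl->scan_work);

  	/* ... tear down the namespaces as before ... */
  }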

[1]:
--
  INFO: task kworker/u4:3:166 blocked for more than 120 seconds.
        Not tainted 5.2.0-rc6-vmlocalyes-00005-g808c8c2dc0cf #316
  "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
  kworker/u4:3    D    0   166      2 0x80004000
  Workqueue: nvme-wq nvme_scan_work
  Call Trace:
   __schedule+0x851/0x1400
   schedule+0x99/0x210
   io_schedule+0x21/0x70
   do_read_cache_page+0xa57/0x1330
   read_cache_page+0x4a/0x70
   read_dev_sector+0xbf/0x380
   amiga_partition+0xc4/0x1230
   check_partition+0x30f/0x630
   rescan_partitions+0x19a/0x980
   __blkdev_get+0x85a/0x12f0
   blkdev_get+0x2a5/0x790
   __device_add_disk+0xe25/0x1250
   device_add_disk+0x13/0x20
   nvme_mpath_set_live+0x172/0x2b0
   nvme_update_ns_ana_state+0x130/0x180
   nvme_set_ns_ana_state+0x9a/0xb0
   nvme_parse_ana_log+0x1c3/0x4a0
   nvme_mpath_add_disk+0x157/0x290
   nvme_validate_ns+0x1017/0x1bd0
   nvme_scan_work+0x44d/0x6a0
   process_one_work+0x7d7/0x1240
   worker_thread+0x8e/0xff0
   kthread+0x2c3/0x3b0
   ret_from_fork+0x35/0x40

   INFO: task kworker/u4:1:1034 blocked for more than 120 seconds.
        Not tainted 5.2.0-rc6-vmlocalyes-00005-g808c8c2dc0cf #316
  "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
  kworker/u4:1    D    0  1034      2 0x80004000
  Workqueue: nvme-delete-wq nvme_delete_ctrl_work
  Call Trace:
   __schedule+0x851/0x1400
   schedule+0x99/0x210
   schedule_timeout+0x390/0x830
   wait_for_completion+0x1a7/0x310
   __flush_work+0x241/0x5d0
   flush_work+0x10/0x20
   nvme_remove_namespaces+0x85/0x3d0
   nvme_do_delete_ctrl+0xb4/0x1e0
   nvme_delete_ctrl_work+0x15/0x20
   process_one_work+0x7d7/0x1240
   worker_thread+0x8e/0xff0
   kthread+0x2c3/0x3b0
   ret_from_fork+0x35/0x40
--

Reported-by: Logan Gunthorpe <logang@deltatee.com>
Tested-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/nvme/host/core.c      |  7 ++++++
 drivers/nvme/host/multipath.c | 46 ++++++++++++++++++++++++++++++-----
 drivers/nvme/host/nvme.h      |  9 +++++--
 3 files changed, 54 insertions(+), 8 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 05301b94e2fa0..601509b3251ae 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3529,6 +3529,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns, *next;
 	LIST_HEAD(ns_list);
 
+	/*
+	 * make sure to requeue I/O to all namespaces as these
+	 * might result from the scan itself and must complete
+	 * for the scan_work to make progress
+	 */
+	nvme_mpath_clear_ctrl_paths(ctrl);
+
 	/* prevent racing with ns scanning */
 	flush_work(&ctrl->scan_work);
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index dafb9e4aa1237..747c0d4f9ff5b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -134,18 +134,34 @@ static const char *nvme_ana_state_names[] = {
 	[NVME_ANA_CHANGE]		= "change",
 };
 
-void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 	struct nvme_ns_head *head = ns->head;
+	bool changed = false;
 	int node;
 
 	if (!head)
-		return;
+		goto out;
 
 	for_each_node(node) {
-		if (ns == rcu_access_pointer(head->current_path[node]))
+		if (ns == rcu_access_pointer(head->current_path[node])) {
 			rcu_assign_pointer(head->current_path[node], NULL);
+			changed = true;
+		}
 	}
+out:
+	return changed;
+}
+
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+{
+	struct nvme_ns *ns;
+
+	mutex_lock(&ctrl->scan_lock);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		if (nvme_mpath_clear_current_path(ns))
+			kblockd_schedule_work(&ns->head->requeue_work);
+	mutex_unlock(&ctrl->scan_lock);
 }
 
 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
@@ -248,6 +264,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
 	return ns;
 }
 
+static bool nvme_available_path(struct nvme_ns_head *head)
+{
+	struct nvme_ns *ns;
+
+	list_for_each_entry_rcu(ns, &head->list, siblings) {
+		switch (ns->ctrl->state) {
+		case NVME_CTRL_LIVE:
+		case NVME_CTRL_RESETTING:
+		case NVME_CTRL_CONNECTING:
+			/* fallthru */
+			return true;
+		default:
+			break;
+		}
+	}
+	return false;
+}
+
 static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
 		struct bio *bio)
 {
@@ -274,14 +308,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
 				      disk_devt(ns->head->disk),
 				      bio->bi_iter.bi_sector);
 		ret = direct_make_request(bio);
-	} else if (!list_empty_careful(&head->list)) {
-		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
+	} else if (nvme_available_path(head)) {
+		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
 
 		spin_lock_irq(&head->requeue_lock);
 		bio_list_add(&head->requeue_list, bio);
 		spin_unlock_irq(&head->requeue_lock);
 	} else {
-		dev_warn_ratelimited(dev, "no path - failing I/O\n");
+		dev_warn_ratelimited(dev, "no available path - failing I/O\n");
 
 		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b8b45822f7be0..81215ca32671a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -490,7 +490,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-void nvme_mpath_clear_current_path(struct nvme_ns *ns);
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -538,7 +539,11 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 }
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+	return false;
+}
+static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 {
 }
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
-- 
2.20.1


Thread overview: 50+ messages
2019-08-20 13:39 [PATCH AUTOSEL 5.2 01/44] dmaengine: ste_dma40: fix unneeded variable warning Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 02/44] nvme-multipath: revalidate nvme_ns_head gendisk in nvme_validate_ns Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 03/44] afs: Fix the CB.ProbeUuid service handler to reply correctly Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 04/44] afs: Fix loop index mixup in afs_deliver_vl_get_entry_by_name_u() Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 05/44] fs: afs: Fix a possible null-pointer dereference in afs_put_read() Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 06/44] afs: Fix off-by-one in afs_rename() expected data version calculation Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 07/44] afs: Only update d_fsdata if different in afs_d_revalidate() Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 08/44] afs: Fix missing dentry data version updating Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 09/44] intel_th: Use the correct style for SPDX License Identifier Sasha Levin
2019-08-20 14:27   ` Greg Kroah-Hartman
2019-08-20 20:03     ` Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 10/44] nvmet: Fix use-after-free bug when a port is removed Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 11/44] nvmet-loop: Flush nvme_delete_wq when removing the port Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 12/44] nvmet-file: fix nvmet_file_flush() always returning an error Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 13/44] nvme-core: Fix extra device_put() call on error path Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 14/44] nvme: fix a possible deadlock when passthru commands sent to a multipath device Sasha Levin
2019-08-20 13:39 ` [PATCH AUTOSEL 5.2 15/44] nvme-rdma: fix possible use-after-free in connect error flow Sasha Levin
2019-08-20 13:40 ` Sasha Levin [this message]
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 17/44] nvme-pci: Fix async probe remove race Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 18/44] soundwire: cadence_master: fix register definition for SLAVE_STATE Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 19/44] soundwire: cadence_master: fix definitions for INTSTAT0/1 Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 20/44] iio: adc: max9611: Fix temperature reading in probe Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 21/44] auxdisplay: panel: need to delete scan_timer when misc_register fails in panel_attach Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 22/44] btrfs: trim: Check the range passed into to prevent overflow Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 23/44] IB/mlx5: Fix implicit MR release flow Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 24/44] dmaengine: stm32-mdma: Fix a possible null-pointer dereference in stm32_mdma_irq_handler() Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 25/44] omap-dma/omap_vout_vrfb: fix off-by-one fi value Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 26/44] iommu/dma: Handle SG length overflow better Sasha Levin
2019-08-20 13:40   ` Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 27/44] dma-direct: don't truncate dma_required_mask to bus addressing capabilities Sasha Levin
2019-08-20 13:40   ` Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 28/44] usb: gadget: composite: Clear "suspended" on reset/disconnect Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 29/44] usb: gadget: mass_storage: Fix races between fsg_disable and fsg_set_alt Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 30/44] habanalabs: fix DRAM usage accounting on context tear down Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 31/44] habanalabs: fix endianness handling for packets from user Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 32/44] habanalabs: fix completion queue handling when host is BE Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 33/44] habanalabs: fix endianness handling for internal QMAN submission Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 34/44] habanalabs: fix device IRQ unmasking for BE host Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 35/44] xen/blkback: fix memory leaks Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 36/44] arm64: cpufeature: Don't treat granule sizes as strict Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 37/44] riscv: fix flush_tlb_range() end address for flush_tlb_page() Sasha Levin
2019-08-20 13:40   ` Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 38/44] i2c: rcar: avoid race when unregistering slave client Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 39/44] i2c: emev2: " Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 40/44] drm/scheduler: use job count instead of peek Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 41/44] drm/ast: Fixed reboot test may cause system hanged Sasha Levin
2019-08-20 13:40   ` Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 42/44] usb: host: fotg2: restart hcd after port reset Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 43/44] tools: hv: fixed Python pep8/flake8 warnings for lsvmbus Sasha Levin
2019-08-20 13:40 ` [PATCH AUTOSEL 5.2 44/44] tools: hv: fix KVP and VSS daemons exit code Sasha Levin
