* [PATCH 00/13] Device mapper and dm-mpath patches
@ 2017-04-26 18:37 Bart Van Assche
  2017-04-26 18:37   ` Bart Van Assche
                   ` (12 more replies)
  0 siblings, 13 replies; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer; +Cc: Bart Van Assche, dm-devel

Hello Mike,

The patches in this series are:
* A few fixes for bugs in the dm-mpath driver that I ran into while
  testing that driver.
* A resend of various dm / dm-mpath patches that I posted earlier but
  for which I am still waiting for your review.

Please consider at least the dm-mpath bug fixes for kernel v4.12.

Thanks,

Bart.

Bart Van Assche (13):
  dm-mpath: Split activate_path()
  dm-mpath: Avoid that path removal can trigger an infinite loop
  dm-mpath: Delay requeuing while path initialization is in progress
  dm-rq: Adjust requeuing delays
  dm-mpath: Make it easier to analyze requeuing behavior
  dm-rq: Check blk_mq_register_dev() return value
  dm, persistence: Remove an unused argument from
    dm_block_manager_create()
  dm: Verify suspend_locking assumptions at runtime
  dm-mpath: Verify locking assumptions at runtime
  dm: Introduce enum dm_queue_mode
  dm-mpath: Micro-optimize the hot path
  dm-mpath: Introduce assign_bit()
  dm, dm-mpath: Make it easier to detect unintended I/O request flushes

 drivers/md/dm-cache-metadata.c                |   3 -
 drivers/md/dm-core.h                          |   2 +-
 drivers/md/dm-era-target.c                    |   2 -
 drivers/md/dm-ioctl.c                         |   2 +-
 drivers/md/dm-mpath.c                         | 187 +++++++++++++-------------
 drivers/md/dm-rq.c                            |  15 ++-
 drivers/md/dm-table.c                         |  18 ++-
 drivers/md/dm-thin-metadata.c                 |   2 -
 drivers/md/dm.c                               |  26 ++--
 drivers/md/dm.h                               |  12 +-
 drivers/md/persistent-data/dm-block-manager.c |   1 -
 drivers/md/persistent-data/dm-block-manager.h |   2 +-
 include/linux/device-mapper.h                 |  14 +-
 13 files changed, 148 insertions(+), 138 deletions(-)

-- 
2.12.2

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH 01/13] dm-mpath: Split activate_path()
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
@ 2017-04-26 18:37   ` Bart Van Assche
  2017-04-26 18:37   ` Bart Van Assche
                     ` (11 subsequent siblings)
  12 siblings, 0 replies; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer
  Cc: dm-devel, Bart Van Assche, Hannes Reinecke, Christoph Hellwig, stable

This patch does not change any functionality but makes the next
patch in this series easier to read.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: <stable@vger.kernel.org>
---
 drivers/md/dm-mpath.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7f223dbed49f..909098e18643 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -111,7 +111,8 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
-static void activate_path(struct work_struct *work);
+static void activate_path(struct pgpath *pgpath);
+static void activate_path_work(struct work_struct *work);
 static void process_queued_bios(struct work_struct *work);
 
 /*-----------------------------------------------
@@ -136,7 +137,7 @@ static struct pgpath *alloc_pgpath(void)
 
 	if (pgpath) {
 		pgpath->is_active = true;
-		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
+		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
 	}
 
 	return pgpath;
@@ -1437,10 +1438,8 @@ static void pg_init_done(void *data, int errors)
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
-static void activate_path(struct work_struct *work)
+static void activate_path(struct pgpath *pgpath)
 {
-	struct pgpath *pgpath =
-		container_of(work, struct pgpath, activate_path.work);
 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
 
 	if (pgpath->is_active && !blk_queue_dying(q))
@@ -1449,6 +1448,11 @@ static void activate_path(struct work_struct *work)
 		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
 }
 
+static void activate_path_work(struct work_struct *work)
+{
+	activate_path(container_of(work, struct pgpath, activate_path.work));
+}
+
 static int noretry_error(int error)
 {
 	switch (error) {
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread
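
The refactoring above follows a common kernel pattern: the core logic
lives in a plain function and a thin wrapper recovers the containing
object with container_of(), so the workqueue callback keeps its expected
signature while the next patch can call activate_path() directly. A
minimal userspace sketch of the same pattern; the struct and function
names are invented and container_of() is a simplified stand-in for the
kernel macro:

#include <stdio.h>
#include <stddef.h>

/* Simplified userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {			/* plays the role of work_struct */
	void (*fn)(struct work_item *w);
};

struct path {				/* plays the role of struct pgpath */
	int id;
	struct work_item activate_work;
};

/* Core logic: can be called directly, no work item needed. */
static void activate_path(struct path *p)
{
	printf("activating path %d\n", p->id);
}

/* Thin wrapper with the callback signature the work queue expects. */
static void activate_path_work(struct work_item *w)
{
	activate_path(container_of(w, struct path, activate_work));
}

int main(void)
{
	struct path p = { .id = 7, .activate_work = { .fn = activate_path_work } };

	p.activate_work.fn(&p.activate_work);	/* via the work callback */
	activate_path(&p);			/* direct call */
	return 0;
}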

* [PATCH 02/13] dm-mpath: Avoid that path removal can trigger an infinite loop
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
@ 2017-04-26 18:37   ` Bart Van Assche
  2017-04-26 18:37   ` Bart Van Assche
                     ` (11 subsequent siblings)
  12 siblings, 0 replies; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer
  Cc: dm-devel, Bart Van Assche, Hannes Reinecke, Christoph Hellwig, stable

If blk_get_request() fails, check whether the failure is due to
a path being removed. If that is the case, fail the path by
triggering a call to fail_path(). This patch prevents the following
scenario from being encountered while removing paths:
* CPU usage of a kworker thread jumps to 100%.
* Removing the dm device becomes impossible.

Delay requeuing if blk_get_request() returns -EBUSY or
-EWOULDBLOCK because in these cases immediate requeuing is
inappropriate.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: <stable@vger.kernel.org>
---
 drivers/md/dm-mpath.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 909098e18643..6d4333fdddf5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -490,6 +490,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	struct pgpath *pgpath;
 	struct block_device *bdev;
 	struct dm_mpath_io *mpio = get_mpio(map_context);
+	struct request_queue *q;
 	struct request *clone;
 
 	/* Do we need to select a new pgpath? */
@@ -512,13 +513,19 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	mpio->nr_bytes = nr_bytes;
 
 	bdev = pgpath->path.dev->bdev;
-
-	clone = blk_get_request(bdev_get_queue(bdev),
-			rq->cmd_flags | REQ_NOMERGE,
-			GFP_ATOMIC);
+	q = bdev_get_queue(bdev);
+	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
 	if (IS_ERR(clone)) {
 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
-		return r;
+		pr_debug("blk_get_request() returned %ld%s - requeuing\n",
+			 PTR_ERR(clone), blk_queue_dying(q) ?
+			 " (path offline)" : "");
+		if (blk_queue_dying(q)) {
+			atomic_inc(&m->pg_init_in_progress);
+			activate_path(pgpath);
+			return DM_MAPIO_REQUEUE;
+		}
+		return DM_MAPIO_DELAY_REQUEUE;
 	}
 	clone->bio = clone->biotail = NULL;
 	clone->rq_disk = bdev->bd_disk;
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread
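
The error check above relies on the kernel's ERR_PTR convention:
blk_get_request() returns either a valid request pointer or a small
negative errno encoded in the pointer value, which IS_ERR() detects and
PTR_ERR() decodes. A rough userspace sketch of that convention, with
simplified helper bodies and an invented get_request() stand-in:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO	4095

/* Simplified userspace versions of the kernel's ERR_PTR helpers. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline bool IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Toy allocator that fails the way blk_get_request() can. */
static void *get_request(bool path_gone)
{
	if (path_gone)
		return ERR_PTR(-ENODEV);	/* queue is dying */
	return ERR_PTR(-EBUSY);			/* temporarily out of tags */
}

int main(void)
{
	void *clone = get_request(true);

	if (IS_ERR(clone))
		printf("get_request() returned %ld - requeuing\n", PTR_ERR(clone));
	return 0;
}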

* [PATCH 03/13] dm-mpath: Delay requeuing while path initialization is in progress
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
@ 2017-04-26 18:37   ` Bart Van Assche
  2017-04-26 18:37   ` Bart Van Assche
                     ` (11 subsequent siblings)
  12 siblings, 0 replies; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer
  Cc: dm-devel, Bart Van Assche, Hannes Reinecke, Christoph Hellwig, stable

Requeuing a request immediately while path initialization is ongoing
causes undesirably high CPU usage. Hence delay requeuing while path
initialization is in progress.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: <stable@vger.kernel.org>
---
 drivers/md/dm-mpath.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6d4333fdddf5..e38c92178746 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -322,13 +322,16 @@ static int __pg_init_all_paths(struct multipath *m)
 	return atomic_read(&m->pg_init_in_progress);
 }
 
-static void pg_init_all_paths(struct multipath *m)
+static int pg_init_all_paths(struct multipath *m)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&m->lock, flags);
-	__pg_init_all_paths(m);
+	ret = __pg_init_all_paths(m);
 	spin_unlock_irqrestore(&m->lock, flags);
+
+	return ret;
 }
 
 static void __switch_pg(struct multipath *m, struct priority_group *pg)
@@ -485,7 +488,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 				   struct request **__clone)
 {
 	struct multipath *m = ti->private;
-	int r = DM_MAPIO_REQUEUE;
 	size_t nr_bytes = blk_rq_bytes(rq);
 	struct pgpath *pgpath;
 	struct block_device *bdev;
@@ -504,8 +506,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 		return -EIO;	/* Failed */
 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
-		pg_init_all_paths(m);
-		return r;
+		return pg_init_all_paths(m) == 0 ? DM_MAPIO_REQUEUE :
+			DM_MAPIO_DELAY_REQUEUE;
 	}
 
 	memset(mpio, 0, sizeof(*mpio));
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 04/13] dm-rq: Adjust requeuing delays
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (2 preceding siblings ...)
  2017-04-26 18:37   ` Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  5:48   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 05/13] dm-mpath: Make it easier to analyze requeuing behavior Bart Van Assche
                   ` (8 subsequent siblings)
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer
  Cc: Bart Van Assche, dm-devel, Hannes Reinecke, Christoph Hellwig

Reduce the requeue delay in dm_requeue_original_request() from 5s
to 0.5s so that this delay does not slow down failover or failback.
Increase the requeue delay in dm_mq_queue_rq() from 0.1s to 0.5s
to reduce the system load if immediate requeuing has been requested
by the dm driver.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
---
 drivers/md/dm-rq.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 0b081d170087..c53debdcd7dc 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -280,7 +280,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 	if (!rq->q->mq_ops)
 		dm_old_requeue_request(rq);
 	else
-		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
+		dm_mq_delay_requeue_request(rq, delay_requeue ? 500/*ms*/ : 0);
 
 	rq_completed(md, rw, false);
 }
@@ -755,7 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		/* Undo dm_start_request() before requeuing */
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
-		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
+		blk_mq_delay_run_hw_queue(hctx, 500/*ms*/);
 		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
 
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 05/13] dm-mpath: Make it easier to analyze requeuing behavior
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (3 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 04/13] dm-rq: Adjust requeuing delays Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  5:49   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 06/13] dm-rq: Check blk_mq_register_dev() return value Bart Van Assche
                   ` (7 subsequent siblings)
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer
  Cc: Bart Van Assche, dm-devel, Hannes Reinecke, Christoph Hellwig

When debugging the dm-mpath driver it is important to know which
requeuing decisions have been taken. Hence this patch, which adds
pr_debug() statements that report those decisions.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
---
 drivers/md/dm-mpath.c | 21 ++++++++++++++++++---
 drivers/md/dm-rq.c    |  5 ++++-
 2 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e38c92178746..fb4b7228fe5f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -501,8 +501,10 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 		pgpath = choose_pgpath(m, nr_bytes);
 
 	if (!pgpath) {
-		if (must_push_back_rq(m))
+		if (must_push_back_rq(m)) {
+			pr_debug("no path - requeueing\n");
 			return DM_MAPIO_DELAY_REQUEUE;
+		}
 		return -EIO;	/* Failed */
 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
@@ -1425,17 +1427,23 @@ static void pg_init_done(void *data, int errors)
 		/* Activations of other paths are still on going */
 		goto out;
 
+	pr_debug("pg_init_in_progress = %d\n",
+		 atomic_read(&m->pg_init_in_progress));
+
 	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 		if (delay_retry)
 			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 		else
 			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 
-		if (__pg_init_all_paths(m))
+		if (__pg_init_all_paths(m)) {
+			pr_debug("__pg_init_all_paths() reported that initialization is ongoing\n");
 			goto out;
+		}
 	}
 	clear_bit(MPATHF_QUEUE_IO, &m->flags);
 
+	pr_debug("Processing queued I/O list\n");
 	process_queued_io_list(m);
 
 	/*
@@ -1929,8 +1937,11 @@ static int multipath_busy(struct dm_target *ti)
 	struct pgpath *pgpath;
 
 	/* pg_init in progress */
-	if (atomic_read(&m->pg_init_in_progress))
+	if (atomic_read(&m->pg_init_in_progress)) {
+		pr_debug("pg_init_in_progress = %d\n",
+			 atomic_read(&m->pg_init_in_progress));
 		return true;
+	}
 
 	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
 	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
@@ -1977,6 +1988,10 @@ static int multipath_busy(struct dm_target *ti)
 		busy = false;
 	}
 
+
+	if (busy)
+		pr_debug("all active paths are busy\n");
+
 	return busy;
 }
 
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index c53debdcd7dc..ba5694be55a4 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -737,8 +737,10 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		dm_put_live_table(md, srcu_idx);
 	}
 
-	if (ti->type->busy && ti->type->busy(ti))
+	if (ti->type->busy && ti->type->busy(ti)) {
+		pr_debug("ti->type->busy()\n");
 		return BLK_MQ_RQ_QUEUE_BUSY;
+	}
 
 	dm_start_request(md, rq);
 
@@ -756,6 +758,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
 		blk_mq_delay_run_hw_queue(hctx, 500/*ms*/);
+		pr_debug("DM_MAPIO_REQUEUE\n");
 		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
 
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 06/13] dm-rq: Check blk_mq_register_dev() return value
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (4 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 05/13] dm-mpath: Make it easier to analyze requeuing behavior Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  5:49   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 07/13] dm, persistence: Remove an unused argument from dm_block_manager_create() Bart Van Assche
                   ` (6 subsequent siblings)
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer
  Cc: Bart Van Assche, dm-devel, Hannes Reinecke, Christoph Hellwig

blk_mq_register_dev() can fail. Hence check the return value of
that function.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
---
 drivers/md/dm-rq.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index ba5694be55a4..3ff7280f5dc5 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -813,10 +813,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
-	blk_mq_register_dev(disk_to_dev(md->disk), q);
+	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
+	if (err)
+		goto free_queue;
 
 	return 0;
 
+free_queue:
+	blk_cleanup_queue(q);
 out_tag_set:
 	blk_mq_free_tag_set(md->tag_set);
 out_kfree_tag_set:
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread
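
The added free_queue label extends the function's goto-based unwinding
ladder, in which each failure point jumps to the label that releases
exactly the resources acquired so far. A compact userspace illustration
of that pattern; the resource helpers are invented and the registration
failure is simulated:

#include <stdio.h>
#include <stdlib.h>

/* Invented resource helpers standing in for queue/tag-set allocation. */
static int *alloc_tag_set(void)  { return malloc(sizeof(int)); }
static int *alloc_queue(void)    { return malloc(sizeof(int)); }
static int  register_dev(int *q) { (void)q; return -1; /* simulate failure */ }

static int init_queue(void)
{
	int *tag_set, *queue;
	int err;

	tag_set = alloc_tag_set();
	if (!tag_set)
		return -1;

	queue = alloc_queue();
	if (!queue) {
		err = -1;
		goto out_free_tag_set;
	}

	err = register_dev(queue);
	if (err)
		goto out_free_queue;		/* undo in reverse order */

	return 0;

out_free_queue:
	free(queue);
out_free_tag_set:
	free(tag_set);
	return err;
}

int main(void)
{
	printf("init_queue() = %d\n", init_queue());
	return 0;
}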

* [PATCH 07/13] dm, persistence: Remove an unused argument from dm_block_manager_create()
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (5 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 06/13] dm-rq: Check blk_mq_register_dev() return value Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  5:49   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 08/13] dm: Verify suspend_locking assumptions at runtime Bart Van Assche
                   ` (5 subsequent siblings)
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer; +Cc: Bart Van Assche, dm-devel, Hannes Reinecke

The 'cache_size' argument of dm_block_manager_create() has never
been used. Hence remove it, together with the definitions of the
constants that were passed as the 'cache_size' argument.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
---
 drivers/md/dm-cache-metadata.c                | 3 ---
 drivers/md/dm-era-target.c                    | 2 --
 drivers/md/dm-thin-metadata.c                 | 2 --
 drivers/md/persistent-data/dm-block-manager.c | 1 -
 drivers/md/persistent-data/dm-block-manager.h | 2 +-
 5 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6735c8d6a445..8568dbd50ba4 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -27,8 +27,6 @@
 #define MIN_CACHE_VERSION 1
 #define MAX_CACHE_VERSION 2
 
-#define CACHE_METADATA_CACHE_SIZE 64
-
 /*
  *  3 for btree insert +
  *  2 for btree lookup used within space map
@@ -535,7 +533,6 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
 {
 	int r;
 	cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
-					  CACHE_METADATA_CACHE_SIZE,
 					  CACHE_MAX_CONCURRENT_LOCKS);
 	if (IS_ERR(cmd->bm)) {
 		DMERR("could not create block manager");
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 9fab33b113c4..b13751d78b9e 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -254,7 +254,6 @@ static struct dm_block_validator sb_validator = {
  * Low level metadata handling
  *--------------------------------------------------------------*/
 #define DM_ERA_METADATA_BLOCK_SIZE 4096
-#define DM_ERA_METADATA_CACHE_SIZE 64
 #define ERA_MAX_CONCURRENT_LOCKS 5
 
 struct era_metadata {
@@ -615,7 +614,6 @@ static int create_persistent_data_objects(struct era_metadata *md,
 	int r;
 
 	md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
-					 DM_ERA_METADATA_CACHE_SIZE,
 					 ERA_MAX_CONCURRENT_LOCKS);
 	if (IS_ERR(md->bm)) {
 		DMERR("could not create block manager");
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index a15091a0d40c..0f0251d0d337 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -77,7 +77,6 @@
 #define THIN_SUPERBLOCK_MAGIC 27022010
 #define THIN_SUPERBLOCK_LOCATION 0
 #define THIN_VERSION 2
-#define THIN_METADATA_CACHE_SIZE 64
 #define SECTOR_TO_BLOCK_SHIFT 3
 
 /*
@@ -686,7 +685,6 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
 	int r;
 
 	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
-					  THIN_METADATA_CACHE_SIZE,
 					  THIN_MAX_CONCURRENT_LOCKS);
 	if (IS_ERR(pmd->bm)) {
 		DMERR("could not create block manager");
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 8589e0a14068..ea15d220ced7 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -378,7 +378,6 @@ struct dm_block_manager {
 
 struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
 						 unsigned block_size,
-						 unsigned cache_size,
 						 unsigned max_held_per_thread)
 {
 	int r;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 3627d1b7667a..e728937f376a 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -33,7 +33,7 @@ void *dm_block_data(struct dm_block *b);
 struct dm_block_manager;
 struct dm_block_manager *dm_block_manager_create(
 	struct block_device *bdev, unsigned block_size,
-	unsigned cache_size, unsigned max_held_per_thread);
+	unsigned max_held_per_thread);
 void dm_block_manager_destroy(struct dm_block_manager *bm);
 
 unsigned dm_bm_block_size(struct dm_block_manager *bm);
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 08/13] dm: Verify suspend_locking assumptions at runtime
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (6 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 07/13] dm, persistence: Remove an unused argument from dm_block_manager_create() Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  5:50   ` Hannes Reinecke
  2017-04-27  6:05   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 09/13] dm-mpath: Verify locking " Bart Van Assche
                   ` (4 subsequent siblings)
  12 siblings, 2 replies; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer; +Cc: Bart Van Assche, dm-devel, Hannes Reinecke

Ensure that the assumptions about the caller holding suspend_lock
are checked at runtime if lockdep is enabled.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
---
 drivers/md/dm-table.c | 4 ++++
 drivers/md/dm.c       | 9 ++++-----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3ad16d9c9d5a..92dbc85af53a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1661,6 +1661,8 @@ static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
 	int i = t->num_targets;
 	struct dm_target *ti = t->targets;
 
+	lockdep_assert_held(&t->md->suspend_lock);
+
 	while (i--) {
 		switch (mode) {
 		case PRESUSPEND:
@@ -1708,6 +1710,8 @@ int dm_table_resume_targets(struct dm_table *t)
 {
 	int i, r = 0;
 
+	lockdep_assert_held(&t->md->suspend_lock);
+
 	for (i = 0; i < t->num_targets; i++) {
 		struct dm_target *ti = t->targets + i;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dfb75979e455..78706a04bab4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1686,11 +1686,10 @@ static void event_callback(void *context)
 	wake_up(&md->eventq);
 }
 
-/*
- * Protected by md->suspend_lock obtained by dm_swap_table().
- */
 static void __set_size(struct mapped_device *md, sector_t size)
 {
+	lockdep_assert_held(&md->suspend_lock);
+
 	set_capacity(md->disk, size);
 
 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
@@ -2140,8 +2139,6 @@ static void unlock_fs(struct mapped_device *md)
  * If __dm_suspend returns 0, the device is completely quiescent
  * now. There is no request-processing activity. All new requests
  * are being added to md->deferred list.
- *
- * Caller must hold md->suspend_lock
  */
 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 			unsigned suspend_flags, long task_state,
@@ -2357,6 +2354,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
 {
 	struct dm_table *map = NULL;
 
+	lockdep_assert_held(&md->suspend_lock);
+
 	if (md->internal_suspend_count++)
 		return; /* nested internal suspend */
 
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread
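
lockdep_assert_held() turns the old "caller must hold md->suspend_lock"
comments into checks that fire when lockdep is enabled and compile away
otherwise. The userspace sketch below mimics only that
documentation-as-check idea with an explicit owner field; real lockdep
is far more elaborate, and all names here are illustrative:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct checked_lock {
	pthread_mutex_t mutex;
	pthread_t owner;
	int held;
};

static void checked_lock(struct checked_lock *l)
{
	pthread_mutex_lock(&l->mutex);
	l->owner = pthread_self();
	l->held = 1;
}

static void checked_unlock(struct checked_lock *l)
{
	l->held = 0;
	pthread_mutex_unlock(&l->mutex);
}

/* Crude analogue of lockdep_assert_held(). */
static void assert_held(struct checked_lock *l)
{
	assert(l->held && pthread_equal(l->owner, pthread_self()));
}

/* Relies on its caller locking, like suspend_targets() does. */
static void do_suspend_work(struct checked_lock *suspend_lock)
{
	assert_held(suspend_lock);	/* replaces a "caller must hold" comment */
	puts("suspending targets");
}

int main(void)
{
	struct checked_lock suspend_lock = { .held = 0 };

	pthread_mutex_init(&suspend_lock.mutex, NULL);
	checked_lock(&suspend_lock);
	do_suspend_work(&suspend_lock);
	checked_unlock(&suspend_lock);
	pthread_mutex_destroy(&suspend_lock.mutex);
	return 0;
}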

* [PATCH 09/13] dm-mpath: Verify locking assumptions at runtime
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (7 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 08/13] dm: Verify suspend_locking assumptions at runtime Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  5:50   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 10/13] dm: Introduce enum dm_queue_mode Bart Van Assche
                   ` (3 subsequent siblings)
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer; +Cc: Bart Van Assche, dm-devel, Hannes Reinecke

Verify at runtime that __pg_init_all_paths() is called with
multipath.lock held if lockdep is enabled.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
---
 drivers/md/dm-mpath.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fb4b7228fe5f..312d4fc34430 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -298,6 +298,8 @@ static int __pg_init_all_paths(struct multipath *m)
 	struct pgpath *pgpath;
 	unsigned long pg_init_delay = 0;
 
+	lockdep_assert_held(&m->lock);
+
 	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 		return 0;
 
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 10/13] dm: Introduce enum dm_queue_mode
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (8 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 09/13] dm-mpath: Verify locking " Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  5:51   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 11/13] dm-mpath: Micro-optimize the hot path Bart Van Assche
                   ` (2 subsequent siblings)
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer; +Cc: Bart Van Assche, dm-devel, Hannes Reinecke

Introduce an enumeration type for the queue mode. This patch does
not change any functionality but makes the dm code easier to read.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
---
 drivers/md/dm-core.h          |  2 +-
 drivers/md/dm-ioctl.c         |  2 +-
 drivers/md/dm-mpath.c         |  5 ++++-
 drivers/md/dm-table.c         | 14 +++++++-------
 drivers/md/dm.c               | 14 +++++++++-----
 drivers/md/dm.h               | 12 +++++++-----
 include/linux/device-mapper.h | 14 ++++++++------
 7 files changed, 37 insertions(+), 26 deletions(-)

diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 136fda3ff9e5..b92f74d9a982 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -47,7 +47,7 @@ struct mapped_device {
 	struct request_queue *queue;
 	int numa_node_id;
 
-	unsigned type;
+	enum dm_queue_mode type;
 	/* Protect queue and type against concurrent access. */
 	struct mutex type_lock;
 
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4da6fc6b1ffd..9b1ce440fc70 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1268,7 +1268,7 @@ static int populate_table(struct dm_table *table,
 	return dm_table_complete(table);
 }
 
-static bool is_valid_type(unsigned cur, unsigned new)
+static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
 {
 	if (cur == new ||
 	    (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 312d4fc34430..adde8a51e020 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -90,7 +90,7 @@ struct multipath {
 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
 	atomic_t pg_init_count;		/* Number of times pg_init called */
 
-	unsigned queue_mode;
+	enum dm_queue_mode queue_mode;
 
 	struct mutex work_mutex;
 	struct work_struct trigger_event;
@@ -1704,6 +1704,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 			case DM_TYPE_MQ_REQUEST_BASED:
 				DMEMIT("queue_mode mq ");
 				break;
+			default:
+				WARN_ON_ONCE(true);
+				break;
 			}
 		}
 	}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 92dbc85af53a..6692571336c7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -30,7 +30,7 @@
 
 struct dm_table {
 	struct mapped_device *md;
-	unsigned type;
+	enum dm_queue_mode type;
 
 	/* btree table */
 	unsigned int depth;
@@ -821,19 +821,19 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
-static bool __table_type_bio_based(unsigned table_type)
+static bool __table_type_bio_based(enum dm_queue_mode table_type)
 {
 	return (table_type == DM_TYPE_BIO_BASED ||
 		table_type == DM_TYPE_DAX_BIO_BASED);
 }
 
-static bool __table_type_request_based(unsigned table_type)
+static bool __table_type_request_based(enum dm_queue_mode table_type)
 {
 	return (table_type == DM_TYPE_REQUEST_BASED ||
 		table_type == DM_TYPE_MQ_REQUEST_BASED);
 }
 
-void dm_table_set_type(struct dm_table *t, unsigned type)
+void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 {
 	t->type = type;
 }
@@ -875,7 +875,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	struct dm_target *tgt;
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
-	unsigned live_md_type = dm_get_md_type(t->md);
+	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
 
 	if (t->type != DM_TYPE_NONE) {
 		/* target already set the table's type */
@@ -984,7 +984,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	return 0;
 }
 
-unsigned dm_table_get_type(struct dm_table *t)
+enum dm_queue_mode dm_table_get_type(struct dm_table *t)
 {
 	return t->type;
 }
@@ -1035,7 +1035,7 @@ bool dm_table_all_blk_mq_devices(struct dm_table *t)
 
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
-	unsigned type = dm_table_get_type(t);
+	enum dm_queue_mode type = dm_table_get_type(t);
 	unsigned per_io_data_size = 0;
 	struct dm_target *tgt;
 	unsigned i;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 78706a04bab4..7161c7804363 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1797,13 +1797,13 @@ void dm_unlock_md_type(struct mapped_device *md)
 	mutex_unlock(&md->type_lock);
 }
 
-void dm_set_md_type(struct mapped_device *md, unsigned type)
+void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
 {
 	BUG_ON(!mutex_is_locked(&md->type_lock));
 	md->type = type;
 }
 
-unsigned dm_get_md_type(struct mapped_device *md)
+enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
 {
 	return md->type;
 }
@@ -1830,7 +1830,7 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
 	int r;
-	unsigned type = dm_get_md_type(md);
+	enum dm_queue_mode type = dm_get_md_type(md);
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
@@ -1861,6 +1861,8 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 		if (type == DM_TYPE_DAX_BIO_BASED)
 			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
 		break;
+	case DM_TYPE_NONE:
+		WARN_ON_ONCE(true);
 	}
 
 	return 0;
@@ -2546,8 +2548,10 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
-					    unsigned integrity, unsigned per_io_data_size)
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md,
+					    enum dm_queue_mode type,
+					    unsigned integrity,
+					    unsigned per_io_data_size)
 {
 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
 	unsigned int pool_size = 0;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f298b01f7ab3..4b3b8587cb72 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -64,7 +64,7 @@ void dm_table_presuspend_undo_targets(struct dm_table *t);
 void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-unsigned dm_table_get_type(struct dm_table *t);
+enum dm_queue_mode dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
@@ -76,8 +76,8 @@ struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
-void dm_set_md_type(struct mapped_device *md, unsigned type);
-unsigned dm_get_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
+enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
@@ -204,8 +204,10 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
-					    unsigned integrity, unsigned per_bio_data_size);
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md,
+					    enum dm_queue_mode type,
+					    unsigned integrity,
+					    unsigned per_bio_data_size);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 /*
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a7e6903866fd..9df51dcdcf2a 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -22,11 +22,13 @@ struct bio_vec;
 /*
  * Type of table, mapped_device's mempool and request_queue
  */
-#define DM_TYPE_NONE			0
-#define DM_TYPE_BIO_BASED		1
-#define DM_TYPE_REQUEST_BASED		2
-#define DM_TYPE_MQ_REQUEST_BASED	3
-#define DM_TYPE_DAX_BIO_BASED		4
+enum dm_queue_mode {
+	DM_TYPE_NONE		 = 0,
+	DM_TYPE_BIO_BASED	 = 1,
+	DM_TYPE_REQUEST_BASED	 = 2,
+	DM_TYPE_MQ_REQUEST_BASED = 3,
+	DM_TYPE_DAX_BIO_BASED	 = 4,
+};
 
 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
 
@@ -464,7 +466,7 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
  * Useful for "hybrid" target (supports both bio-based
  * and request-based).
  */
-void dm_table_set_type(struct dm_table *t, unsigned type);
+void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
 
 /*
  * Finally call this to make the table ready for use.
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread
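
Besides readability, giving the queue mode an enum type lets the
compiler flag switch statements that do not handle every mode (gcc's
-Wswitch, which -Wall enables), which is presumably why the patch also
adds the explicit default/DM_TYPE_NONE cases with WARN_ON_ONCE(). A
standalone sketch of that effect, with an invented enum:

#include <stdio.h>

/* Made-up enum mirroring the shape of enum dm_queue_mode. */
enum queue_mode {
	MODE_NONE,
	MODE_BIO_BASED,
	MODE_REQUEST_BASED,
};

static const char *mode_name(enum queue_mode mode)
{
	/*
	 * Because 'mode' has an enum type, gcc -Wall (-Wswitch) warns here
	 * that MODE_NONE is not handled; with a plain 'unsigned' parameter
	 * it would stay silent.
	 */
	switch (mode) {
	case MODE_BIO_BASED:
		return "bio-based";
	case MODE_REQUEST_BASED:
		return "request-based";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", mode_name(MODE_REQUEST_BASED));
	return 0;
}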

* [PATCH 11/13] dm-mpath: Micro-optimize the hot path
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (9 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 10/13] dm: Introduce enum dm_queue_mode Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  6:36   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 12/13] dm-mpath: Introduce assign_bit() Bart Van Assche
  2017-04-26 18:37 ` [PATCH 13/13] dm, dm-mpath: Make it easier to detect unintended I/O request flushes Bart Van Assche
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer; +Cc: Bart Van Assche, dm-devel, Hannes Reinecke

Instead of checking MPATHF_QUEUE_IF_NO_PATH,
MPATHF_SAVED_QUEUE_IF_NO_PATH and the no_flush flag to decide whether
or not to push back a request if there are no paths available, only
clear MPATHF_QUEUE_IF_NO_PATH in queue_if_no_path() if no_flush has
not been set. The result is that only a single bit has to be tested
in the hot path to decide whether or not a request must be pushed
back, and that m->lock no longer has to be taken in the hot path.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
---
 drivers/md/dm-mpath.c | 70 ++++++++-------------------------------------------
 1 file changed, 11 insertions(+), 59 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index adde8a51e020..0eafe7a2070b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -442,47 +442,6 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 }
 
 /*
- * Check whether bios must be queued in the device-mapper core rather
- * than here in the target.
- *
- * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
- * same value then we are not between multipath_presuspend()
- * and multipath_resume() calls and we have no need to check
- * for the DMF_NOFLUSH_SUSPENDING flag.
- */
-static bool __must_push_back(struct multipath *m)
-{
-	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
-		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
-		dm_noflush_suspending(m->ti));
-}
-
-static bool must_push_back_rq(struct multipath *m)
-{
-	bool r;
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
-	r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
-	     __must_push_back(m));
-	spin_unlock_irqrestore(&m->lock, flags);
-
-	return r;
-}
-
-static bool must_push_back_bio(struct multipath *m)
-{
-	bool r;
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
-	r = __must_push_back(m);
-	spin_unlock_irqrestore(&m->lock, flags);
-
-	return r;
-}
-
-/*
  * Map cloned requests (request-based multipath)
  */
 static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -503,7 +462,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 		pgpath = choose_pgpath(m, nr_bytes);
 
 	if (!pgpath) {
-		if (must_push_back_rq(m)) {
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 			pr_debug("no path - requeueing\n");
 			return DM_MAPIO_DELAY_REQUEUE;
 		}
@@ -581,9 +540,9 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	}
 
 	if (!pgpath) {
-		if (!must_push_back_bio(m))
-			return -EIO;
-		return DM_MAPIO_REQUEUE;
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+			return DM_MAPIO_REQUEUE;
+		return -EIO;
 	}
 
 	mpio->pgpath = pgpath;
@@ -675,7 +634,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 		else
 			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 	}
-	if (queue_if_no_path)
+	if (queue_if_no_path || dm_noflush_suspending(m->ti))
 		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 	else
 		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
@@ -1523,12 +1482,9 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
 
-	if (!atomic_read(&m->nr_valid_paths)) {
-		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-			if (!must_push_back_rq(m))
-				r = -EIO;
-		}
-	}
+	if (atomic_read(&m->nr_valid_paths) == 0 &&
+	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+		r = -EIO;
 
 	return r;
 }
@@ -1569,13 +1525,9 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
 
-	if (!atomic_read(&m->nr_valid_paths)) {
-		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-			if (!must_push_back_bio(m))
-				return -EIO;
-			return DM_ENDIO_REQUEUE;
-		}
-	}
+	if (atomic_read(&m->nr_valid_paths) == 0 &&
+	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+		return -EIO;
 
 	/* Queue for the daemon to resubmit */
 	dm_bio_restore(get_bio_details_from_bio(clone), clone);
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 12/13] dm-mpath: Introduce assign_bit()
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (10 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 11/13] dm-mpath: Micro-optimize the hot path Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  6:40   ` Hannes Reinecke
  2017-04-26 18:37 ` [PATCH 13/13] dm, dm-mpath: Make it easier to detect unintended I/O request flushes Bart Van Assche
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer; +Cc: Bart Van Assche, dm-devel, Hannes Reinecke

This patch does not change any functionality but makes the code
easier to read.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
---
 drivers/md/dm-mpath.c | 37 ++++++++++++++++---------------------
 1 file changed, 16 insertions(+), 21 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0eafe7a2070b..57b467207741 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -613,6 +613,14 @@ static void process_queued_bios(struct work_struct *work)
 	blk_finish_plug(&plug);
 }
 
+static void assign_bit(bool value, long nr, unsigned long *addr)
+{
+	if (value)
+		set_bit(nr, addr);
+	else
+		clear_bit(nr, addr);
+}
+
 /*
  * If we run out of usable paths, should we queue I/O or error it?
  */
@@ -622,23 +630,12 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
-
-	if (save_old_value) {
-		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
-		else
-			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
-	} else {
-		if (queue_if_no_path)
-			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
-		else
-			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
-	}
-	if (queue_if_no_path || dm_noflush_suspending(m->ti))
-		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
-	else
-		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
-
+	assign_bit((save_old_value &&
+		    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
+		   (!save_old_value && queue_if_no_path),
+		   MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+	assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
+		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (!queue_if_no_path) {
@@ -1593,10 +1590,8 @@ static void multipath_resume(struct dm_target *ti)
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
-	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
-		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
-	else
-		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+	assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
+		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread
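
The helper introduced above simply maps a boolean onto
set_bit()/clear_bit(). A minimal userspace sketch of the same
semantics, using plain non-atomic bit operations as stand-ins for the
kernel primitives:

#include <stdbool.h>
#include <stdio.h>

/* Non-atomic stand-ins for the kernel's set_bit()/clear_bit(). */
static void set_bit(long nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static void clear_bit(long nr, unsigned long *addr)
{
	*addr &= ~(1UL << nr);
}

/* Same shape as the helper added by this patch. */
static void assign_bit(bool value, long nr, unsigned long *addr)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

int main(void)
{
	unsigned long flags = 0;

	assign_bit(true, 3, &flags);	/* flags == 0x8 */
	assign_bit(false, 3, &flags);	/* flags == 0x0 */
	printf("flags = %#lx\n", flags);
	return 0;
}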

* [PATCH 13/13] dm, dm-mpath: Make it easier to detect unintended I/O request flushes
  2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
                   ` (11 preceding siblings ...)
  2017-04-26 18:37 ` [PATCH 12/13] dm-mpath: Introduce assign_bit() Bart Van Assche
@ 2017-04-26 18:37 ` Bart Van Assche
  2017-04-27  6:41   ` Hannes Reinecke
  12 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-26 18:37 UTC (permalink / raw)
  To: Mike Snitzer; +Cc: Bart Van Assche, dm-devel, Hannes Reinecke

I/O errors that occur because multipathd incorrectly did not enable
the no-flush flag for DM_DEVICE_SUSPEND or DM_DEVICE_RESUME are hard
to debug. Add more logging to make debugging such errors easier.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Hannes Reinecke <hare@suse.com>
---
 drivers/md/dm-mpath.c | 25 +++++++++++++++++++++----
 drivers/md/dm.c       |  3 +++
 2 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 57b467207741..7aebbf4ae7d5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -442,6 +442,23 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 }
 
 /*
+ * Note: dm_report_eio() is a macro instead of a function to make pr_debug()
+ * report the function name and line number of the function from which it has
+ * been invoked.
+ */
+#define dm_report_eio(m)						\
+({									\
+	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
+									\
+	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
+		 dm_device_name(md),					\
+		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
+		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
+		 dm_noflush_suspending((m)->ti));			\
+	-EIO;								\
+})
+
+/*
  * Map cloned requests (request-based multipath)
  */
 static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -466,7 +483,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 			pr_debug("no path - requeueing\n");
 			return DM_MAPIO_DELAY_REQUEUE;
 		}
-		return -EIO;	/* Failed */
+		return dm_report_eio(m);	/* Failed */
 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 		return pg_init_all_paths(m) == 0 ? DM_MAPIO_REQUEUE :
@@ -542,7 +559,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	if (!pgpath) {
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_REQUEUE;
-		return -EIO;
+		return dm_report_eio(m);
 	}
 
 	mpio->pgpath = pgpath;
@@ -1481,7 +1498,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
 
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
 	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-		r = -EIO;
+		r = dm_report_eio(m);
 
 	return r;
 }
@@ -1524,7 +1541,7 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
 
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
 	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-		return -EIO;
+		return dm_report_eio(m);
 
 	/* Queue for the daemon to resubmit */
 	dm_bio_restore(get_bio_details_from_bio(clone), clone);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7161c7804363..9f0aed0e1bf0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2158,6 +2158,9 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 	 */
 	if (noflush)
 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
+	else
+		pr_debug("%s: suspending without no-flush flag\n",
+			 dm_device_name(md));
 
 	/*
 	 * This gets reverted if there's an error later and the targets
-- 
2.12.2

^ permalink raw reply related	[flat|nested] 36+ messages in thread
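
dm_report_eio() is written as a GNU C statement expression - ({ ... }) -
so that it can both log and evaluate to -EIO at the call site, which
keeps the function name and line reported by pr_debug() pointing at the
caller. A userspace sketch of the general technique, with an invented
dbg() macro in place of pr_debug():

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical debug macro: like pr_debug() with dynamic debug, it
 * records the function and line where it is expanded.
 */
#define dbg(fmt, ...) \
	fprintf(stderr, "%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__)

/*
 * GNU C statement expression: logs at the caller's location and
 * evaluates to -EIO, mirroring the shape of dm_report_eio().
 */
#define report_eio(name)				\
({							\
	dbg("%s: returning EIO\n", (name));		\
	-EIO;						\
})

static int map_request(const char *dev, int have_path)
{
	if (!have_path)
		return report_eio(dev);	/* logged as coming from map_request() */
	return 0;
}

int main(void)
{
	int r = map_request("dm-0", 0);

	printf("map_request() = %d\n", r);
	return 0;
}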

* Re: [dm-devel] [PATCH 02/13] dm-mpath: Avoid that path removal can trigger an infinite loop
  2017-04-26 18:37   ` Bart Van Assche
@ 2017-04-27  5:46     ` Hannes Reinecke
  -1 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:46 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer
  Cc: dm-devel, Hannes Reinecke, stable, Christoph Hellwig

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> If blk_get_request() fails, check whether the failure is due to
> a path being removed. If that is the case, fail the path by
> triggering a call to fail_path(). This patch prevents the following
> scenario from being encountered while removing paths:
> * CPU usage of a kworker thread jumps to 100%.
> * Removing the dm device becomes impossible.
> 
> Delay requeuing if blk_get_request() returns -EBUSY or
> -EWOULDBLOCK because in these cases immediate requeuing is
> inappropriate.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: <stable@vger.kernel.org>
> ---
>  drivers/md/dm-mpath.c | 17 ++++++++++++-----
>  1 file changed, 12 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
> index 909098e18643..6d4333fdddf5 100644
> --- a/drivers/md/dm-mpath.c
> +++ b/drivers/md/dm-mpath.c
> @@ -490,6 +490,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
>  	struct pgpath *pgpath;
>  	struct block_device *bdev;
>  	struct dm_mpath_io *mpio = get_mpio(map_context);
> +	struct request_queue *q;
>  	struct request *clone;
>  
>  	/* Do we need to select a new pgpath? */
> @@ -512,13 +513,19 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
>  	mpio->nr_bytes = nr_bytes;
>  
>  	bdev = pgpath->path.dev->bdev;
> -
> -	clone = blk_get_request(bdev_get_queue(bdev),
> -			rq->cmd_flags | REQ_NOMERGE,
> -			GFP_ATOMIC);
> +	q = bdev_get_queue(bdev);
> +	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
>  	if (IS_ERR(clone)) {
>  		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
> -		return r;
> +		pr_debug("blk_get_request() returned %ld%s - requeuing\n",
> +			 PTR_ERR(clone), blk_queue_dying(q) ?
> +			 " (path offline)" : "");
> +		if (blk_queue_dying(q)) {
> +			atomic_inc(&m->pg_init_in_progress);
> +			activate_path(pgpath);
> +			return DM_MAPIO_REQUEUE;
> +		}
> +		return DM_MAPIO_DELAY_REQUEUE;
>  	}
>  	clone->bio = clone->biotail = NULL;
>  	clone->rq_disk = bdev->bd_disk;
> 
At the very least this does warrant some inline comments.
Why do we call activate_path() here, seeing that the queue is dying?

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [dm-devel] [PATCH 03/13] dm-mpath: Delay requeuing while path initialization is in progress
  2017-04-26 18:37   ` Bart Van Assche
@ 2017-04-27  5:47     ` Hannes Reinecke
  -1 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:47 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer
  Cc: dm-devel, Hannes Reinecke, stable, Christoph Hellwig

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> Requeuing a request immediately while path initialization is ongoing
> causes high CPU usage, something that is undesired. Hence delay
> requeuing while path initialization is in progress.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: <stable@vger.kernel.org>
> ---
>  drivers/md/dm-mpath.c | 12 +++++++-----
>  1 file changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
> index 6d4333fdddf5..e38c92178746 100644
> --- a/drivers/md/dm-mpath.c
> +++ b/drivers/md/dm-mpath.c
> @@ -322,13 +322,16 @@ static int __pg_init_all_paths(struct multipath *m)
>  	return atomic_read(&m->pg_init_in_progress);
>  }
>  
> -static void pg_init_all_paths(struct multipath *m)
> +static int pg_init_all_paths(struct multipath *m)
>  {
>  	unsigned long flags;
> +	int ret;
>  
>  	spin_lock_irqsave(&m->lock, flags);
> -	__pg_init_all_paths(m);
> +	ret = __pg_init_all_paths(m);
>  	spin_unlock_irqrestore(&m->lock, flags);
> +
> +	return ret;
>  }
>  
>  static void __switch_pg(struct multipath *m, struct priority_group *pg)
> @@ -485,7 +488,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
>  				   struct request **__clone)
>  {
>  	struct multipath *m = ti->private;
> -	int r = DM_MAPIO_REQUEUE;
>  	size_t nr_bytes = blk_rq_bytes(rq);
>  	struct pgpath *pgpath;
>  	struct block_device *bdev;
> @@ -504,8 +506,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
>  		return -EIO;	/* Failed */
>  	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
>  		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
> -		pg_init_all_paths(m);
> -		return r;
> +		return pg_init_all_paths(m) == 0 ? DM_MAPIO_REQUEUE :
> +			DM_MAPIO_DELAY_REQUEUE;
>  	}
>  
>  	memset(mpio, 0, sizeof(*mpio));
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 04/13] dm-rq: Adjust requeuing delays
  2017-04-26 18:37 ` [PATCH 04/13] dm-rq: Adjust requeuing delays Bart Van Assche
@ 2017-04-27  5:48   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:48 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer
  Cc: dm-devel, Hannes Reinecke, Christoph Hellwig

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> Reduce the requeue delay in dm_requeue_original_request() from 5s
> to 0.5s to avoid that this delay slows down failover or failback.
> Increase the requeue delay in dm_mq_queue_rq() from 0.1s to 0.5s
> to reduce the system load if immediate requeuing has been requested
> by the dm driver.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> Cc: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/md/dm-rq.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
> index 0b081d170087..c53debdcd7dc 100644
> --- a/drivers/md/dm-rq.c
> +++ b/drivers/md/dm-rq.c
> @@ -280,7 +280,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
>  	if (!rq->q->mq_ops)
>  		dm_old_requeue_request(rq);
>  	else
> -		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
> +		dm_mq_delay_requeue_request(rq, delay_requeue ? 500/*ms*/ : 0);
>  
>  	rq_completed(md, rw, false);
>  }
> @@ -755,7 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>  		/* Undo dm_start_request() before requeuing */
>  		rq_end_stats(md, rq);
>  		rq_completed(md, rq_data_dir(rq), false);
> -		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
> +		blk_mq_delay_run_hw_queue(hctx, 500/*ms*/);
>  		return BLK_MQ_RQ_QUEUE_BUSY;
>  	}
>  
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 05/13] dm-mpath: Make it easier to analyze requeuing behavior
  2017-04-26 18:37 ` [PATCH 05/13] dm-mpath: Make it easier to analyze requeuing behavior Bart Van Assche
@ 2017-04-27  5:49   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:49 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer
  Cc: dm-devel, Hannes Reinecke, Christoph Hellwig

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> When debugging the dm-mpath driver it is important to know what
> decisions have been taken with regard to requeuing. Hence this
> patch that adds pr_debug() statements that report what decisions
> have been taken.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> Cc: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/md/dm-mpath.c | 21 ++++++++++++++++++---
>  drivers/md/dm-rq.c    |  5 ++++-
>  2 files changed, 22 insertions(+), 4 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 06/13] dm-rq: Check blk_mq_register_dev() return value
  2017-04-26 18:37 ` [PATCH 06/13] dm-rq: Check blk_mq_register_dev() return value Bart Van Assche
@ 2017-04-27  5:49   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:49 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer
  Cc: dm-devel, Hannes Reinecke, Christoph Hellwig

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> blk_mq_register_dev() can fail. Hence check the return value of
> that function.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> Cc: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/md/dm-rq.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
> index ba5694be55a4..3ff7280f5dc5 100644
> --- a/drivers/md/dm-rq.c
> +++ b/drivers/md/dm-rq.c
> @@ -813,10 +813,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
>  	dm_init_md_queue(md);
>  
>  	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
> -	blk_mq_register_dev(disk_to_dev(md->disk), q);
> +	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
> +	if (err)
> +		goto free_queue;
>  
>  	return 0;
>  
> +free_queue:
> +	blk_cleanup_queue(q);
>  out_tag_set:
>  	blk_mq_free_tag_set(md->tag_set);
>  out_kfree_tag_set:
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 07/13] dm, persistence: Remove an unused argument from dm_block_manager_create()
  2017-04-26 18:37 ` [PATCH 07/13] dm, persistence: Remove an unused argument from dm_block_manager_create() Bart Van Assche
@ 2017-04-27  5:49   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:49 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer; +Cc: dm-devel, Hannes Reinecke

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> The 'cache_size' argument of dm_block_manager_create() has never
> been used. Hence remove it and also the definitions of the constants
> passed as 'cache_size' argument.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> ---
>  drivers/md/dm-cache-metadata.c                | 3 ---
>  drivers/md/dm-era-target.c                    | 2 --
>  drivers/md/dm-thin-metadata.c                 | 2 --
>  drivers/md/persistent-data/dm-block-manager.c | 1 -
>  drivers/md/persistent-data/dm-block-manager.h | 2 +-
>  5 files changed, 1 insertion(+), 9 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 08/13] dm: Verify suspend_locking assumptions at runtime
  2017-04-26 18:37 ` [PATCH 08/13] dm: Verify suspend_locking assumptions at runtime Bart Van Assche
@ 2017-04-27  5:50   ` Hannes Reinecke
  2017-04-27  6:05   ` Hannes Reinecke
  1 sibling, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:50 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer; +Cc: dm-devel, Hannes Reinecke

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> Ensure that the assumptions about the caller holding suspend_lock
> are checked at runtime if lockdep is enabled.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> ---
>  drivers/md/dm-table.c | 4 ++++
>  drivers/md/dm.c       | 9 ++++-----
>  2 files changed, 8 insertions(+), 5 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread
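
Only the diffstat of this patch is quoted in the review above. Going by the commit message, the change most likely annotates functions that assume md->suspend_lock is held with lockdep_assert_held(); a minimal sketch under that assumption (the helper name below is hypothetical and the exact call sites are not known from this excerpt):

/* Hypothetical example of the kind of annotation the patch adds. */
static void __example_requires_suspend_lock(struct mapped_device *md)
{
	/* Compiles away without lockdep; with lockdep it verifies the claim. */
	lockdep_assert_held(&md->suspend_lock);

	/* ... code that is only safe while md->suspend_lock is held ... */
}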

* Re: [PATCH 09/13] dm-mpath: Verify locking assumptions at runtime
  2017-04-26 18:37 ` [PATCH 09/13] dm-mpath: Verify locking " Bart Van Assche
@ 2017-04-27  5:50   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:50 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer; +Cc: dm-devel, Hannes Reinecke

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> Verify at runtime that __pg_init_all_paths() is called with
> multipath.lock held if lockdep is enabled.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> ---
>  drivers/md/dm-mpath.c | 2 ++
>  1 file changed, 2 insertions(+)
> 
> diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
> index fb4b7228fe5f..312d4fc34430 100644
> --- a/drivers/md/dm-mpath.c
> +++ b/drivers/md/dm-mpath.c
> @@ -298,6 +298,8 @@ static int __pg_init_all_paths(struct multipath *m)
>  	struct pgpath *pgpath;
>  	unsigned long pg_init_delay = 0;
>  
> +	lockdep_assert_held(&m->lock);
> +
>  	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
>  		return 0;
>  
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 10/13] dm: Introduce enum dm_queue_mode
  2017-04-26 18:37 ` [PATCH 10/13] dm: Introduce enum dm_queue_mode Bart Van Assche
@ 2017-04-27  5:51   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  5:51 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer; +Cc: dm-devel, Hannes Reinecke

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> Introduce an enumeration type for the queue mode. This patch does
> not change any functionality but makes the dm code easier to read.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> ---
>  drivers/md/dm-core.h          |  2 +-
>  drivers/md/dm-ioctl.c         |  2 +-
>  drivers/md/dm-mpath.c         |  5 ++++-
>  drivers/md/dm-table.c         | 14 +++++++-------
>  drivers/md/dm.c               | 14 +++++++++-----
>  drivers/md/dm.h               | 12 +++++++-----
>  include/linux/device-mapper.h | 14 ++++++++------
>  7 files changed, 37 insertions(+), 26 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread
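
Again only the diffstat is quoted. For orientation, the patch presumably gathers the existing DM_TYPE_* values from include/linux/device-mapper.h into a named enum so that queue-mode variables and parameters get a real type; the member list below reflects the defines that existed around this time and is an assumption about the patch's exact contents:

/* Sketch: the DM_TYPE_* defines expressed as a named enum. */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_MQ_REQUEST_BASED = 3,
	DM_TYPE_DAX_BIO_BASED	 = 4,
};

Functions such as dm_table_get_type() that previously returned a plain unsigned value would presumably return enum dm_queue_mode instead, which would account for most of the churn in the diffstat above.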

* Re: [PATCH 08/13] dm: Verify suspend_locking assumptions at runtime
  2017-04-26 18:37 ` [PATCH 08/13] dm: Verify suspend_locking assumptions at runtime Bart Van Assche
  2017-04-27  5:50   ` Hannes Reinecke
@ 2017-04-27  6:05   ` Hannes Reinecke
  1 sibling, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  6:05 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer; +Cc: dm-devel, Hannes Reinecke

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> Ensure that the assumptions about the caller holding suspend_lock
> are checked at runtime if lockdep is enabled.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> ---
>  drivers/md/dm-table.c | 4 ++++
>  drivers/md/dm.c       | 9 ++++-----
>  2 files changed, 8 insertions(+), 5 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 11/13] dm-mpath: Micro-optimize the hot path
  2017-04-26 18:37 ` [PATCH 11/13] dm-mpath: Micro-optimize the hot path Bart Van Assche
@ 2017-04-27  6:36   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  6:36 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer; +Cc: dm-devel, Hannes Reinecke

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> Instead of checking MPATHF_QUEUE_IF_NO_PATH,
> MPATHF_SAVED_QUEUE_IF_NO_PATH and the no_flush flag to decide whether
> or not to push back a request if there are no paths available, only
> clear MPATHF_QUEUE_IF_NO_PATH in queue_if_no_path() if no_flush has
> not been set. The result is that only a single bit has to be tested
> in the hot path to decide whether or not a request must be pushed
> back and also that m->lock does not have to be taken in the hot path.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> ---
>  drivers/md/dm-mpath.c | 70 ++++++++-------------------------------------------
>  1 file changed, 11 insertions(+), 59 deletions(-)
> 
> diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
> index adde8a51e020..0eafe7a2070b 100644
> --- a/drivers/md/dm-mpath.c
> +++ b/drivers/md/dm-mpath.c
> @@ -442,47 +442,6 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
>  }
>  
>  /*
> - * Check whether bios must be queued in the device-mapper core rather
> - * than here in the target.
> - *
> - * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
> - * same value then we are not between multipath_presuspend()
> - * and multipath_resume() calls and we have no need to check
> - * for the DMF_NOFLUSH_SUSPENDING flag.
> - */
> -static bool __must_push_back(struct multipath *m)
> -{
> -	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
> -		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
> -		dm_noflush_suspending(m->ti));
> -}
> -
> -static bool must_push_back_rq(struct multipath *m)
> -{
> -	bool r;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&m->lock, flags);
> -	r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
> -	     __must_push_back(m));
> -	spin_unlock_irqrestore(&m->lock, flags);
> -
> -	return r;
> -}
> -
> -static bool must_push_back_bio(struct multipath *m)
> -{
> -	bool r;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&m->lock, flags);
> -	r = __must_push_back(m);
> -	spin_unlock_irqrestore(&m->lock, flags);
> -
> -	return r;
> -}
> -
> -/*
>   * Map cloned requests (request-based multipath)
>   */
>  static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
> @@ -503,7 +462,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
>  		pgpath = choose_pgpath(m, nr_bytes);
>  
>  	if (!pgpath) {
> -		if (must_push_back_rq(m)) {
> +		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
>  			pr_debug("no path - requeueing\n");
>  			return DM_MAPIO_DELAY_REQUEUE;
>  		}
> @@ -581,9 +540,9 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
>  	}
>  
>  	if (!pgpath) {
> -		if (!must_push_back_bio(m))
> -			return -EIO;
> -		return DM_MAPIO_REQUEUE;
> +		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
> +			return DM_MAPIO_REQUEUE;
> +		return -EIO;
>  	}
>  
>  	mpio->pgpath = pgpath;
> @@ -675,7 +634,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
>  		else
>  			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
>  	}
> -	if (queue_if_no_path)
> +	if (queue_if_no_path || dm_noflush_suspending(m->ti))
>  		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
>  	else
>  		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
> @@ -1523,12 +1482,9 @@ static int do_end_io(struct multipath *m, struct request *clone,
>  	if (mpio->pgpath)
>  		fail_path(mpio->pgpath);
>  
> -	if (!atomic_read(&m->nr_valid_paths)) {
> -		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
> -			if (!must_push_back_rq(m))
> -				r = -EIO;
> -		}
> -	}
> +	if (atomic_read(&m->nr_valid_paths) == 0 &&
> +	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
> +		r = -EIO;
>  
>  	return r;
>  }
> @@ -1569,13 +1525,9 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
>  	if (mpio->pgpath)
>  		fail_path(mpio->pgpath);
>  
> -	if (!atomic_read(&m->nr_valid_paths)) {
> -		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
> -			if (!must_push_back_bio(m))
> -				return -EIO;
> -			return DM_ENDIO_REQUEUE;
> -		}
> -	}
> +	if (atomic_read(&m->nr_valid_paths) == 0 &&
> +	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
> +		return -EIO;
>  
>  	/* Queue for the daemon to resubmit */
>  	dm_bio_restore(get_bio_details_from_bio(clone), clone);
> 

_Looks_ okay.
But the entire 'must_push_back' stuff is so convoluted (plus not
actually required for rq-based multipathing) that I'll leave the final
decision to Mike.
But anyway:

Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 12/13] dm-mpath: Introduce assign_bit()
  2017-04-26 18:37 ` [PATCH 12/13] dm-mpath: Introduce assign_bit() Bart Van Assche
@ 2017-04-27  6:40   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  6:40 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer; +Cc: dm-devel, Hannes Reinecke

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> This patch does not change any functionality but makes the code
> easier to read.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> ---
>  drivers/md/dm-mpath.c | 37 ++++++++++++++++---------------------
>  1 file changed, 16 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
> index 0eafe7a2070b..57b467207741 100644
> --- a/drivers/md/dm-mpath.c
> +++ b/drivers/md/dm-mpath.c
> @@ -613,6 +613,14 @@ static void process_queued_bios(struct work_struct *work)
>  	blk_finish_plug(&plug);
>  }
>  
> +static void assign_bit(bool value, long nr, unsigned long *addr)
> +{
> +	if (value)
> +		set_bit(nr, addr);
> +	else
> +		clear_bit(nr, addr);
> +}
> +
>  /*
>   * If we run out of usable paths, should we queue I/O or error it?
>   */
> @@ -622,23 +630,12 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
>  	unsigned long flags;
>  
>  	spin_lock_irqsave(&m->lock, flags);
> -
> -	if (save_old_value) {
> -		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
> -			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
> -		else
> -			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
> -	} else {
> -		if (queue_if_no_path)
> -			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
> -		else
> -			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
> -	}
> -	if (queue_if_no_path || dm_noflush_suspending(m->ti))
> -		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
> -	else
> -		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
> -
> +	assign_bit((save_old_value &&
> +		    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
> +		   (!save_old_value && queue_if_no_path),
> +		   MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
> +	assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
> +		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
>  	spin_unlock_irqrestore(&m->lock, flags);
>  
>  	if (!queue_if_no_path) {
> @@ -1593,10 +1590,8 @@ static void multipath_resume(struct dm_target *ti)
>  	unsigned long flags;
>  
>  	spin_lock_irqsave(&m->lock, flags);
> -	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
> -		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
> -	else
> -		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
> +	assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
> +		   MPATHF_QUEUE_IF_NO_PATH, &m->flags);
>  	spin_unlock_irqrestore(&m->lock, flags);
>  }
>  
> 
Not that it makes the code easier to read, but it's certainly shorter.
Personally, I would love to do away with the 'SAVED_QUEUE_IF_NO_PATH'
thingie. But anyway.

Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread
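
A side note on the helper itself: nothing in assign_bit() is multipath-specific, so the same three lines could live in a shared header. A sketch of such a generic variant, using the (nr, addr, value) argument order of the other bit helpers rather than the (value, nr, addr) order used in the patch (placement and signature here are a suggestion, not part of this series; later kernels did grow a generic assign_bit() along these lines in <linux/bitops.h>):

/* Hypothetical generic variant; not part of this patch series. */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

/*
 * Usage, mirroring the multipath_resume() hunk quoted above:
 *	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
 *		   test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
 */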

* Re: [PATCH 13/13] dm, dm-mpath: Make it easier to detect unintended I/O request flushes
  2017-04-26 18:37 ` [PATCH 13/13] dm, dm-mpath: Make it easier to detect unintended I/O request flushes Bart Van Assche
@ 2017-04-27  6:41   ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27  6:41 UTC (permalink / raw)
  To: Bart Van Assche, Mike Snitzer; +Cc: dm-devel, Hannes Reinecke

On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> I/O errors triggered by multipathd incorrectly not enabling the
> no-flush flag for DM_DEVICE_SUSPEND or DM_DEVICE_RESUME are hard to
> debug. Add more logging to make it easier to debug this.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Hannes Reinecke <hare@suse.com>
> ---
>  drivers/md/dm-mpath.c | 25 +++++++++++++++++++++----
>  drivers/md/dm.c       |  3 +++
>  2 files changed, 24 insertions(+), 4 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [dm-devel] [PATCH 02/13] dm-mpath: Avoid that path removal can trigger an infinite loop
  2017-04-27  5:46     ` Hannes Reinecke
  (?)
@ 2017-04-27 15:11     ` Bart Van Assche
  2017-04-27 15:13         ` Hannes Reinecke
  -1 siblings, 1 reply; 36+ messages in thread
From: Bart Van Assche @ 2017-04-27 15:11 UTC (permalink / raw)
  To: hare, snitzer; +Cc: dm-devel, hch, hare, stable

On Thu, 2017-04-27 at 07:46 +0200, Hannes Reinecke wrote:
> On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> > +	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
> >  	if (IS_ERR(clone)) {
> >  		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
> > -		return r;
> > +		pr_debug("blk_get_request() returned %ld%s - requeuing\n",
> > +			 PTR_ERR(clone), blk_queue_dying(q) ?
> > +			 " (path offline)" : "");
> > +		if (blk_queue_dying(q)) {
> > +			atomic_inc(&m->pg_init_in_progress);
> > +			activate_path(pgpath);
> > +			return DM_MAPIO_REQUEUE;
> > +		}
> > +		return DM_MAPIO_DELAY_REQUEUE;
> >  	}
> >  	clone->bio = clone->biotail = NULL;
> >  	clone->rq_disk = bdev->bd_disk;
> 
> At the very least this does warrant some inline comments.
> Why do we call activate_path() here, seeing that the queue is dying?

Hello Hannes,

activate_path() is not only able to activate a path but can also change
the state of a path to offline. The body of the activate_path() function
makes that clear and that is why I had not added a comment above the
activate_path() call:

static void activate_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

Bart.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [dm-devel] [PATCH 02/13] dm-mpath: Avoid that path removal can trigger an infinite loop
  2017-04-27 15:11     ` Bart Van Assche
@ 2017-04-27 15:13         ` Hannes Reinecke
  0 siblings, 0 replies; 36+ messages in thread
From: Hannes Reinecke @ 2017-04-27 15:13 UTC (permalink / raw)
  To: Bart Van Assche, snitzer; +Cc: dm-devel, hch, hare, stable

On 04/27/2017 05:11 PM, Bart Van Assche wrote:
> On Thu, 2017-04-27 at 07:46 +0200, Hannes Reinecke wrote:
>> On 04/26/2017 08:37 PM, Bart Van Assche wrote:
>>> +	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
>>>  	if (IS_ERR(clone)) {
>>>  		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
>>> -		return r;
>>> +		pr_debug("blk_get_request() returned %ld%s - requeuing\n",
>>> +			 PTR_ERR(clone), blk_queue_dying(q) ?
>>> +			 " (path offline)" : "");
>>> +		if (blk_queue_dying(q)) {
>>> +			atomic_inc(&m->pg_init_in_progress);
>>> +			activate_path(pgpath);
>>> +			return DM_MAPIO_REQUEUE;
>>> +		}
>>> +		return DM_MAPIO_DELAY_REQUEUE;
>>>  	}
>>>  	clone->bio = clone->biotail = NULL;
>>>  	clone->rq_disk = bdev->bd_disk;
>>
>> At the very least this does warrant some inline comments.
>> Why do we call activate_path() here, seeing that the queue is dying?
> 
> Hello Hannes,
> 
> activate_path() is not only able to activate a path but can also change
> the state of a path to offline. The body of the activate_path() function
> makes that clear and that is why I had not added a comment above the
> activate_path() call:
> 
> static void activate_path(struct pgpath *pgpath)
> {
> 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
> 
> 	if (pgpath->is_active && !blk_queue_dying(q))
> 		scsi_dh_activate(q, pg_init_done, pgpath);
> 	else
> 		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
> }
> 
So why not call 'pg_init_done()' directly and avoid the confusion?

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 02/13] dm-mpath: Avoid that path removal can trigger an infinite loop
  2017-04-27 15:13         ` Hannes Reinecke
  (?)
@ 2017-04-27 15:36         ` Mike Snitzer
  -1 siblings, 0 replies; 36+ messages in thread
From: Mike Snitzer @ 2017-04-27 15:36 UTC (permalink / raw)
  To: Hannes Reinecke; +Cc: Bart Van Assche, dm-devel, hch, hare, stable

On Thu, Apr 27 2017 at 11:13am -0400,
Hannes Reinecke <hare@suse.de> wrote:

> On 04/27/2017 05:11 PM, Bart Van Assche wrote:
> > On Thu, 2017-04-27 at 07:46 +0200, Hannes Reinecke wrote:
> >> On 04/26/2017 08:37 PM, Bart Van Assche wrote:
> >>> +	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
> >>>  	if (IS_ERR(clone)) {
> >>>  		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
> >>> -		return r;
> >>> +		pr_debug("blk_get_request() returned %ld%s - requeuing\n",
> >>> +			 PTR_ERR(clone), blk_queue_dying(q) ?
> >>> +			 " (path offline)" : "");
> >>> +		if (blk_queue_dying(q)) {
> >>> +			atomic_inc(&m->pg_init_in_progress);
> >>> +			activate_path(pgpath);
> >>> +			return DM_MAPIO_REQUEUE;
> >>> +		}
> >>> +		return DM_MAPIO_DELAY_REQUEUE;
> >>>  	}
> >>>  	clone->bio = clone->biotail = NULL;
> >>>  	clone->rq_disk = bdev->bd_disk;
> >>
> >> At the very least this does warrant some inline comments.
> >> Why do we call activate_path() here, seeing that the queue is dying?
> > 
> > Hello Hannes,
> > 
> > activate_path() is not only able to activate a path but can also change
> > the state of a path to offline. The body of the activate_path() function
> > makes that clear and that is why I had not added a comment above the
> > activate_path() call:
> > 
> > static void activate_path(struct pgpath *pgpath)
> > {
> > 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
> > 
> > 	if (pgpath->is_active && !blk_queue_dying(q))
> > 		scsi_dh_activate(q, pg_init_done, pgpath);
> > 	else
> > 		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
> > }
> > 
> So why not call 'pg_init_done()' directly and avoid the confusion?

Doing so is sprinkling more SCSI specific droppings in code that should
be increasingly transport agnostic.  Might be worth renaming
activate_path() to activate_or_offline_path() ?

Mike

^ permalink raw reply	[flat|nested] 36+ messages in thread
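
To make the suggestion concrete, one way the rename could look is to keep the workqueue entry point presumably introduced by patch 01 as a thin wrapper and give the directly callable helper a name that admits it may offline the path instead of activating it (the names and the exact split below are a sketch of the suggestion, not a reviewed patch):

/* Sketch of the proposed rename; not a reviewed patch. */
static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}

The error path in multipath_clone_and_map() would then call activate_or_offline_path() directly, which makes it clearer at the call site that a dying queue is expected to end up in pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED).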

end of thread, other threads:[~2017-04-27 15:36 UTC | newest]

Thread overview: 36+ messages
2017-04-26 18:37 [PATCH 00/13] Device mapper and dm-mpath patches Bart Van Assche
2017-04-26 18:37 ` [PATCH 01/13] dm-mpath: Split activate_path() Bart Van Assche
2017-04-26 18:37   ` Bart Van Assche
2017-04-26 18:37 ` [PATCH 02/13] dm-mpath: Avoid that path removal can trigger an infinite loop Bart Van Assche
2017-04-26 18:37   ` Bart Van Assche
2017-04-27  5:46   ` [dm-devel] " Hannes Reinecke
2017-04-27  5:46     ` Hannes Reinecke
2017-04-27 15:11     ` Bart Van Assche
2017-04-27 15:13       ` Hannes Reinecke
2017-04-27 15:13         ` Hannes Reinecke
2017-04-27 15:36         ` Mike Snitzer
2017-04-26 18:37 ` [PATCH 03/13] dm-mpath: Delay requeuing while path initialization is in progress Bart Van Assche
2017-04-26 18:37   ` Bart Van Assche
2017-04-27  5:47   ` [dm-devel] " Hannes Reinecke
2017-04-27  5:47     ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 04/13] dm-rq: Adjust requeuing delays Bart Van Assche
2017-04-27  5:48   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 05/13] dm-mpath: Make it easier to analyze requeuing behavior Bart Van Assche
2017-04-27  5:49   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 06/13] dm-rq: Check blk_mq_register_dev() return value Bart Van Assche
2017-04-27  5:49   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 07/13] dm, persistence: Remove an unused argument from dm_block_manager_create() Bart Van Assche
2017-04-27  5:49   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 08/13] dm: Verify suspend_locking assumptions at runtime Bart Van Assche
2017-04-27  5:50   ` Hannes Reinecke
2017-04-27  6:05   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 09/13] dm-mpath: Verify locking " Bart Van Assche
2017-04-27  5:50   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 10/13] dm: Introduce enum dm_queue_mode Bart Van Assche
2017-04-27  5:51   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 11/13] dm-mpath: Micro-optimize the hot path Bart Van Assche
2017-04-27  6:36   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 12/13] dm-mpath: Introduce assign_bit() Bart Van Assche
2017-04-27  6:40   ` Hannes Reinecke
2017-04-26 18:37 ` [PATCH 13/13] dm, dm-mpath: Make it easier to detect unintended I/O request flushes Bart Van Assche
2017-04-27  6:41   ` Hannes Reinecke
