* remove more legacy request leftover
@ 2018-11-14 16:02 Christoph Hellwig
  2018-11-14 16:02 ` [PATCH 01/16] block: remove QUEUE_FLAG_BYPASS and ->bypass Christoph Hellwig
                   ` (16 more replies)
  0 siblings, 17 replies; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

Hi Jens,

this series removes another bunch of legacy request leftovers,
including the pointer indirection for the queue_lock.

Note that we have very few queue_lock users left.  I wonder if
we should get rid of it entirely and have separate locks for
the cgroup and I/O scheduler code, which are the only heavy
users.
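
To make that concrete, here is a rough sketch of the idea (the lock
names are made up, nothing like this is part of this series):

	/*
	 * Hypothetical: dedicated locks for the two remaining heavy
	 * users instead of one shared q->queue_lock.
	 */
	spinlock_t		blkcg_lock;	/* blk-cgroup state */
	spinlock_t		sched_lock;	/* I/O scheduler state */

blkg_destroy_all() would then take &q->blkcg_lock and the elevator
code &q->sched_lock, without either having to know about the other.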


* [PATCH 01/16] block: remove QUEUE_FLAG_BYPASS and ->bypass
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:50   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 02/16] block: remove deadline __deadline manipulation helpers Christoph Hellwig
                   ` (15 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

Unused since the removal of the legacy request code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-cgroup.c         | 15 ---------------
 block/blk-core.c           | 21 ---------------------
 block/blk-mq-debugfs.c     |  1 -
 block/blk-throttle.c       |  3 ---
 include/linux/blk-cgroup.h |  6 +-----
 include/linux/blkdev.h     |  3 ---
 6 files changed, 1 insertion(+), 48 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 6c65791bc3fe..a95cddb39f1c 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -270,13 +270,6 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
 
-	/*
-	 * This could be the first entry point of blkcg implementation and
-	 * we shouldn't allow anything to go through for a bypassing queue.
-	 */
-	if (unlikely(blk_queue_bypass(q)))
-		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
 	blkg = __blkg_lookup(blkcg, q, true);
 	if (blkg)
 		return blkg;
@@ -741,14 +734,6 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 
 	if (!blkcg_policy_enabled(q, pol))
 		return ERR_PTR(-EOPNOTSUPP);
-
-	/*
-	 * This could be the first entry point of blkcg implementation and
-	 * we shouldn't allow anything to go through for a bypassing queue.
-	 */
-	if (unlikely(blk_queue_bypass(q)))
-		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
 	return __blkg_lookup(blkcg, q, true /* update_hint */);
 }
 
diff --git a/block/blk-core.c b/block/blk-core.c
index fdc0ad2686c4..1c9b6975cf0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -370,18 +370,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_set_queue_dying(q);
 	spin_lock_irq(lock);
 
-	/*
-	 * A dying queue is permanently in bypass mode till released.  Note
-	 * that, unlike blk_queue_bypass_start(), we aren't performing
-	 * synchronize_rcu() after entering bypass mode to avoid the delay
-	 * as some drivers create and destroy a lot of queues while
-	 * probing.  This is still safe because blk_release_queue() will be
-	 * called only after the queue refcnt drops to zero and nothing,
-	 * RCU or not, would be traversing the queue by then.
-	 */
-	q->bypass_depth++;
-	queue_flag_set(QUEUE_FLAG_BYPASS, q);
-
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DYING, q);
@@ -589,15 +577,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 
 	q->queue_lock = lock ? : &q->__queue_lock;
 
-	/*
-	 * A queue starts its life with bypass turned on to avoid
-	 * unnecessary bypass on/off overhead and nasty surprises during
-	 * init.  The initial bypass will be finished when the queue is
-	 * registered by blk_register_queue().
-	 */
-	q->bypass_depth = 1;
-	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
-
 	init_waitqueue_head(&q->mq_freeze_wq);
 
 	/*
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index f021f4817b80..a32bb79d6c95 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -114,7 +114,6 @@ static int queue_pm_only_show(void *data, struct seq_file *m)
 static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(STOPPED),
 	QUEUE_FLAG_NAME(DYING),
-	QUEUE_FLAG_NAME(BYPASS),
 	QUEUE_FLAG_NAME(BIDI),
 	QUEUE_FLAG_NAME(NOMERGES),
 	QUEUE_FLAG_NAME(SAME_COMP),
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index db1a3a2ae006..8e6f3c9821c2 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2145,9 +2145,6 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 
 	throtl_update_latency_buckets(td);
 
-	if (unlikely(blk_queue_bypass(q)))
-		goto out_unlock;
-
 	blk_throtl_assoc_bio(tg, bio);
 	blk_throtl_update_idletime(tg);
 
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1b299e025e83..2c68efc603bd 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -325,16 +325,12 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
  * @q: request_queue of interest
  *
  * Lookup blkg for the @blkcg - @q pair.  This function should be called
- * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
- * - see blk_queue_bypass_start() for details.
+ * under RCU read lock.
  */
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
 					   struct request_queue *q)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
-
-	if (unlikely(blk_queue_bypass(q)))
-		return NULL;
 	return __blkg_lookup(blkcg, q, false);
 }
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e67ad2dd025e..c92aafcde0b8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -551,7 +551,6 @@ struct request_queue {
 
 	struct mutex		sysfs_lock;
 
-	int			bypass_depth;
 	atomic_t		mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
@@ -589,7 +588,6 @@ struct request_queue {
 
 #define QUEUE_FLAG_STOPPED	1	/* queue is stopped */
 #define QUEUE_FLAG_DYING	2	/* queue being torn down */
-#define QUEUE_FLAG_BYPASS	3	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		4	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     5	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	6	/* complete on same CPU-group */
@@ -633,7 +631,6 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
-- 
2.19.1



* [PATCH 02/16] block: remove deadline __deadline manipulation helpers
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
  2018-11-14 16:02 ` [PATCH 01/16] block: remove QUEUE_FLAG_BYPASS and ->bypass Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:51   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 03/16] block: don't hold the queue_lock over blk_abort_request Christoph Hellwig
                   ` (14 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

No users are left since the removal of the legacy request interface, so
we can remove all the magic bit stealing now and make it a normal field.

But use WRITE_ONCE/READ_ONCE on the new deadline field, given that we
don't seem to have any mechanism to guarantee a new value actually
gets seen by other threads.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
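
For reference, a minimal sketch of the resulting access pattern (not
part of the patch itself); without the _ONCE accessors the compiler
would be free to tear, fuse or cache the plain loads and stores of
->deadline:

	/* writer side, as in blk_add_timer() */
	WRITE_ONCE(rq->deadline, jiffies + rq->timeout);

	/* reader side, as in the blk-mq timeout scan */
	if (time_after_eq(jiffies, READ_ONCE(rq->deadline)))
		return true;	/* request has expired */
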
 block/blk-mq.c         |  4 ++--
 block/blk-timeout.c    |  8 +++++---
 block/blk.h            | 35 -----------------------------------
 include/linux/blkdev.h |  4 +---
 4 files changed, 8 insertions(+), 43 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 411be60d0cb6..4c82b4b4fa3e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -325,7 +325,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->special = NULL;
 	/* tag was already set */
 	rq->extra_len = 0;
-	rq->__deadline = 0;
+	WRITE_ONCE(rq->deadline, 0);
 
 	rq->timeout = 0;
 
@@ -839,7 +839,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	if (rq->rq_flags & RQF_TIMED_OUT)
 		return false;
 
-	deadline = blk_rq_deadline(rq);
+	deadline = READ_ONCE(rq->deadline);
 	if (time_after_eq(jiffies, deadline))
 		return true;
 
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 006cff4390c0..3b0179fbdd6a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -84,7 +84,7 @@ void blk_abort_request(struct request *req)
 	 * immediately and that scan sees the new timeout value.
 	 * No need for fancy synchronizations.
 	 */
-	blk_rq_set_deadline(req, jiffies);
+	WRITE_ONCE(req->deadline, jiffies);
 	kblockd_schedule_work(&req->q->timeout_work);
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
@@ -121,14 +121,16 @@ void blk_add_timer(struct request *req)
 		req->timeout = q->rq_timeout;
 
 	req->rq_flags &= ~RQF_TIMED_OUT;
-	blk_rq_set_deadline(req, jiffies + req->timeout);
+
+	expiry = jiffies + req->timeout;
+	WRITE_ONCE(req->deadline, expiry);
 
 	/*
 	 * If the timer isn't already pending or this timeout is earlier
 	 * than an existing one, modify the timer. Round up to next nearest
 	 * second.
 	 */
-	expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
+	expiry = blk_rq_timeout(round_jiffies_up(expiry));
 
 	if (!timer_pending(&q->timeout) ||
 	    time_before(expiry, q->timeout.expires)) {
diff --git a/block/blk.h b/block/blk.h
index 41b64e6e101b..08a5845b03ba 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -238,26 +238,6 @@ void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
 void blk_account_io_done(struct request *req, u64 now);
 
-/*
- * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds. Steal the bottom bit of the
- * __deadline field for this.
- */
-static inline int blk_mark_rq_complete(struct request *rq)
-{
-	return test_and_set_bit(0, &rq->__deadline);
-}
-
-static inline void blk_clear_rq_complete(struct request *rq)
-{
-	clear_bit(0, &rq->__deadline);
-}
-
-static inline bool blk_rq_is_complete(struct request *rq)
-{
-	return test_bit(0, &rq->__deadline);
-}
-
 /*
  * Internal elevator interface
  */
@@ -322,21 +302,6 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 		q->last_merge = NULL;
 }
 
-/*
- * Steal a bit from this field for legacy IO path atomic IO marking. Note that
- * setting the deadline clears the bottom bit, potentially clearing the
- * completed bit. The user has to be OK with this (current ones are fine).
- */
-static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
-{
-	rq->__deadline = time & ~0x1UL;
-}
-
-static inline unsigned long blk_rq_deadline(struct request *rq)
-{
-	return rq->__deadline & ~0x1UL;
-}
-
 /*
  * Internal io_context interface
  */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c92aafcde0b8..1aa94759d219 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -224,9 +224,7 @@ struct request {
 	refcount_t ref;
 
 	unsigned int timeout;
-
-	/* access through blk_rq_set_deadline, blk_rq_deadline */
-	unsigned long __deadline;
+	unsigned long deadline;
 
 	union {
 		struct __call_single_data csd;
-- 
2.19.1



* [PATCH 03/16] block: don't hold the queue_lock over blk_abort_request
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
  2018-11-14 16:02 ` [PATCH 01/16] block: remove QUEUE_FLAG_BYPASS and ->bypass Christoph Hellwig
  2018-11-14 16:02 ` [PATCH 02/16] block: remove deadline __deadline manipulation helpers Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:51   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 04/16] block: use atomic bitops for ->queue_flags Christoph Hellwig
                   ` (13 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

There is nothing it could synchronize against, so don't go through
the pain of acquiring the lock.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
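
For context, after the previous patch blk_abort_request() boils down
to a WRITE_ONCE of the deadline plus scheduling the timeout work,
neither of which is ordered by the caller's queue_lock:

	void blk_abort_request(struct request *req)
	{
		WRITE_ONCE(req->deadline, jiffies);
		kblockd_schedule_work(&req->q->timeout_work);
	}
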
 block/blk-timeout.c                 |  2 +-
 drivers/ata/libata-eh.c             |  4 ----
 drivers/block/mtip32xx/mtip32xx.c   |  5 +----
 drivers/scsi/libsas/sas_ata.c       |  5 -----
 drivers/scsi/libsas/sas_scsi_host.c | 10 ++--------
 5 files changed, 4 insertions(+), 22 deletions(-)

diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 3b0179fbdd6a..124c26128bf6 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -75,7 +75,7 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
  * This function requests that the block layer start recovery for the
  * request by deleting the timer and calling the q's timeout function.
  * LLDDs who implement their own error recovery MAY ignore the timeout
- * event if they generated blk_abort_req. Must hold queue lock.
+ * event if they generated blk_abort_request.
  */
 void blk_abort_request(struct request *req)
 {
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 01306c018398..938ed513b070 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -919,8 +919,6 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct request_queue *q = qc->scsicmd->device->request_queue;
-	unsigned long flags;
 
 	WARN_ON(!ap->ops->error_handler);
 
@@ -932,9 +930,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
 	 * this function completes.
 	 */
-	spin_lock_irqsave(q->queue_lock, flags);
 	blk_abort_request(qc->scsicmd->request);
-	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a4c44db097e0..2b0ac9d01e51 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2770,10 +2770,7 @@ static int mtip_service_thread(void *data)
 
 			blk_mq_quiesce_queue(dd->queue);
 
-			spin_lock(dd->queue->queue_lock);
-			blk_mq_tagset_busy_iter(&dd->tags,
-							mtip_queue_cmd, dd);
-			spin_unlock(dd->queue->queue_lock);
+			blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
 
 			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
 
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4f6cdf53e913..c90b278cc28c 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -601,12 +601,7 @@ void sas_ata_task_abort(struct sas_task *task)
 
 	/* Bounce SCSI-initiated commands to the SCSI EH */
 	if (qc->scsicmd) {
-		struct request_queue *q = qc->scsicmd->device->request_queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
 		blk_abort_request(qc->scsicmd->request);
-		spin_unlock_irqrestore(q->queue_lock, flags);
 		return;
 	}
 
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 33229348dcb6..af085432c5fe 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -930,16 +930,10 @@ void sas_task_abort(struct sas_task *task)
 		return;
 	}
 
-	if (dev_is_sata(task->dev)) {
+	if (dev_is_sata(task->dev))
 		sas_ata_task_abort(task);
-	} else {
-		struct request_queue *q = sc->device->request_queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
+	else
 		blk_abort_request(sc->request);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
 }
 
 void sas_target_destroy(struct scsi_target *starget)
-- 
2.19.1



* [PATCH 04/16] block: use atomic bitops for ->queue_flags
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (2 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 03/16] block: don't hold the queue_lock over blk_abort_request Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:55   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 05/16] block: remove queue_lockdep_assert_held Christoph Hellwig
                   ` (12 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

->queue_flags is generally not set or cleared in the fast path, and also
generally set or cleared one flag at a time.  Make use of the normal
atomic bitops for it so that we don't need to take the queue_lock,
which is otherwise mostly unused in the core block layer now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
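
The key point is that set_bit() and friends are atomic read-modify-write
operations, so concurrent updates to different flags in the same
queue_flags word cannot lose each other, which is what the queue_lock
used to guarantee for the non-atomic __set_bit()/__clear_bit() variants.
A sketch of the situation the lock was protecting against:

	/* CPU 0 */
	set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);

	/* CPU 1, concurrently on the same word - safe with atomic bitops */
	clear_bit(QUEUE_FLAG_WC, &q->queue_flags);
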
 block/blk-core.c       | 54 ++++++----------------------------------
 block/blk-mq.c         |  2 +-
 block/blk-settings.c   | 10 +++-----
 block/blk-sysfs.c      | 28 +++++++++------------
 block/blk.h            | 56 ------------------------------------------
 include/linux/blkdev.h |  1 -
 6 files changed, 24 insertions(+), 127 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 1c9b6975cf0a..5c8e66a09d82 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -74,11 +74,7 @@ static struct workqueue_struct *kblockd_workqueue;
  */
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_set(flag, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	set_bit(flag, &q->queue_flags);
 }
 EXPORT_SYMBOL(blk_queue_flag_set);
 
@@ -89,11 +85,7 @@ EXPORT_SYMBOL(blk_queue_flag_set);
  */
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_clear(flag, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	clear_bit(flag, &q->queue_flags);
 }
 EXPORT_SYMBOL(blk_queue_flag_clear);
 
@@ -107,38 +99,10 @@ EXPORT_SYMBOL(blk_queue_flag_clear);
  */
 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
 {
-	unsigned long flags;
-	bool res;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	res = queue_flag_test_and_set(flag, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	return res;
+	return test_and_set_bit(flag, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
 
-/**
- * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
- * @flag: flag to be cleared
- * @q: request queue
- *
- * Returns the previous value of @flag - 0 if the flag was not set and 1 if
- * the flag was set.
- */
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
-{
-	unsigned long flags;
-	bool res;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	res = queue_flag_test_and_clear(flag, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	return res;
-}
-EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
-
 void blk_rq_init(struct request_queue *q, struct request *rq)
 {
 	memset(rq, 0, sizeof(*rq));
@@ -368,12 +332,10 @@ void blk_cleanup_queue(struct request_queue *q)
 	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	blk_set_queue_dying(q);
-	spin_lock_irq(lock);
 
-	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
-	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-	queue_flag_set(QUEUE_FLAG_DYING, q);
-	spin_unlock_irq(lock);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
 	mutex_unlock(&q->sysfs_lock);
 
 	/*
@@ -384,9 +346,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	rq_qos_exit(q);
 
-	spin_lock_irq(lock);
-	queue_flag_set(QUEUE_FLAG_DEAD, q);
-	spin_unlock_irq(lock);
+	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
 	/*
 	 * make sure all in-progress dispatch are completed because
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4c82b4b4fa3e..e2717e843727 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2756,7 +2756,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
-		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
 
 	q->sg_reserved_size = INT_MAX;
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index cca83590a1dc..3abe831e92c8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -834,16 +834,14 @@ EXPORT_SYMBOL(blk_set_queue_depth);
  */
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
-	spin_lock_irq(q->queue_lock);
 	if (wc)
-		queue_flag_set(QUEUE_FLAG_WC, q);
+		blk_queue_flag_set(QUEUE_FLAG_WC, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WC, q);
+		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 	if (fua)
-		queue_flag_set(QUEUE_FLAG_FUA, q);
+		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_FUA, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
 
 	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
 }
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index d4b1b84ba8ca..22fd086eba9f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -316,14 +316,12 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 	if (ret < 0)
 		return ret;
 
-	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
-	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
 	if (nm == 2)
-		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	else if (nm)
-		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 
 	return ret;
 }
@@ -347,18 +345,16 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	spin_lock_irq(q->queue_lock);
 	if (val == 2) {
-		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
 	} else if (val == 1) {
-		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
 	} else if (val == 0) {
-		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
-		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
 	}
-	spin_unlock_irq(q->queue_lock);
 #endif
 	return ret;
 }
@@ -889,7 +885,7 @@ int blk_register_queue(struct gendisk *disk)
 	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
 		  "%s is registering an already registered queue\n",
 		  kobject_name(&dev->kobj));
-	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
+	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
 
 	/*
 	 * SCSI probing may synchronously create and destroy a lot of
@@ -901,7 +897,7 @@ int blk_register_queue(struct gendisk *disk)
 	 * request_queues for non-existent devices never get registered.
 	 */
 	if (!blk_queue_init_done(q)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
 		percpu_ref_switch_to_percpu(&q->q_usage_counter);
 	}
 
diff --git a/block/blk.h b/block/blk.h
index 08a5845b03ba..f2ddc71e93da 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -48,62 +48,6 @@ static inline void queue_lockdep_assert_held(struct request_queue *q)
 		lockdep_assert_held(q->queue_lock);
 }
 
-static inline void queue_flag_set_unlocked(unsigned int flag,
-					   struct request_queue *q)
-{
-	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
-	    kref_read(&q->kobj.kref))
-		lockdep_assert_held(q->queue_lock);
-	__set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
-					     struct request_queue *q)
-{
-	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
-	    kref_read(&q->kobj.kref))
-		lockdep_assert_held(q->queue_lock);
-	__clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
-					    struct request_queue *q)
-{
-	queue_lockdep_assert_held(q);
-
-	if (test_bit(flag, &q->queue_flags)) {
-		__clear_bit(flag, &q->queue_flags);
-		return 1;
-	}
-
-	return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
-					  struct request_queue *q)
-{
-	queue_lockdep_assert_held(q);
-
-	if (!test_bit(flag, &q->queue_flags)) {
-		__set_bit(flag, &q->queue_flags);
-		return 0;
-	}
-
-	return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
-	queue_lockdep_assert_held(q);
-	__set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
-	queue_lockdep_assert_held(q);
-	__clear_bit(flag, &q->queue_flags);
-}
-
 static inline struct blk_flush_queue *
 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1aa94759d219..2b887e24837e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -624,7 +624,6 @@ struct request_queue {
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
-- 
2.19.1



* [PATCH 05/16] block: remove queue_lockdep_assert_held
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (3 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 04/16] block: use atomic bitops for ->queue_flags Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:55   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 06/16] block-iolatency: remove the unused lock argument to rq_qos_throttle Christoph Hellwig
                   ` (11 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

The only remaining user unconditionally drops and reacquires the lock,
which means we really don't need any additional (conditional) annotation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-throttle.c |  1 -
 block/blk.h          | 13 -------------
 2 files changed, 14 deletions(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8e6f3c9821c2..a665b0950369 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2353,7 +2353,6 @@ void blk_throtl_drain(struct request_queue *q)
 	struct bio *bio;
 	int rw;
 
-	queue_lockdep_assert_held(q);
 	rcu_read_lock();
 
 	/*
diff --git a/block/blk.h b/block/blk.h
index f2ddc71e93da..027a0ccc175e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -35,19 +35,6 @@ extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
-{
-	if (q->queue_lock)
-		lockdep_assert_held(q->queue_lock);
-}
-
 static inline struct blk_flush_queue *
 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-- 
2.19.1



* [PATCH 06/16] block-iolatency: remove the unused lock argument to rq_qos_throttle
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (4 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 05/16] block: remove queue_lockdep_assert_held Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:56   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 07/16] block: update a few comments for the legacy request removal Christoph Hellwig
                   ` (10 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

Unused now that the legacy request path is gone.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-iolatency.c | 24 ++++++------------------
 block/blk-mq.c        |  2 +-
 block/blk-rq-qos.c    |  5 ++---
 block/blk-rq-qos.h    |  4 ++--
 block/blk-wbt.c       | 16 ++++------------
 5 files changed, 15 insertions(+), 36 deletions(-)

diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 38c35c32aff2..8edf1b353ad1 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -276,10 +276,8 @@ static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
 
 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 				       struct iolatency_grp *iolat,
-				       spinlock_t *lock, bool issue_as_root,
+				       bool issue_as_root,
 				       bool use_memdelay)
-	__releases(lock)
-	__acquires(lock)
 {
 	struct rq_wait *rqw = &iolat->rq_wait;
 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
@@ -311,14 +309,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 		if (iolatency_may_queue(iolat, &wait, first_block))
 			break;
 		first_block = false;
-
-		if (lock) {
-			spin_unlock_irq(lock);
-			io_schedule();
-			spin_lock_irq(lock);
-		} else {
-			io_schedule();
-		}
+		io_schedule();
 	} while (1);
 
 	finish_wait(&rqw->wait, &wait);
@@ -478,8 +469,7 @@ static void check_scale_change(struct iolatency_grp *iolat)
 	scale_change(iolat, direction > 0);
 }
 
-static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
-				     spinlock_t *lock)
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
 {
 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
 	struct blkcg *blkcg;
@@ -495,13 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
 	bio_associate_blkcg(bio, &blkcg->css);
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
-		if (!lock)
-			spin_lock_irq(q->queue_lock);
+		spin_lock_irq(q->queue_lock);
 		blkg = blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
-		if (!lock)
-			spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(q->queue_lock);
 	}
 	if (!blkg)
 		goto out;
@@ -518,7 +506,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
 		}
 
 		check_scale_change(iolat);
-		__blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
 				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		blkg = blkg->parent;
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e2717e843727..a3f057fdd045 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1886,7 +1886,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	if (blk_mq_sched_bio_merge(q, bio))
 		return BLK_QC_T_NONE;
 
-	rq_qos_throttle(q, bio, NULL);
+	rq_qos_throttle(q, bio);
 
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 0005dfd568dd..f8a4d3fbb98c 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -67,14 +67,13 @@ void rq_qos_requeue(struct request_queue *q, struct request *rq)
 	}
 }
 
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
-		     spinlock_t *lock)
+void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
 	struct rq_qos *rqos;
 
 	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
 		if (rqos->ops->throttle)
-			rqos->ops->throttle(rqos, bio, lock);
+			rqos->ops->throttle(rqos, bio);
 	}
 }
 
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 32b02efbfa66..b6b11d496007 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -25,7 +25,7 @@ struct rq_qos {
 };
 
 struct rq_qos_ops {
-	void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+	void (*throttle)(struct rq_qos *, struct bio *);
 	void (*track)(struct rq_qos *, struct request *, struct bio *);
 	void (*issue)(struct rq_qos *, struct request *);
 	void (*requeue)(struct rq_qos *, struct request *);
@@ -103,7 +103,7 @@ void rq_qos_done(struct request_queue *, struct request *);
 void rq_qos_issue(struct request_queue *, struct request *);
 void rq_qos_requeue(struct request_queue *, struct request *);
 void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
-void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
+void rq_qos_throttle(struct request_queue *, struct bio *);
 void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
 void rq_qos_exit(struct request_queue *);
 #endif
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0fc222d4194b..e5a66c574683 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -521,9 +521,7 @@ static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
-		       unsigned long rw, spinlock_t *lock)
-	__releases(lock)
-	__acquires(lock)
+		       unsigned long rw)
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	struct wbt_wait_data data = {
@@ -561,13 +559,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 			break;
 		}
 
-		if (lock) {
-			spin_unlock_irq(lock);
-			io_schedule();
-			spin_lock_irq(lock);
-		} else
-			io_schedule();
-
+		io_schedule();
 		has_sleeper = false;
 	} while (1);
 
@@ -624,7 +616,7 @@ static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
 {
 	struct rq_wb *rwb = RQWB(rqos);
 	enum wbt_flags flags;
@@ -636,7 +628,7 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
 		return;
 	}
 
-	__wbt_wait(rwb, flags, bio->bi_opf, lock);
+	__wbt_wait(rwb, flags, bio->bi_opf);
 
 	if (!blk_stat_is_active(rwb->cb))
 		rwb_arm_timer(rwb);
-- 
2.19.1



* [PATCH 07/16] block: update a few comments for the legacy request removal
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (5 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 06/16] block-iolatency: remove the unused lock argument to rq_qos_throttle Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:56   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 08/16] block: remove a few unused exports Christoph Hellwig
                   ` (9 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

Only the mq locking is left in the flush state machine.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-flush.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/block/blk-flush.c b/block/blk-flush.c
index c53197dcdd70..fcd18b158fd6 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -148,7 +148,7 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
  * completion and trigger the next step.
  *
  * CONTEXT:
- * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
+ * spin_lock_irq(fq->mq_flush_lock)
  *
  * RETURNS:
  * %true if requests were added to the dispatch queue, %false otherwise.
@@ -252,7 +252,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
  * Please read the comment at the top of this file for more info.
  *
  * CONTEXT:
- * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
+ * spin_lock_irq(fq->mq_flush_lock)
  *
  */
 static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
-- 
2.19.1



* [PATCH 08/16] block: remove a few unused exports
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (6 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 07/16] block: update a few comments for the legacy request removal Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:57   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 09/16] blk-cgroup: consolidate error handling in blkcg_init_queue Christoph Hellwig
                   ` (8 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-cgroup.c   | 6 ------
 block/blk-ioc.c      | 3 ---
 block/blk-mq-sysfs.c | 1 -
 block/blk-softirq.c  | 1 -
 block/blk-stat.c     | 4 ----
 block/blk-wbt.c      | 2 --
 6 files changed, 17 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a95cddb39f1c..3296c0b7353a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -424,7 +424,6 @@ const char *blkg_dev_name(struct blkcg_gq *blkg)
 		return dev_name(blkg->q->backing_dev_info->dev);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(blkg_dev_name);
 
 /**
  * blkcg_print_blkgs - helper for printing per-blkg data
@@ -860,7 +859,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(blkg_conf_prep);
 
 /**
  * blkg_conf_finish - finish up per-blkg config update
@@ -876,7 +874,6 @@ void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 	rcu_read_unlock();
 	put_disk_and_module(ctx->disk);
 }
-EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
 static int blkcg_print_stat(struct seq_file *sf, void *v)
 {
@@ -1691,7 +1688,6 @@ void blkcg_maybe_throttle_current(void)
 	rcu_read_unlock();
 	blk_put_queue(q);
 }
-EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
 
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
@@ -1725,7 +1721,6 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
 		current->use_memdelay = use_memdelay;
 	set_notify_resume(current);
 }
-EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
 
 /**
  * blkcg_add_delay - add delay to this blkg
@@ -1740,7 +1735,6 @@ void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
 	blkcg_scale_delay(blkg, now);
 	atomic64_add(delta, &blkg->delay_nsec);
 }
-EXPORT_SYMBOL_GPL(blkcg_add_delay);
 
 module_param(blkcg_debug_stats, bool, 0644);
 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 56755ad5ac88..f91ca6b70d6a 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -28,7 +28,6 @@ void get_io_context(struct io_context *ioc)
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
 	atomic_long_inc(&ioc->refcount);
 }
-EXPORT_SYMBOL(get_io_context);
 
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
@@ -160,7 +159,6 @@ void put_io_context(struct io_context *ioc)
 	if (free_ioc)
 		kmem_cache_free(iocontext_cachep, ioc);
 }
-EXPORT_SYMBOL(put_io_context);
 
 /**
  * put_io_context_active - put active reference on ioc
@@ -315,7 +313,6 @@ struct io_context *get_task_io_context(struct task_struct *task,
 
 	return NULL;
 }
-EXPORT_SYMBOL(get_task_io_context);
 
 /**
  * ioc_lookup_icq - lookup io_cq from ioc
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 2d737f9e7ba7..3d25b9c419e9 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -350,7 +350,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(blk_mq_register_dev);
 
 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 1534066e306e..457d9ba3eb20 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -143,7 +143,6 @@ void __blk_complete_request(struct request *req)
 
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(__blk_complete_request);
 
 static __init int blk_softirq_init(void)
 {
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 90561af85a62..696a04176e4d 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -130,7 +130,6 @@ blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
 
 	return cb;
 }
-EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
 
 void blk_stat_add_callback(struct request_queue *q,
 			   struct blk_stat_callback *cb)
@@ -151,7 +150,6 @@ void blk_stat_add_callback(struct request_queue *q,
 	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
 	spin_unlock(&q->stats->lock);
 }
-EXPORT_SYMBOL_GPL(blk_stat_add_callback);
 
 void blk_stat_remove_callback(struct request_queue *q,
 			      struct blk_stat_callback *cb)
@@ -164,7 +162,6 @@ void blk_stat_remove_callback(struct request_queue *q,
 
 	del_timer_sync(&cb->timer);
 }
-EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
 
 static void blk_stat_free_callback_rcu(struct rcu_head *head)
 {
@@ -181,7 +178,6 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
 	if (cb)
 		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
 }
-EXPORT_SYMBOL_GPL(blk_stat_free_callback);
 
 void blk_stat_enable_accounting(struct request_queue *q)
 {
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index e5a66c574683..919444d75489 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -754,8 +754,6 @@ void wbt_disable_default(struct request_queue *q)
 	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
 		rwb->wb_normal = 0;
 }
-EXPORT_SYMBOL_GPL(wbt_disable_default);
-
 
 static struct rq_qos_ops wbt_rqos_ops = {
 	.throttle = wbt_wait,
-- 
2.19.1



* [PATCH 09/16] blk-cgroup: consolidate error handling in blkcg_init_queue
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (7 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 08/16] block: remove a few unused exports Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:58   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 10/16] blk-cgroup: move locking into blkg_destroy_all Christoph Hellwig
                   ` (7 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

Use a goto label to merge two identical pieces of error handling code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-cgroup.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3296c0b7353a..717ab38a6c67 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1173,21 +1173,19 @@ int blkcg_init_queue(struct request_queue *q)
 		radix_tree_preload_end();
 
 	ret = blk_iolatency_init(q);
-	if (ret) {
-		spin_lock_irq(q->queue_lock);
-		blkg_destroy_all(q);
-		spin_unlock_irq(q->queue_lock);
-		return ret;
-	}
+	if (ret)
+		goto err_destroy_all;
 
 	ret = blk_throtl_init(q);
-	if (ret) {
-		spin_lock_irq(q->queue_lock);
-		blkg_destroy_all(q);
-		spin_unlock_irq(q->queue_lock);
-	}
-	return ret;
+	if (ret)
+		goto err_destroy_all;
+	return 0;
 
+err_destroy_all:
+	spin_lock_irq(q->queue_lock);
+	blkg_destroy_all(q);
+	spin_unlock_irq(q->queue_lock);
+	return ret;
 err_unlock:
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
-- 
2.19.1



* [PATCH 10/16] blk-cgroup: move locking into blkg_destroy_all
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (8 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 09/16] blk-cgroup: consolidate error handling in blkcg_init_queue Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:58   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 11/16] drbd: don't override the queue_lock Christoph Hellwig
                   ` (6 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-cgroup.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 717ab38a6c67..3ba23b9bfeb9 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -349,8 +349,7 @@ static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkcg_gq *blkg, *n;
 
-	lockdep_assert_held(q->queue_lock);
-
+	spin_lock_irq(q->queue_lock);
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
@@ -360,6 +359,7 @@ static void blkg_destroy_all(struct request_queue *q)
 	}
 
 	q->root_blkg = NULL;
+	spin_unlock_irq(q->queue_lock);
 }
 
 /*
@@ -1182,9 +1182,7 @@ int blkcg_init_queue(struct request_queue *q)
 	return 0;
 
 err_destroy_all:
-	spin_lock_irq(q->queue_lock);
 	blkg_destroy_all(q);
-	spin_unlock_irq(q->queue_lock);
 	return ret;
 err_unlock:
 	spin_unlock_irq(q->queue_lock);
@@ -1222,10 +1220,7 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
 	blkg_destroy_all(q);
-	spin_unlock_irq(q->queue_lock);
-
 	blk_throtl_exit(q);
 }
 
-- 
2.19.1



* [PATCH 11/16] drbd: don't override the queue_lock
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (9 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 10/16] blk-cgroup: move locking into blkg_destroy_all Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:58   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 12/16] umem: " Christoph Hellwig
                   ` (5 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

The DRBD req_lock and block layer queue_lock are used for entirely
different resources.  Stop using the req_lock as the block layer
queue_lock.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/block/drbd/drbd_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index fa8204214ac0..b66c59ce6260 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2792,7 +2792,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
 	drbd_init_set_defaults(device);
 
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &resource->req_lock);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
 	if (!q)
 		goto out_no_q;
 	device->rq_queue = q;
-- 
2.19.1



* [PATCH 12/16] umem: don't override the queue_lock
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (10 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 11/16] drbd: don't override the queue_lock Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  6:59   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 13/16] mmc: simplify queue initialization Christoph Hellwig
                   ` (4 subsequent siblings)
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

The umem card->lock and the block layer queue_lock are used for entirely
different resources.  Stop using card->lock as the block layer
queue_lock.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/block/umem.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index be3e3ab79950..8a27b5adc2b3 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -888,8 +888,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	card->biotail = &card->bio;
 	spin_lock_init(&card->lock);
 
-	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE,
-					   &card->lock);
+	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
 	if (!card->queue)
 		goto failed_alloc;
 
-- 
2.19.1



* [PATCH 13/16] mmc: simplify queue initialization
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (11 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 12/16] umem: " Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-14 17:31   ` Ulf Hansson
  2018-11-15  7:00   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 14/16] mmc: stop abusing the request queue_lock pointer Christoph Hellwig
                   ` (3 subsequent siblings)
  16 siblings, 2 replies; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

Merge the three functions initializing the queue into a single one, and
drop an unused argument from it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/mmc/core/block.c |  2 +-
 drivers/mmc/core/queue.c | 86 ++++++++++++++--------------------------
 drivers/mmc/core/queue.h |  3 +-
 3 files changed, 32 insertions(+), 59 deletions(-)

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index c35b5b08bb33..27606e1382e5 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2334,7 +2334,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 	INIT_LIST_HEAD(&md->rpmbs);
 	md->usage = 1;
 
-	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+	ret = mmc_init_queue(&md->queue, card, &md->lock);
 	if (ret)
 		goto err_putdisk;
 
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 6edffeed9953..37617fb1f9de 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -378,14 +378,38 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	init_waitqueue_head(&mq->wait);
 }
 
-static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
-			     const struct blk_mq_ops *mq_ops, spinlock_t *lock)
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#define MMC_QUEUE_DEPTH 64
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ * @subname: partition subname
+ *
+ * Initialise a MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+		   spinlock_t *lock)
 {
+	struct mmc_host *host = card->host;
 	int ret;
 
+	mq->card = card;
+	mq->use_cqe = host->cqe_enabled;
+
 	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
-	mq->tag_set.ops = mq_ops;
-	mq->tag_set.queue_depth = q_depth;
+	mq->tag_set.ops = &mmc_mq_ops;
+	/*
+	 * The queue depth for CQE must match the hardware because the request
+	 * tag is used to index the hardware queue.
+	 */
+	if (mq->use_cqe)
+		mq->tag_set.queue_depth =
+			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+	else
+		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
 	mq->tag_set.numa_node = NUMA_NO_NODE;
 	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
 			    BLK_MQ_F_BLOCKING;
@@ -405,66 +429,16 @@ static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
 
 	mq->queue->queue_lock = lock;
 	mq->queue->queuedata = mq;
+	blk_queue_rq_timeout(mq->queue, 60 * HZ);
 
+	mmc_setup_queue(mq, card);
 	return 0;
 
 free_tag_set:
 	blk_mq_free_tag_set(&mq->tag_set);
-
 	return ret;
 }
 
-/* Set queue depth to get a reasonable value for q->nr_requests */
-#define MMC_QUEUE_DEPTH 64
-
-static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
-			 spinlock_t *lock)
-{
-	struct mmc_host *host = card->host;
-	int q_depth;
-	int ret;
-
-	/*
-	 * The queue depth for CQE must match the hardware because the request
-	 * tag is used to index the hardware queue.
-	 */
-	if (mq->use_cqe)
-		q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
-	else
-		q_depth = MMC_QUEUE_DEPTH;
-
-	ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
-	if (ret)
-		return ret;
-
-	blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
-	mmc_setup_queue(mq, card);
-
-	return 0;
-}
-
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
-		   spinlock_t *lock, const char *subname)
-{
-	struct mmc_host *host = card->host;
-
-	mq->card = card;
-
-	mq->use_cqe = host->cqe_enabled;
-
-	return mmc_mq_init(mq, card, lock);
-}
-
 void mmc_queue_suspend(struct mmc_queue *mq)
 {
 	blk_mq_quiesce_queue(mq->queue);
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 9bf3c9245075..29218e12900d 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -95,8 +95,7 @@ struct mmc_queue {
 	struct work_struct	complete_work;
 };
 
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
-			  const char *);
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
-- 
2.19.1



* [PATCH 14/16] mmc: stop abusing the request queue_lock pointer
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (12 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 13/16] mmc: simplify queue initialization Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-14 17:56   ` Ulf Hansson
  2018-11-15  7:00   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 15/16] block: remove the lock argument to blk_alloc_queue_node Christoph Hellwig
                   ` (2 subsequent siblings)
  16 siblings, 2 replies; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

mmc uses the queue_lock pointer in the block layer struct request_queue
to indirect its own lock to the mmc_queue structure, given that the
original lock isn't reachable outside of block.c.  Add a lock pointer
to struct mmc_queue instead and stop overriding the block layer lock,
which protects fields entirely separate from the mmc use.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
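
A sketch of the new indirection (the one-line queue.h addition in the
diffstat is the new field; the setup shown for mmc_init_queue() is the
presumed counterpart):

	/* new member in struct mmc_queue */
	spinlock_t	*lock;

	/* presumed setup in mmc_init_queue() */
	mq->lock = lock;	/* &md->lock from mmc_blk_alloc_req() */

	/* users then do, as in the hunks below */
	spin_lock_irqsave(mq->lock, flags);
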
 drivers/mmc/core/block.c | 22 ++++++++++------------
 drivers/mmc/core/queue.c | 26 +++++++++++++-------------
 drivers/mmc/core/queue.h |  1 +
 3 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 27606e1382e5..70ec465beb69 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1483,7 +1483,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
 		blk_mq_end_request(req, BLK_STS_OK);
 	}
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(mq->lock, flags);
 
 	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
 
@@ -1491,7 +1491,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
 
 	mmc_cqe_check_busy(mq);
 
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(mq->lock, flags);
 
 	if (!mq->cqe_busy)
 		blk_mq_run_hw_queues(q, true);
@@ -1988,17 +1988,16 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
 
 static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
 {
-	struct request_queue *q = req->q;
 	unsigned long flags;
 	bool put_card;
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(mq->lock, flags);
 
 	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
 
 	put_card = (mmc_tot_in_flight(mq) == 0);
 
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(mq->lock, flags);
 
 	if (put_card)
 		mmc_put_card(mq->card, &mq->ctx);
@@ -2094,11 +2093,11 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
 		 * request does not need to wait (although it does need to
 		 * complete complete_req first).
 		 */
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(mq->lock, flags);
 		mq->complete_req = req;
 		mq->rw_wait = false;
 		waiting = mq->waiting;
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(mq->lock, flags);
 
 		/*
 		 * If 'waiting' then the waiting task will complete this
@@ -2117,10 +2116,10 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
 	/* Take the recovery path for errors or urgent background operations */
 	if (mmc_blk_rq_error(&mqrq->brq) ||
 	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(mq->lock, flags);
 		mq->recovery_needed = true;
 		mq->recovery_req = req;
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(mq->lock, flags);
 		wake_up(&mq->wait);
 		schedule_work(&mq->recovery_work);
 		return;
@@ -2136,7 +2135,6 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
 
 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
 {
-	struct request_queue *q = mq->queue;
 	unsigned long flags;
 	bool done;
 
@@ -2144,7 +2142,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
 	 * Wait while there is another request in progress, but not if recovery
 	 * is needed. Also indicate whether there is a request waiting to start.
 	 */
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(mq->lock, flags);
 	if (mq->recovery_needed) {
 		*err = -EBUSY;
 		done = true;
@@ -2152,7 +2150,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
 		done = !mq->rw_wait;
 	}
 	mq->waiting = !done;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(mq->lock, flags);
 
 	return done;
 }
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 37617fb1f9de..ac6a5245275a 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -89,9 +89,9 @@ void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
 	struct mmc_queue *mq = q->queuedata;
 	unsigned long flags;
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(mq->lock, flags);
 	__mmc_cqe_recovery_notifier(mq);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(mq->lock, flags);
 }
 
 static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
@@ -128,14 +128,14 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(mq->lock, flags);
 
 	if (mq->recovery_needed || !mq->use_cqe)
 		ret = BLK_EH_RESET_TIMER;
 	else
 		ret = mmc_cqe_timed_out(req);
 
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(mq->lock, flags);
 
 	return ret;
 }
@@ -157,9 +157,9 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
 
 	mq->in_recovery = false;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(mq->lock);
 	mq->recovery_needed = false;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(mq->lock);
 
 	mmc_put_card(mq->card, &mq->ctx);
 
@@ -258,10 +258,10 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	issue_type = mmc_issue_type(mq, req);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(mq->lock);
 
 	if (mq->recovery_needed || mq->busy) {
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(mq->lock);
 		return BLK_STS_RESOURCE;
 	}
 
@@ -269,7 +269,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	case MMC_ISSUE_DCMD:
 		if (mmc_cqe_dcmd_busy(mq)) {
 			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
-			spin_unlock_irq(q->queue_lock);
+			spin_unlock_irq(mq->lock);
 			return BLK_STS_RESOURCE;
 		}
 		break;
@@ -294,7 +294,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	get_card = (mmc_tot_in_flight(mq) == 1);
 	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(mq->lock);
 
 	if (!(req->rq_flags & RQF_DONTPREP)) {
 		req_to_mmc_queue_req(req)->retries = 0;
@@ -328,12 +328,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (issued != MMC_REQ_STARTED) {
 		bool put_card = false;
 
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(mq->lock);
 		mq->in_flight[issue_type] -= 1;
 		if (mmc_tot_in_flight(mq) == 0)
 			put_card = true;
 		mq->busy = false;
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(mq->lock);
 		if (put_card)
 			mmc_put_card(card, &mq->ctx);
 	} else {
@@ -397,6 +397,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	int ret;
 
 	mq->card = card;
+	mq->lock = lock;
 	mq->use_cqe = host->cqe_enabled;
 
 	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
@@ -427,7 +428,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		goto free_tag_set;
 	}
 
-	mq->queue->queue_lock = lock;
 	mq->queue->queuedata = mq;
 	blk_queue_rq_timeout(mq->queue, 60 * HZ);
 
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 29218e12900d..5421f1542e71 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -73,6 +73,7 @@ struct mmc_queue_req {
 
 struct mmc_queue {
 	struct mmc_card		*card;
+	spinlock_t		*lock;
 	struct mmc_ctx		ctx;
 	struct blk_mq_tag_set	tag_set;
 	struct mmc_blk_data	*blkdata;
-- 
2.19.1


^ permalink raw reply related	[flat|nested] 40+ messages in thread

* [PATCH 15/16] block: remove the lock argument to blk_alloc_queue_node
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (13 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 14/16] mmc: stop abusing the request queue_lock pointer Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  7:00   ` Hannes Reinecke
  2018-11-14 16:02 ` [PATCH 16/16] block: remove the queue_lock indirection Christoph Hellwig
  2018-11-15 19:14 ` remove more legacy request leftover Jens Axboe
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

With the legacy request path gone there is no real need to override the
queue_lock.
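
Conceptually the allocator goes from a conditional lock assignment to an
unconditional one.  Below is a stand-alone userspace analogue of the two
variants, with pthread spinlocks as stand-ins and all names illustrative
only:

#include <stdlib.h>
#include <pthread.h>

struct request_queue {
	pthread_spinlock_t __queue_lock;	/* embedded lock */
	pthread_spinlock_t *queue_lock;
};

/* old behaviour: honour a caller-supplied lock, else fall back */
static struct request_queue *alloc_queue_old(pthread_spinlock_t *lock)
{
	struct request_queue *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	pthread_spin_init(&q->__queue_lock, PTHREAD_PROCESS_PRIVATE);
	q->queue_lock = lock ? lock : &q->__queue_lock;
	return q;
}

/* new behaviour: the embedded lock is always the queue lock */
static struct request_queue *alloc_queue_new(void)
{
	struct request_queue *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	pthread_spin_init(&q->__queue_lock, PTHREAD_PROCESS_PRIVATE);
	q->queue_lock = &q->__queue_lock;
	return q;
}

int main(void)
{
	struct request_queue *old = alloc_queue_old(NULL);
	struct request_queue *new = alloc_queue_new();

	if (!old || !new)
		return 1;
	pthread_spin_lock(new->queue_lock);
	pthread_spin_unlock(new->queue_lock);
	free(old);
	free(new);
	return 0;
}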

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-core.c               | 16 +++-------------
 block/blk-mq.c                 |  2 +-
 drivers/block/drbd/drbd_main.c |  2 +-
 drivers/block/null_blk_main.c  |  3 +--
 drivers/block/umem.c           |  2 +-
 drivers/lightnvm/core.c        |  2 +-
 drivers/md/dm.c                |  2 +-
 drivers/nvdimm/pmem.c          |  2 +-
 drivers/nvme/host/multipath.c  |  2 +-
 include/linux/blkdev.h         |  3 +--
 10 files changed, 12 insertions(+), 24 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 5c8e66a09d82..3f94c9de0252 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
-	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
+	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
@@ -473,17 +473,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
  * @node_id: NUMA node to allocate memory from
- * @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
- *        serialize calls to the legacy .request_fn() callback. Ignored for
- *	  blk-mq request queues.
- *
- * Note: pass the queue lock as the third argument to this function instead of
- * setting the queue lock pointer explicitly to avoid triggering a sporadic
- * crash in the blkcg code. This function namely calls blkcg_init_queue() and
- * the queue lock pointer must be set before blkcg_init_queue() is called.
  */
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
-					   spinlock_t *lock)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
 	int ret;
@@ -534,8 +525,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 #endif
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
-
-	q->queue_lock = lock ? : &q->__queue_lock;
+	q->queue_lock = &q->__queue_lock;
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a3f057fdd045..3b823891b3ef 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2548,7 +2548,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct request_queue *uninit_q, *q;
 
-	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
 	if (!uninit_q)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index b66c59ce6260..f973a2a845c8 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2792,7 +2792,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
 	drbd_init_set_defaults(device);
 
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!q)
 		goto out_no_q;
 	device->rq_queue = q;
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 63c23fcfc4df..62c9654b9ce8 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1659,8 +1659,7 @@ static int null_add_dev(struct nullb_device *dev)
 		}
 		null_init_queues(nullb);
 	} else if (dev->queue_mode == NULL_Q_BIO) {
-		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
-						NULL);
+		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
 		if (!nullb->q) {
 			rv = -ENOMEM;
 			goto out_cleanup_queues;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 8a27b5adc2b3..aa035cf8a51d 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -888,7 +888,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	card->biotail = &card->bio;
 	spin_lock_init(&card->lock);
 
-	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!card->queue)
 		goto failed_alloc;
 
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index efb976a863d2..60ab11fcc81c 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -389,7 +389,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 		goto err_dev;
 	}
 
-	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
+	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
 	if (!tqueue) {
 		ret = -ENOMEM;
 		goto err_disk;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c510179a7f84..a733e4c920af 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1896,7 +1896,7 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_LIST_HEAD(&md->table_devices);
 	spin_lock_init(&md->uevent_lock);
 
-	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
+	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
 	if (!md->queue)
 		goto bad;
 	md->queue->queuedata = md;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 0e39e3d1846f..f7019294740c 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -393,7 +393,7 @@ static int pmem_attach_disk(struct device *dev,
 		return -EBUSY;
 	}
 
-	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
 	if (!q)
 		return -ENOMEM;
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5e3cc8c59a39..b82b0d3ca39a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -276,7 +276,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
 		return 0;
 
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!q)
 		goto out;
 	q->queuedata = head;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2b887e24837e..666c73b97b0d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1125,8 +1125,7 @@ extern long nr_blockdev_pages(void);
 
 bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
-					   spinlock_t *lock);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);
 
-- 
2.19.1


^ permalink raw reply related	[flat|nested] 40+ messages in thread

* [PATCH 16/16] block: remove the queue_lock indirection
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (14 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 15/16] block: remove the lock argument to blk_alloc_queue_node Christoph Hellwig
@ 2018-11-14 16:02 ` Christoph Hellwig
  2018-11-15  7:01   ` Hannes Reinecke
  2018-11-15 19:14 ` remove more legacy request leftover Jens Axboe
  16 siblings, 1 reply; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-14 16:02 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

With the legacy request path gone there is no good reason to keep
queue_lock as a pointer; we can always use the embedded lock now.
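
The mechanical q->queue_lock to &q->queue_lock churn throughout the diff
follows directly from the type change.  A stand-alone sketch of the two
layouts, with pthread mutexes as stand-ins and illustrative names:

#include <pthread.h>

struct queue_old {
	pthread_mutex_t __queue_lock;	/* the embedded lock ... */
	pthread_mutex_t *queue_lock;	/* ... only reached via a pointer */
};

struct queue_new {
	pthread_mutex_t queue_lock;	/* only the embedded lock remains */
};

static void touch_old(struct queue_old *q)
{
	pthread_mutex_lock(q->queue_lock);	/* extra pointer load */
	pthread_mutex_unlock(q->queue_lock);
}

static void touch_new(struct queue_new *q)
{
	pthread_mutex_lock(&q->queue_lock);	/* hence the & everywhere */
	pthread_mutex_unlock(&q->queue_lock);
}

int main(void)
{
	struct queue_old old = { .__queue_lock = PTHREAD_MUTEX_INITIALIZER };
	struct queue_new new = { .queue_lock = PTHREAD_MUTEX_INITIALIZER };

	old.queue_lock = &old.__queue_lock;
	touch_old(&old);
	touch_new(&new);
	return 0;
}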

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/bfq-cgroup.c      |  2 +-
 block/bfq-iosched.c     | 16 +++++------
 block/blk-cgroup.c      | 60 ++++++++++++++++++++---------------------
 block/blk-core.c        | 10 +------
 block/blk-ioc.c         | 14 +++++-----
 block/blk-iolatency.c   |  4 +--
 block/blk-mq-sched.c    |  4 +--
 block/blk-pm.c          | 20 +++++++-------
 block/blk-pm.h          |  6 ++---
 block/blk-sysfs.c       |  4 +--
 block/blk-throttle.c    | 22 +++++++--------
 drivers/block/pktcdvd.c |  4 +--
 drivers/ide/ide-pm.c    | 10 +++----
 include/linux/blkdev.h  |  8 +-----
 14 files changed, 85 insertions(+), 99 deletions(-)

diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 9fe5952d117d..a7a1712632b0 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -334,7 +334,7 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
 
 	parent = bfqg_parent(bfqg);
 
-	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
+	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
 
 	if (unlikely(!parent))
 		return;
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c7636cbefc85..67b22c924aee 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -399,9 +399,9 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
 		unsigned long flags;
 		struct bfq_io_cq *icq;
 
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(&q->queue_lock, flags);
 		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(&q->queue_lock, flags);
 
 		return icq;
 	}
@@ -4034,7 +4034,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (idle_timer_disabled)
 		/*
 		 * Since the idle timer has been disabled,
@@ -4053,7 +4053,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 		bfqg_stats_set_start_empty_time(bfqg);
 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 #else
 static inline void bfq_update_dispatch_stats(struct request_queue *q,
@@ -4637,11 +4637,11 @@ static void bfq_update_insert_stats(struct request_queue *q,
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
 	if (idle_timer_disabled)
 		bfqg_stats_update_idle_time(bfqq_group(bfqq));
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 #else
 static inline void bfq_update_insert_stats(struct request_queue *q,
@@ -5382,9 +5382,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 	}
 	eq->elevator_data = bfqd;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->elevator = eq;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	/*
 	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3ba23b9bfeb9..fa540f1cb9e0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -147,7 +147,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 	if (blkg && blkg->q == q) {
 		if (update_hint) {
-			lockdep_assert_held(q->queue_lock);
+			lockdep_assert_held(&q->queue_lock);
 			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 		}
 		return blkg;
@@ -170,7 +170,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	int i, ret;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/* blkg holds a reference to blkcg */
 	if (!css_tryget_online(&blkcg->css)) {
@@ -268,7 +268,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	struct blkcg_gq *blkg;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	blkg = __blkg_lookup(blkcg, q, true);
 	if (blkg)
@@ -299,7 +299,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	struct blkcg_gq *parent = blkg->parent;
 	int i;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
 
 	/* Something wrong if we are trying to remove same group twice */
@@ -349,7 +349,7 @@ static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkcg_gq *blkg, *n;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
@@ -359,7 +359,7 @@ static void blkg_destroy_all(struct request_queue *q)
 	}
 
 	q->root_blkg = NULL;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 
 /*
@@ -454,10 +454,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(blkg->q->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
-		spin_unlock_irq(blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 	}
 	rcu_read_unlock();
 
@@ -655,7 +655,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
 	struct cgroup_subsys_state *pos_css;
 	u64 sum = 0;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -698,7 +698,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
 	struct blkg_rwstat sum = { };
 	int i;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -729,7 +729,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 					  struct request_queue *q)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	if (!blkcg_policy_enabled(q, pol))
 		return ERR_PTR(-EOPNOTSUPP);
@@ -750,7 +750,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx)
-	__acquires(rcu) __acquires(disk->queue->queue_lock)
+	__acquires(rcu) __acquires(&disk->queue->queue_lock)
 {
 	struct gendisk *disk;
 	struct request_queue *q;
@@ -778,7 +778,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	q = disk->queue;
 
 	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	blkg = blkg_lookup_check(blkcg, pol, q);
 	if (IS_ERR(blkg)) {
@@ -805,7 +805,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		}
 
 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		rcu_read_unlock();
 
 		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
@@ -815,7 +815,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		}
 
 		rcu_read_lock();
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 
 		blkg = blkg_lookup_check(pos, pol, q);
 		if (IS_ERR(blkg)) {
@@ -843,7 +843,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	return 0;
 
 fail_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 fail:
 	put_disk_and_module(disk);
@@ -868,9 +868,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
+	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
 {
-	spin_unlock_irq(ctx->disk->queue->queue_lock);
+	spin_unlock_irq(&ctx->disk->queue->queue_lock);
 	rcu_read_unlock();
 	put_disk_and_module(ctx->disk);
 }
@@ -903,7 +903,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		 */
 		off += scnprintf(buf+off, size-off, "%s ", dname);
 
-		spin_lock_irq(blkg->q->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 
 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 					offsetof(struct blkcg_gq, stat_bytes));
@@ -917,7 +917,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 		dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
 
-		spin_unlock_irq(blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 
 		if (rbytes || wbytes || rios || wios) {
 			has_stats = true;
@@ -1038,7 +1038,7 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
 						struct blkcg_gq, blkcg_node);
 		struct request_queue *q = blkg->q;
 
-		if (spin_trylock(q->queue_lock)) {
+		if (spin_trylock(&q->queue_lock)) {
 			blkg_destroy(blkg);
-			spin_unlock(q->queue_lock);
+			spin_unlock(&q->queue_lock);
 		} else {
@@ -1161,12 +1161,12 @@ int blkcg_init_queue(struct request_queue *q)
 
 	/* Make sure the root blkg exists. */
 	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	blkg = blkg_create(&blkcg_root, q, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
 	q->root_blkg = blkg;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 
 	if (preloaded)
@@ -1185,7 +1185,7 @@ int blkcg_init_queue(struct request_queue *q)
 	blkg_destroy_all(q);
 	return ret;
 err_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 	if (preloaded)
 		radix_tree_preload_end();
@@ -1200,7 +1200,7 @@ int blkcg_init_queue(struct request_queue *q)
  */
 void blkcg_drain_queue(struct request_queue *q)
 {
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/*
 	 * @q could be exiting and already have destroyed all blkgs as
@@ -1335,7 +1335,7 @@ int blkcg_activate_policy(struct request_queue *q,
 		}
 	}
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
@@ -1347,7 +1347,7 @@ int blkcg_activate_policy(struct request_queue *q,
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {
-			spin_unlock_irq(q->queue_lock);
+			spin_unlock_irq(&q->queue_lock);
 			goto pd_prealloc;
 		}
 
@@ -1361,7 +1361,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
 	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
@@ -1390,7 +1390,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (q->mq_ops)
 		blk_mq_freeze_queue(q);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	__clear_bit(pol->plid, q->blkcg_pols);
 
@@ -1403,7 +1403,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		}
 	}
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
diff --git a/block/blk-core.c b/block/blk-core.c
index 3f94c9de0252..92b6b200e9fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -327,8 +327,6 @@ void blk_exit_queue(struct request_queue *q)
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-	spinlock_t *lock = q->queue_lock;
-
 	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	blk_set_queue_dying(q);
@@ -381,11 +379,6 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->q_usage_counter);
 
-	spin_lock_irq(lock);
-	if (q->queue_lock != &q->__queue_lock)
-		q->queue_lock = &q->__queue_lock;
-	spin_unlock_irq(lock);
-
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
@@ -524,8 +517,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	mutex_init(&q->blk_trace_mutex);
 #endif
 	mutex_init(&q->sysfs_lock);
-	spin_lock_init(&q->__queue_lock);
-	q->queue_lock = &q->__queue_lock;
+	spin_lock_init(&q->queue_lock);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index f91ca6b70d6a..5ed59ac6ae58 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -110,9 +110,9 @@ static void ioc_release_fn(struct work_struct *work)
 						struct io_cq, ioc_node);
 		struct request_queue *q = icq->q;
 
-		if (spin_trylock(q->queue_lock)) {
+		if (spin_trylock(&q->queue_lock)) {
 			ioc_destroy_icq(icq);
-			spin_unlock(q->queue_lock);
+			spin_unlock(&q->queue_lock);
 		} else {
 			spin_unlock_irqrestore(&ioc->lock, flags);
 			cpu_relax();
@@ -233,9 +233,9 @@ void ioc_clear_queue(struct request_queue *q)
 {
 	LIST_HEAD(icq_list);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	list_splice_init(&q->icq_list, &icq_list);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	__ioc_clear_queue(&icq_list);
 }
@@ -326,7 +326,7 @@ struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
 {
 	struct io_cq *icq;
 
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/*
 	 * icq's are indexed from @ioc using radix tree and hint pointer,
@@ -385,7 +385,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	INIT_HLIST_NODE(&icq->ioc_node);
 
 	/* lock both q and ioc and try to link @icq */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	spin_lock(&ioc->lock);
 
 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
@@ -401,7 +401,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	}
 
 	spin_unlock(&ioc->lock);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	radix_tree_preload_end();
 	return icq;
 }
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 8edf1b353ad1..5f7f1773be61 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -485,11 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
 	bio_associate_blkcg(bio, &blkcg->css);
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 		blkg = blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 	}
 	if (!blkg)
 		goto out;
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 66fda19be5a3..d084f731d104 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -37,9 +37,9 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	icq = ioc_lookup_icq(ioc, q);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (!icq) {
 		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
diff --git a/block/blk-pm.c b/block/blk-pm.c
index f8fdae01bea2..0a028c189897 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -89,12 +89,12 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 	/* Switch q_usage_counter back to per-cpu mode. */
 	blk_mq_unfreeze_queue(q);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (ret < 0)
 		pm_runtime_mark_last_busy(q->dev);
 	else
 		q->rpm_status = RPM_SUSPENDING;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (ret)
 		blk_clear_pm_only(q);
@@ -121,14 +121,14 @@ void blk_post_runtime_suspend(struct request_queue *q, int err)
 	if (!q->dev)
 		return;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_SUSPENDED;
 	} else {
 		q->rpm_status = RPM_ACTIVE;
 		pm_runtime_mark_last_busy(q->dev);
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (err)
 		blk_clear_pm_only(q);
@@ -151,9 +151,9 @@ void blk_pre_runtime_resume(struct request_queue *q)
 	if (!q->dev)
 		return;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->rpm_status = RPM_RESUMING;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 EXPORT_SYMBOL(blk_pre_runtime_resume);
 
@@ -176,7 +176,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
 	if (!q->dev)
 		return;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_ACTIVE;
 		pm_runtime_mark_last_busy(q->dev);
@@ -184,7 +184,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
 	} else {
 		q->rpm_status = RPM_SUSPENDED;
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (!err)
 		blk_clear_pm_only(q);
@@ -207,10 +207,10 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
  */
 void blk_set_runtime_active(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->rpm_status = RPM_ACTIVE;
 	pm_runtime_mark_last_busy(q->dev);
 	pm_request_autosuspend(q->dev);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 EXPORT_SYMBOL(blk_set_runtime_active);
diff --git a/block/blk-pm.h b/block/blk-pm.h
index a8564ea72a41..ea5507d23e75 100644
--- a/block/blk-pm.h
+++ b/block/blk-pm.h
@@ -21,7 +21,7 @@ static inline void blk_pm_mark_last_busy(struct request *rq)
 
 static inline void blk_pm_requeue_request(struct request *rq)
 {
-	lockdep_assert_held(rq->q->queue_lock);
+	lockdep_assert_held(&rq->q->queue_lock);
 
 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		rq->q->nr_pending--;
@@ -30,7 +30,7 @@ static inline void blk_pm_requeue_request(struct request *rq)
 static inline void blk_pm_add_request(struct request_queue *q,
 				      struct request *rq)
 {
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	if (q->dev && !(rq->rq_flags & RQF_PM))
 		q->nr_pending++;
@@ -38,7 +38,7 @@ static inline void blk_pm_add_request(struct request_queue *q,
 
 static inline void blk_pm_put_request(struct request *rq)
 {
-	lockdep_assert_held(rq->q->queue_lock);
+	lockdep_assert_held(&rq->q->queue_lock);
 
 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		--rq->q->nr_pending;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 22fd086eba9f..1e370207a20e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -238,10 +238,10 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->limits.max_sectors = max_sectors_kb << 1;
 	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	return ret;
 }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a665b0950369..d0a23f0bb3ed 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1243,7 +1243,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 	bool dispatched;
 	int ret;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (throtl_can_upgrade(td, NULL))
 		throtl_upgrade_state(td);
 
@@ -1266,9 +1266,9 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 			break;
 
 		/* this dispatch windows is still open, relax and repeat */
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		cpu_relax();
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 	}
 
 	if (!dispatched)
@@ -1290,7 +1290,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 		queue_work(kthrotld_workqueue, &td->dispatch_work);
 	}
 out_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 
 /**
@@ -1314,11 +1314,11 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
 
 	bio_list_init(&bio_list_on_stack);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	for (rw = READ; rw <= WRITE; rw++)
 		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
 			bio_list_add(&bio_list_on_stack, bio);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (!bio_list_empty(&bio_list_on_stack)) {
 		blk_start_plug(&plug);
@@ -2141,7 +2141,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	throtl_update_latency_buckets(td);
 
@@ -2224,7 +2224,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	}
 
 out_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 out:
 	bio_set_flag(bio, BIO_THROTTLED);
 
@@ -2345,7 +2345,7 @@ static void tg_drain_bios(struct throtl_service_queue *parent_sq)
  * Dispatch all currently throttled bios on @q through ->make_request_fn().
  */
 void blk_throtl_drain(struct request_queue *q)
-	__releases(q->queue_lock) __acquires(q->queue_lock)
+	__releases(&q->queue_lock) __acquires(&q->queue_lock)
 {
 	struct throtl_data *td = q->td;
 	struct blkcg_gq *blkg;
@@ -2368,7 +2368,7 @@ void blk_throtl_drain(struct request_queue *q)
 	tg_drain_bios(&td->service_queue);
 
 	rcu_read_unlock();
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	/* all bios now should be in td->service_queue, issue them */
 	for (rw = READ; rw <= WRITE; rw++)
@@ -2376,7 +2376,7 @@ void blk_throtl_drain(struct request_queue *q)
 						NULL)))
 			generic_make_request(bio);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 }
 
 int blk_throtl_init(struct request_queue *q)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9381f4e3b221..4adf4c8861cd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2203,9 +2203,9 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 		 * Some CDRW drives can not handle writes larger than one packet,
 		 * even if the size is a multiple of the packet size.
 		 */
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 		blk_queue_max_hw_sectors(q, pd->settings.size);
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		set_bit(PACKET_WRITABLE, &pd->flags);
 	} else {
 		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index a8c53c98252d..51fe10ac02fa 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -44,15 +44,15 @@ static int ide_pm_execute_rq(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (unlikely(blk_queue_dying(q))) {
 		rq->rq_flags |= RQF_QUIET;
 		scsi_req(rq)->result = -ENXIO;
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		blk_mq_end_request(rq, BLK_STS_OK);
 		return -ENXIO;
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	blk_execute_rq(q, NULL, rq, true);
 
 	return scsi_req(rq)->result ? -EIO : 0;
@@ -214,12 +214,12 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 	printk("%s: completing PM request, %s\n", drive->name,
 	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
 #endif
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&q->queue_lock, flags);
 	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
 		blk_mq_stop_hw_queues(q);
 	else
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&q->queue_lock, flags);
 
 	drive->hwif->rq = NULL;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 666c73b97b0d..7019360635d8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -446,13 +446,7 @@ struct request_queue {
 	 */
 	gfp_t			bounce_gfp;
 
-	/*
-	 * protects queue structures from reentrancy. ->__queue_lock should
-	 * _never_ be used directly, it is queue private. always use
-	 * ->queue_lock.
-	 */
-	spinlock_t		__queue_lock;
-	spinlock_t		*queue_lock;
+	spinlock_t		queue_lock;
 
 	/*
 	 * queue kobject
-- 
2.19.1


^ permalink raw reply related	[flat|nested] 40+ messages in thread

* Re: [PATCH 13/16] mmc: simplify queue initialization
  2018-11-14 16:02 ` [PATCH 13/16] mmc: simplify queue initialization Christoph Hellwig
@ 2018-11-14 17:31   ` Ulf Hansson
  2018-11-15  9:02     ` Christoph Hellwig
  2018-11-15  7:00   ` Hannes Reinecke
  1 sibling, 1 reply; 40+ messages in thread
From: Ulf Hansson @ 2018-11-14 17:31 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jens Axboe, linux-block, linux-mmc, drbd-dev

On 14 November 2018 at 17:02, Christoph Hellwig <hch@lst.de> wrote:
> Merge three functions initializing the queue into a single one, and drop
> an unused argument for it.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/mmc/core/block.c |  2 +-
>  drivers/mmc/core/queue.c | 86 ++++++++++++++--------------------------
>  drivers/mmc/core/queue.h |  3 +-
>  3 files changed, 32 insertions(+), 59 deletions(-)
>
> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
> index c35b5b08bb33..27606e1382e5 100644
> --- a/drivers/mmc/core/block.c
> +++ b/drivers/mmc/core/block.c
> @@ -2334,7 +2334,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
>         INIT_LIST_HEAD(&md->rpmbs);
>         md->usage = 1;
>
> -       ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
> +       ret = mmc_init_queue(&md->queue, card, &md->lock);
>         if (ret)
>                 goto err_putdisk;
>
> diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
> index 6edffeed9953..37617fb1f9de 100644
> --- a/drivers/mmc/core/queue.c
> +++ b/drivers/mmc/core/queue.c
> @@ -378,14 +378,38 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
>         init_waitqueue_head(&mq->wait);
>  }
>
> -static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
> -                            const struct blk_mq_ops *mq_ops, spinlock_t *lock)
> +/* Set queue depth to get a reasonable value for q->nr_requests */
> +#define MMC_QUEUE_DEPTH 64
> +
> +/**
> + * mmc_init_queue - initialise a queue structure.
> + * @mq: mmc queue
> + * @card: mmc card to attach this queue
> + * @lock: queue lock
> + * @subname: partition subname

Drop subname :-)

> + *
> + * Initialise a MMC card request queue.
> + */
> +int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
> +                  spinlock_t *lock)
>  {

[...]

A very nice cleanup, thanks!

Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>

Kind regards
Uffe

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 14/16] mmc: stop abusing the request queue_lock pointer
  2018-11-14 16:02 ` [PATCH 14/16] mmc: stop abusing the request queue_lock pointer Christoph Hellwig
@ 2018-11-14 17:56   ` Ulf Hansson
  2018-11-15  9:03     ` Christoph Hellwig
  2018-11-15  7:00   ` Hannes Reinecke
  1 sibling, 1 reply; 40+ messages in thread
From: Ulf Hansson @ 2018-11-14 17:56 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jens Axboe, linux-block, linux-mmc, drbd-dev

On 14 November 2018 at 17:02, Christoph Hellwig <hch@lst.de> wrote:
> mmc abuses the block layer queue_lock pointer to pass its own lock
> from block.c to the mmc_queue code, given that the original lock isn't
> reachable outside of block.c.  Add a lock pointer to struct mmc_queue
> instead and stop overriding the block layer lock, which protects
> fields entirely separate from the mmc use.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/mmc/core/block.c | 22 ++++++++++------------
>  drivers/mmc/core/queue.c | 26 +++++++++++++-------------
>  drivers/mmc/core/queue.h |  1 +
>  3 files changed, 24 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
> index 27606e1382e5..70ec465beb69 100644
> --- a/drivers/mmc/core/block.c
> +++ b/drivers/mmc/core/block.c
> @@ -1483,7 +1483,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
>                 blk_mq_end_request(req, BLK_STS_OK);
>         }
>
> -       spin_lock_irqsave(q->queue_lock, flags);
> +       spin_lock_irqsave(mq->lock, flags);
>
>         mq->in_flight[mmc_issue_type(mq, req)] -= 1;
>
> @@ -1491,7 +1491,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
>
>         mmc_cqe_check_busy(mq);
>
> -       spin_unlock_irqrestore(q->queue_lock, flags);
> +       spin_unlock_irqrestore(mq->lock, flags);
>
>         if (!mq->cqe_busy)
>                 blk_mq_run_hw_queues(q, true);
> @@ -1988,17 +1988,16 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
>
>  static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
>  {
> -       struct request_queue *q = req->q;
>         unsigned long flags;
>         bool put_card;
>
> -       spin_lock_irqsave(q->queue_lock, flags);
> +       spin_lock_irqsave(mq->lock, flags);
>
>         mq->in_flight[mmc_issue_type(mq, req)] -= 1;
>
>         put_card = (mmc_tot_in_flight(mq) == 0);
>
> -       spin_unlock_irqrestore(q->queue_lock, flags);
> +       spin_unlock_irqrestore(mq->lock, flags);
>
>         if (put_card)
>                 mmc_put_card(mq->card, &mq->ctx);
> @@ -2094,11 +2093,11 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
>                  * request does not need to wait (although it does need to
>                  * complete complete_req first).
>                  */
> -               spin_lock_irqsave(q->queue_lock, flags);
> +               spin_lock_irqsave(mq->lock, flags);
>                 mq->complete_req = req;
>                 mq->rw_wait = false;
>                 waiting = mq->waiting;
> -               spin_unlock_irqrestore(q->queue_lock, flags);
> +               spin_unlock_irqrestore(mq->lock, flags);
>
>                 /*
>                  * If 'waiting' then the waiting task will complete this
> @@ -2117,10 +2116,10 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
>         /* Take the recovery path for errors or urgent background operations */
>         if (mmc_blk_rq_error(&mqrq->brq) ||
>             mmc_blk_urgent_bkops_needed(mq, mqrq)) {
> -               spin_lock_irqsave(q->queue_lock, flags);
> +               spin_lock_irqsave(mq->lock, flags);
>                 mq->recovery_needed = true;
>                 mq->recovery_req = req;
> -               spin_unlock_irqrestore(q->queue_lock, flags);
> +               spin_unlock_irqrestore(mq->lock, flags);
>                 wake_up(&mq->wait);
>                 schedule_work(&mq->recovery_work);
>                 return;
> @@ -2136,7 +2135,6 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
>
>  static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
>  {
> -       struct request_queue *q = mq->queue;
>         unsigned long flags;
>         bool done;
>
> @@ -2144,7 +2142,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
>          * Wait while there is another request in progress, but not if recovery
>          * is needed. Also indicate whether there is a request waiting to start.
>          */
> -       spin_lock_irqsave(q->queue_lock, flags);
> +       spin_lock_irqsave(mq->lock, flags);
>         if (mq->recovery_needed) {
>                 *err = -EBUSY;
>                 done = true;
> @@ -2152,7 +2150,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
>                 done = !mq->rw_wait;
>         }
>         mq->waiting = !done;
> -       spin_unlock_irqrestore(q->queue_lock, flags);
> +       spin_unlock_irqrestore(mq->lock, flags);
>
>         return done;
>  }
> diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
> index 37617fb1f9de..ac6a5245275a 100644
> --- a/drivers/mmc/core/queue.c
> +++ b/drivers/mmc/core/queue.c
> @@ -89,9 +89,9 @@ void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
>         struct mmc_queue *mq = q->queuedata;
>         unsigned long flags;
>
> -       spin_lock_irqsave(q->queue_lock, flags);
> +       spin_lock_irqsave(mq->lock, flags);
>         __mmc_cqe_recovery_notifier(mq);
> -       spin_unlock_irqrestore(q->queue_lock, flags);
> +       spin_unlock_irqrestore(mq->lock, flags);
>  }
>
>  static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
> @@ -128,14 +128,14 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
>         unsigned long flags;
>         int ret;
>
> -       spin_lock_irqsave(q->queue_lock, flags);
> +       spin_lock_irqsave(mq->lock, flags);
>
>         if (mq->recovery_needed || !mq->use_cqe)
>                 ret = BLK_EH_RESET_TIMER;
>         else
>                 ret = mmc_cqe_timed_out(req);
>
> -       spin_unlock_irqrestore(q->queue_lock, flags);
> +       spin_unlock_irqrestore(mq->lock, flags);
>
>         return ret;
>  }
> @@ -157,9 +157,9 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
>
>         mq->in_recovery = false;
>
> -       spin_lock_irq(q->queue_lock);
> +       spin_lock_irq(mq->lock);
>         mq->recovery_needed = false;
> -       spin_unlock_irq(q->queue_lock);
> +       spin_unlock_irq(mq->lock);
>
>         mmc_put_card(mq->card, &mq->ctx);
>
> @@ -258,10 +258,10 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>
>         issue_type = mmc_issue_type(mq, req);
>
> -       spin_lock_irq(q->queue_lock);
> +       spin_lock_irq(mq->lock);
>
>         if (mq->recovery_needed || mq->busy) {
> -               spin_unlock_irq(q->queue_lock);
> +               spin_unlock_irq(mq->lock);
>                 return BLK_STS_RESOURCE;
>         }
>
> @@ -269,7 +269,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>         case MMC_ISSUE_DCMD:
>                 if (mmc_cqe_dcmd_busy(mq)) {
>                         mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
> -                       spin_unlock_irq(q->queue_lock);
> +                       spin_unlock_irq(mq->lock);
>                         return BLK_STS_RESOURCE;
>                 }
>                 break;
> @@ -294,7 +294,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>         get_card = (mmc_tot_in_flight(mq) == 1);
>         cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
>
> -       spin_unlock_irq(q->queue_lock);
> +       spin_unlock_irq(mq->lock);
>
>         if (!(req->rq_flags & RQF_DONTPREP)) {
>                 req_to_mmc_queue_req(req)->retries = 0;
> @@ -328,12 +328,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>         if (issued != MMC_REQ_STARTED) {
>                 bool put_card = false;
>
> -               spin_lock_irq(q->queue_lock);
> +               spin_lock_irq(mq->lock);
>                 mq->in_flight[issue_type] -= 1;
>                 if (mmc_tot_in_flight(mq) == 0)
>                         put_card = true;
>                 mq->busy = false;
> -               spin_unlock_irq(q->queue_lock);
> +               spin_unlock_irq(mq->lock);
>                 if (put_card)
>                         mmc_put_card(card, &mq->ctx);
>         } else {
> @@ -397,6 +397,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
>         int ret;
>
>         mq->card = card;
> +       mq->lock = lock;

Unless I am mistaken, it seems like the "lock" can also be removed as
an input parameter to mmc_init_queue() - and we could instead do the
spin_lock_init() in there.

Moreover, that means we should drop the "lock" from struct
mmc_blk_data and instead move it into struct mmc_queue (rather than
having a pointer to it).
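
If I read the suggestion right, the follow-up would shrink the interface
roughly as below.  This is a stand-alone sketch with the kernel types
mocked on top of pthreads, so nothing here is the actual mmc code:

#include <pthread.h>
#include <stdio.h>

typedef pthread_mutex_t spinlock_t;	/* mock, for this sketch only */
#define spin_lock_init(l)	pthread_mutex_init((l), NULL)

struct mmc_card { int dummy; };

struct mmc_queue {
	struct mmc_card *card;
	spinlock_t lock;		/* embedded, was: spinlock_t *lock */
};

/* no caller-supplied lock any more */
static int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	mq->card = card;
	spin_lock_init(&mq->lock);
	return 0;
}

int main(void)
{
	struct mmc_card card = { 0 };
	struct mmc_queue mq;

	if (mmc_init_queue(&mq, &card))
		return 1;
	pthread_mutex_lock(&mq.lock);
	pthread_mutex_unlock(&mq.lock);
	puts("queue initialised with embedded lock");
	return 0;
}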

>         mq->use_cqe = host->cqe_enabled;
>
>         memset(&mq->tag_set, 0, sizeof(mq->tag_set));
> @@ -427,7 +428,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
>                 goto free_tag_set;
>         }
>
> -       mq->queue->queue_lock = lock;
>         mq->queue->queuedata = mq;
>         blk_queue_rq_timeout(mq->queue, 60 * HZ);
>
> diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
> index 29218e12900d..5421f1542e71 100644
> --- a/drivers/mmc/core/queue.h
> +++ b/drivers/mmc/core/queue.h
> @@ -73,6 +73,7 @@ struct mmc_queue_req {
>
>  struct mmc_queue {
>         struct mmc_card         *card;
> +       spinlock_t              *lock;
>         struct mmc_ctx          ctx;
>         struct blk_mq_tag_set   tag_set;
>         struct mmc_blk_data     *blkdata;
> --
> 2.19.1
>

Kind regards
Uffe

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 01/16] block: remove QUEUE_FLAG_BYPASS and ->bypass
  2018-11-14 16:02 ` [PATCH 01/16] block: remove QUEUE_FLAG_BYPASS and ->bypass Christoph Hellwig
@ 2018-11-15  6:50   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:50 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> Unused since the removal of the legacy request code.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-cgroup.c         | 15 ---------------
>   block/blk-core.c           | 21 ---------------------
>   block/blk-mq-debugfs.c     |  1 -
>   block/blk-throttle.c       |  3 ---
>   include/linux/blk-cgroup.h |  6 +-----
>   include/linux/blkdev.h     |  3 ---
>   6 files changed, 1 insertion(+), 48 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 02/16] block: remove deadline __deadline manipulation helpers
  2018-11-14 16:02 ` [PATCH 02/16] block: remove deadline __deadline manipulation helpers Christoph Hellwig
@ 2018-11-15  6:51   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:51 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> No users left since the removal of the legacy request interface, we can
> remove all the magic bit stealing now and make it a normal field.
> 
> But use WRITE_ONCE/READ_ONCE on the new deadline field, given that we
> don't seem to have any mechanism to guarantee a new value actually
> gets seen by other threads.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-mq.c         |  4 ++--
>   block/blk-timeout.c    |  8 +++++---
>   block/blk.h            | 35 -----------------------------------
>   include/linux/blkdev.h |  4 +---
>   4 files changed, 8 insertions(+), 43 deletions(-)
> 
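As an aside, the WRITE_ONCE/READ_ONCE pairing the message refers to
looks like this in stand-alone form; the macros are mocked here with
compiler builtins, so this is an illustration rather than the kernel
implementation:

#include <stdio.h>

#define WRITE_ONCE(x, val)	__atomic_store_n(&(x), (val), __ATOMIC_RELAXED)
#define READ_ONCE(x)		__atomic_load_n(&(x), __ATOMIC_RELAXED)

struct request {
	unsigned long deadline;		/* plain field, no stolen bits */
};

int main(void)
{
	struct request rq = { 0 };

	WRITE_ONCE(rq.deadline, 4711UL);	/* timeout path writes ... */
	printf("deadline = %lu\n",
	       READ_ONCE(rq.deadline));	/* ... other threads read */
	return 0;
}
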
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 03/16] block: don't hold the queue_lock over blk_abort_request
  2018-11-14 16:02 ` [PATCH 03/16] block: don't hold the queue_lock over blk_abort_request Christoph Hellwig
@ 2018-11-15  6:51   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:51 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> There is nothing it could synchronize against, so don't go through
> the pains of acquiring the lock.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-timeout.c                 |  2 +-
>   drivers/ata/libata-eh.c             |  4 ----
>   drivers/block/mtip32xx/mtip32xx.c   |  5 +----
>   drivers/scsi/libsas/sas_ata.c       |  5 -----
>   drivers/scsi/libsas/sas_scsi_host.c | 10 ++--------
>   5 files changed, 4 insertions(+), 22 deletions(-)
> 
After all the pain we went through with aborts ...

Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 04/16] block: use atomic bitops for ->queue_flags
  2018-11-14 16:02 ` [PATCH 04/16] block: use atomic bitops for ->queue_flags Christoph Hellwig
@ 2018-11-15  6:55   ` Hannes Reinecke
  2018-11-15  9:04     ` Christoph Hellwig
  0 siblings, 1 reply; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:55 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> ->queue_flags is generally not set or cleared in the fast path, and also
> generally set or cleared one flag at a time.  Make use of the normal
> atomic bitops for it so that we don't need to take the queue_lock,
> which is otherwise mostly unused in the core block layer now.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-core.c       | 54 ++++++----------------------------------
>   block/blk-mq.c         |  2 +-
>   block/blk-settings.c   | 10 +++-----
>   block/blk-sysfs.c      | 28 +++++++++------------
>   block/blk.h            | 56 ------------------------------------------
>   include/linux/blkdev.h |  1 -
>   6 files changed, 24 insertions(+), 127 deletions(-)
> 
I wonder if we can't remove the 'blk_queue_flag_XXX' helpers and replace 
them with inlines ...

Otherwise:

Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 05/16] block: remove queue_lockdep_assert_held
  2018-11-14 16:02 ` [PATCH 05/16] block: remove queue_lockdep_assert_held Christoph Hellwig
@ 2018-11-15  6:55   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:55 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> The only remaining user unconditionally drops and reacquires the lock,
> which means we really don't need any additional (conditional) annotation.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-throttle.c |  1 -
>   block/blk.h          | 13 -------------
>   2 files changed, 14 deletions(-)
> 
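For reference, the removed annotation was itself conditional on the
lock pointer being set, along these lines:

    static inline void queue_lockdep_assert_held(struct request_queue *q)
    {
            if (q->queue_lock)
                    lockdep_assert_held(q->queue_lock);
    }
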
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 06/16] block-iolatency: remove the unused lock argument to rq_qos_throttle
  2018-11-14 16:02 ` [PATCH 06/16] block-iolatency: remove the unused lock argument to rq_qos_throttle Christoph Hellwig
@ 2018-11-15  6:56   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:56 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> Unused now that the legacy request path is gone.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-iolatency.c | 24 ++++++------------------
>   block/blk-mq.c        |  2 +-
>   block/blk-rq-qos.c    |  5 ++---
>   block/blk-rq-qos.h    |  4 ++--
>   block/blk-wbt.c       | 16 ++++------------
>   5 files changed, 15 insertions(+), 36 deletions(-)
> 
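At the call sites this is a purely mechanical change, e.g.:

    /* before */
    rq_qos_throttle(q, bio, NULL);

    /* after */
    rq_qos_throttle(q, bio);
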
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 07/16] block: update a few comments for the legacy request removal
  2018-11-14 16:02 ` [PATCH 07/16] block: update a few comments for the legacy request removal Christoph Hellwig
@ 2018-11-15  6:56   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:56 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> Only the mq locking is left in the flush state machine.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-flush.c | 4 ++--
>   1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/block/blk-flush.c b/block/blk-flush.c
> index c53197dcdd70..fcd18b158fd6 100644
> --- a/block/blk-flush.c
> +++ b/block/blk-flush.c
> @@ -148,7 +148,7 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
>    * completion and trigger the next step.
>    *
>    * CONTEXT:
> - * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
> + * spin_lock_irq(fq->mq_flush_lock)
>    *
>    * RETURNS:
>    * %true if requests were added to the dispatch queue, %false otherwise.
> @@ -252,7 +252,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
>    * Please read the comment at the top of this file for more info.
>    *
>    * CONTEXT:
> - * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
> + * spin_lock_irq(fq->mq_flush_lock)
>    *
>    */
>   static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 08/16] block: remove a few unused exports
  2018-11-14 16:02 ` [PATCH 08/16] block: remove a few unused exports Christoph Hellwig
@ 2018-11-15  6:57   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:57 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-cgroup.c   | 6 ------
>   block/blk-ioc.c      | 3 ---
>   block/blk-mq-sysfs.c | 1 -
>   block/blk-softirq.c  | 1 -
>   block/blk-stat.c     | 4 ----
>   block/blk-wbt.c      | 2 --
>   6 files changed, 17 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 09/16] blk-cgroup: consolidate error handling in blkcg_init_queue
  2018-11-14 16:02 ` [PATCH 09/16] blk-cgroup: consolidate error handling in blkcg_init_queue Christoph Hellwig
@ 2018-11-15  6:58   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:58 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> Use a goto label to merge two identical pieces of error handling code.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-cgroup.c | 22 ++++++++++------------
>   1 file changed, 10 insertions(+), 12 deletions(-)
> 
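The pattern, sketched with hypothetical step names instead of the
actual policy initializers:

    int blkcg_init_queue(struct request_queue *q)
    {
            int ret;

            /* ... blkg allocation ... */

            ret = init_policy_a(q);         /* hypothetical */
            if (ret)
                    goto err_destroy_all;

            ret = init_policy_b(q);         /* hypothetical */
            if (ret)
                    goto err_destroy_all;

            return 0;

    err_destroy_all:
            blkg_destroy_all(q);
            return ret;
    }
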
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 10/16] blk-cgroup: move locking into blkg_destroy_all
  2018-11-14 16:02 ` [PATCH 10/16] blk-cgroup: move locking into blkg_destroy_all Christoph Hellwig
@ 2018-11-15  6:58   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:58 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-cgroup.c | 9 ++-------
>   1 file changed, 2 insertions(+), 7 deletions(-)
> 
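That is, instead of every caller wrapping the call (sketch):

    spin_lock_irq(q->queue_lock);
    blkg_destroy_all(q);
    spin_unlock_irq(q->queue_lock);

the function acquires the queue_lock itself and callers just call
blkg_destroy_all(q).
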
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 11/16] drbd: don't override the queue_lock
  2018-11-14 16:02 ` [PATCH 11/16] drbd: don't override the queue_lock Christoph Hellwig
@ 2018-11-15  6:58   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:58 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> The DRBD req_lock and block layer queue_lock are used for entirely
> different resources.  Stop using the req_lock as the block layer
> queue_lock.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/block/drbd/drbd_main.c | 2 +-
>   1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
> index fa8204214ac0..b66c59ce6260 100644
> --- a/drivers/block/drbd/drbd_main.c
> +++ b/drivers/block/drbd/drbd_main.c
> @@ -2792,7 +2792,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
>   
>   	drbd_init_set_defaults(device);
>   
> -	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &resource->req_lock);
> +	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
>   	if (!q)
>   		goto out_no_q;
>   	device->rq_queue = q;
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 12/16] umem: don't override the queue_lock
  2018-11-14 16:02 ` [PATCH 12/16] umem: " Christoph Hellwig
@ 2018-11-15  6:59   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  6:59 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> The umem card->lock and the block layer queue_lock are used for entirely
> different resources.  Stop using card->lock as the block layer
> queue_lock.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/block/umem.c | 3 +--
>   1 file changed, 1 insertion(+), 2 deletions(-)
> 
> diff --git a/drivers/block/umem.c b/drivers/block/umem.c
> index be3e3ab79950..8a27b5adc2b3 100644
> --- a/drivers/block/umem.c
> +++ b/drivers/block/umem.c
> @@ -888,8 +888,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
>   	card->biotail = &card->bio;
>   	spin_lock_init(&card->lock);
>   
> -	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE,
> -					   &card->lock);
> +	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
>   	if (!card->queue)
>   		goto failed_alloc;
>   
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 13/16] mmc: simplify queue initialization
  2018-11-14 16:02 ` [PATCH 13/16] mmc: simplify queue initialization Christoph Hellwig
  2018-11-14 17:31   ` Ulf Hansson
@ 2018-11-15  7:00   ` Hannes Reinecke
  1 sibling, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  7:00 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> Merge the three functions initializing the queue into a single one,
> and drop an unused argument from it.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/mmc/core/block.c |  2 +-
>   drivers/mmc/core/queue.c | 86 ++++++++++++++--------------------------
>   drivers/mmc/core/queue.h |  3 +-
>   3 files changed, 32 insertions(+), 59 deletions(-)
> 
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 14/16] mmc: stop abusing the request queue_lock pointer
  2018-11-14 16:02 ` [PATCH 14/16] mmc: stop abusing the request queue_lock pointer Christoph Hellwig
  2018-11-14 17:56   ` Ulf Hansson
@ 2018-11-15  7:00   ` Hannes Reinecke
  1 sibling, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  7:00 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> mmc uses the block layer queue_lock pointer to make its own lock
> reachable from the mmc_queue structure, given that the original lock
> isn't reachable outside of block.c.  Add a lock pointer to struct
> mmc_queue instead and stop overriding the block layer lock, which
> protects fields entirely separate from the mmc use.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/mmc/core/block.c | 22 ++++++++++------------
>   drivers/mmc/core/queue.c | 26 +++++++++++++-------------
>   drivers/mmc/core/queue.h |  1 +
>   3 files changed, 24 insertions(+), 25 deletions(-)
> 
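A sketch of the shape of the change (surrounding members elided):

    struct mmc_queue {
            struct mmc_card *card;
            spinlock_t      *lock;  /* points at the owner's lock */
            /* ... */
    };

    /* users then take the lock through the mmc_queue: */
    spin_lock_irq(mq->lock);
    /* ... */
    spin_unlock_irq(mq->lock);
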
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 15/16] block: remove the lock argument to blk_alloc_queue_node
  2018-11-14 16:02 ` [PATCH 15/16] block: remove the lock argument to blk_alloc_queue_node Christoph Hellwig
@ 2018-11-15  7:00   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  7:00 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> With the legacy request path gone there is no real need to override the
> queue_lock.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-core.c               | 16 +++-------------
>   block/blk-mq.c                 |  2 +-
>   drivers/block/drbd/drbd_main.c |  2 +-
>   drivers/block/null_blk_main.c  |  3 +--
>   drivers/block/umem.c           |  2 +-
>   drivers/lightnvm/core.c        |  2 +-
>   drivers/md/dm.c                |  2 +-
>   drivers/nvdimm/pmem.c          |  2 +-
>   drivers/nvme/host/multipath.c  |  2 +-
>   include/linux/blkdev.h         |  3 +--
>   10 files changed, 12 insertions(+), 24 deletions(-)
> 
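The prototype loses its last argument, so call sites change roughly
like:

    /* before */
    q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);

    /* after */
    q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
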
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 16/16] block: remove the queue_lock indirection
  2018-11-14 16:02 ` [PATCH 16/16] block: remove the queue_lock indirection Christoph Hellwig
@ 2018-11-15  7:01   ` Hannes Reinecke
  0 siblings, 0 replies; 40+ messages in thread
From: Hannes Reinecke @ 2018-11-15  7:01 UTC (permalink / raw)
  To: Christoph Hellwig, Jens Axboe; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 5:02 PM, Christoph Hellwig wrote:
> With the legacy request path gone there is no good reason to keep
> queue_lock as a pointer, we can always use the embedded lock now.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/bfq-cgroup.c      |  2 +-
>   block/bfq-iosched.c     | 16 +++++------
>   block/blk-cgroup.c      | 60 ++++++++++++++++++++---------------------
>   block/blk-core.c        | 10 +------
>   block/blk-ioc.c         | 14 +++++-----
>   block/blk-iolatency.c   |  4 +--
>   block/blk-mq-sched.c    |  4 +--
>   block/blk-pm.c          | 20 +++++++-------
>   block/blk-pm.h          |  6 ++---
>   block/blk-sysfs.c       |  4 +--
>   block/blk-throttle.c    | 22 +++++++--------
>   drivers/block/pktcdvd.c |  4 +--
>   drivers/ide/ide-pm.c    | 10 +++----
>   include/linux/blkdev.h  |  8 +-----
>   14 files changed, 85 insertions(+), 99 deletions(-)
> 
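The member goes from a pointer to an embedded spinlock_t, so all
remaining users change roughly like:

    /* before: spinlock_t *queue_lock */
    spin_lock_irq(q->queue_lock);

    /* after: spinlock_t queue_lock */
    spin_lock_irq(&q->queue_lock);
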
Reviewed-by: Hannes Reinecke <hare@suse.com>

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		   Teamlead Storage & Networking
hare@suse.de			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 13/16] mmc: simplify queue initialization
  2018-11-14 17:31   ` Ulf Hansson
@ 2018-11-15  9:02     ` Christoph Hellwig
  0 siblings, 0 replies; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-15  9:02 UTC (permalink / raw)
  To: Ulf Hansson
  Cc: Christoph Hellwig, Jens Axboe, linux-block, linux-mmc, drbd-dev

On Wed, Nov 14, 2018 at 06:31:45PM +0100, Ulf Hansson wrote:
> > + * @subname: partition subname
> 
> Drop subname :-)

Fixed.

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 14/16] mmc: stop abusing the request queue_lock pointer
  2018-11-14 17:56   ` Ulf Hansson
@ 2018-11-15  9:03     ` Christoph Hellwig
  0 siblings, 0 replies; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-15  9:03 UTC (permalink / raw)
  To: Ulf Hansson
  Cc: Christoph Hellwig, Jens Axboe, linux-block, linux-mmc, drbd-dev

On Wed, Nov 14, 2018 at 06:56:41PM +0100, Ulf Hansson wrote:
> >         } else {
> > @@ -397,6 +397,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
> >         int ret;
> >
> >         mq->card = card;
> > +       mq->lock = lock;
> 
> Unless I am mistaken, it seems like the "lock" can also be removed as
> an input parameter to mmc_init_queue() - and the spin_lock_init() can
> instead be done in there.
> 
> Moreover, that means we should drop the "lock" from struct
> mmc_blk_data and instead move it to struct mmc_queue (rather than
> having a pointer to it).

Which sounds like a sensible idea indeed; I'll look into it.
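
A sketch of what that could look like (hypothetical, assuming no other
user of the mmc_blk_data lock remains):

    struct mmc_queue {
            struct mmc_card *card;
            spinlock_t      lock;   /* embedded instead of a pointer */
            /* ... */
    };

    int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
    {
            mq->card = card;
            spin_lock_init(&mq->lock);
            /* ... rest of the initialization ... */
            return 0;
    }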

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 04/16] block: use atomic bitops for ->queue_flags
  2018-11-15  6:55   ` Hannes Reinecke
@ 2018-11-15  9:04     ` Christoph Hellwig
  0 siblings, 0 replies; 40+ messages in thread
From: Christoph Hellwig @ 2018-11-15  9:04 UTC (permalink / raw)
  To: Hannes Reinecke
  Cc: Christoph Hellwig, Jens Axboe, linux-block, linux-mmc, drbd-dev

On Thu, Nov 15, 2018 at 07:55:02AM +0100, Hannes Reinecke wrote:
>> Signed-off-by: Christoph Hellwig <hch@lst.de>
>> ---
>>   block/blk-core.c       | 54 ++++++----------------------------------
>>   block/blk-mq.c         |  2 +-
>>   block/blk-settings.c   | 10 +++-----
>>   block/blk-sysfs.c      | 28 +++++++++------------
>>   block/blk.h            | 56 ------------------------------------------
>>   include/linux/blkdev.h |  1 -
>>   6 files changed, 24 insertions(+), 127 deletions(-)
>>
> I wonder if we can't remove the 'blk_queue_flag_XXX' helpers and replace 
> them with inlines ...

I'd prefer to just open code the *_bit helpers, as that is a lot more
reasonable.  But that is quite a lot of churn, so I'll only do that if
I get buy-in to that idea first, and preferably in a separate series.
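
That is, call sites would go from the wrapper straight to the
underlying bitop, e.g.:

    /* via the helper */
    blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);

    /* open coded */
    set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);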

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: remove more legacy request leftover
  2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
                   ` (15 preceding siblings ...)
  2018-11-14 16:02 ` [PATCH 16/16] block: remove the queue_lock indirection Christoph Hellwig
@ 2018-11-15 19:14 ` Jens Axboe
  2018-11-15 19:20   ` Jens Axboe
  16 siblings, 1 reply; 40+ messages in thread
From: Jens Axboe @ 2018-11-15 19:14 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block, linux-mmc, drbd-dev

On 11/14/18 9:02 AM, Christoph Hellwig wrote:
> Hi Jens,
> 
> this series removes another bunch of legacy request leftovers,
> including the pointer indirection for the queue_lock.

Applied, with the subname part removed as mentioned in #13.

> Note that we have very few queue_lock users left, I wonder if
> we should get rid of it entirely and have separate locks for
> the cgroup and I/O scheduler code, which are the only heavy
> users?

Probably not worth it...

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: remove more legacy request leftover
  2018-11-15 19:14 ` remove more legacy request leftover Jens Axboe
@ 2018-11-15 19:20   ` Jens Axboe
  0 siblings, 0 replies; 40+ messages in thread
From: Jens Axboe @ 2018-11-15 19:20 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-block, linux-mmc, drbd-dev

On 11/15/18 12:14 PM, Jens Axboe wrote:
> On 11/14/18 9:02 AM, Christoph Hellwig wrote:
>> Hi Jens,
>>
>> this series removes another bunch of legacy request leftovers,
>> including the pointer indirection for the queue_lock.
> 
> Applied, with the subname part removed as mentioned in #13.

Your patch #16 missed a few spots; I fixed it up. JFYI.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 40+ messages in thread

end of thread, other threads:[~2018-11-15 19:20 UTC | newest]

Thread overview: 40+ messages
-- links below jump to the message on this page --
2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
2018-11-14 16:02 ` [PATCH 01/16] block: remove QUEUE_FLAG_BYPASS and ->bypass Christoph Hellwig
2018-11-15  6:50   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 02/16] block: remove deadline __deadline manipulation helpers Christoph Hellwig
2018-11-15  6:51   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 03/16] block: don't hold the queue_lock over blk_abort_request Christoph Hellwig
2018-11-15  6:51   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 04/16] block: use atomic bitops for ->queue_flags Christoph Hellwig
2018-11-15  6:55   ` Hannes Reinecke
2018-11-15  9:04     ` Christoph Hellwig
2018-11-14 16:02 ` [PATCH 05/16] block: remove queue_lockdep_assert_held Christoph Hellwig
2018-11-15  6:55   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 06/16] block-iolatency: remove the unused lock argument to rq_qos_throttle Christoph Hellwig
2018-11-15  6:56   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 07/16] block: update a few comments for the legacy request removal Christoph Hellwig
2018-11-15  6:56   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 08/16] block: remove a few unused exports Christoph Hellwig
2018-11-15  6:57   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 09/16] blk-cgroup: consolidate error handling in blkcg_init_queue Christoph Hellwig
2018-11-15  6:58   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 10/16] blk-cgroup: move locking into blkg_destroy_all Christoph Hellwig
2018-11-15  6:58   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 11/16] drbd: don't override the queue_lock Christoph Hellwig
2018-11-15  6:58   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 12/16] umem: " Christoph Hellwig
2018-11-15  6:59   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 13/16] mmc: simplify queue initialization Christoph Hellwig
2018-11-14 17:31   ` Ulf Hansson
2018-11-15  9:02     ` Christoph Hellwig
2018-11-15  7:00   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 14/16] mmc: stop abusing the request queue_lock pointer Christoph Hellwig
2018-11-14 17:56   ` Ulf Hansson
2018-11-15  9:03     ` Christoph Hellwig
2018-11-15  7:00   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 15/16] block: remove the lock argument to blk_alloc_queue_node Christoph Hellwig
2018-11-15  7:00   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 16/16] block: remove the queue_lock indirection Christoph Hellwig
2018-11-15  7:01   ` Hannes Reinecke
2018-11-15 19:14 ` remove more legacy request leftover Jens Axboe
2018-11-15 19:20   ` Jens Axboe
