From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org, linux-mmc@vger.kernel.org,
	drbd-dev@lists.linbit.com
Subject: [PATCH 06/16] block-iolatency: remove the unused lock argument to rq_qos_throttle
Date: Wed, 14 Nov 2018 17:02:09 +0100	[thread overview]
Message-ID: <20181114160219.28328-7-hch@lst.de> (raw)
In-Reply-To: <20181114160219.28328-1-hch@lst.de>

Unused now that the legacy request path is gone.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-iolatency.c | 24 ++++++------------------
 block/blk-mq.c        |  2 +-
 block/blk-rq-qos.c    |  5 ++---
 block/blk-rq-qos.h    |  4 ++--
 block/blk-wbt.c       | 16 ++++------------
 5 files changed, 15 insertions(+), 36 deletions(-)
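
Both wait loops carried the same conditional lock dance: legacy-path callers
entered with the queue_lock held, so the throttle code had to drop it around
the sleep and re-acquire it afterwards. A minimal sketch of the pattern the
hunks below delete (simplified, not a verbatim quote of either loop):

	if (lock) {
		spin_unlock_irq(lock);	/* legacy path: caller held queue_lock */
		io_schedule();
		spin_lock_irq(lock);	/* re-acquire before returning */
	} else {
		io_schedule();		/* blk-mq path: no lock held */
	}

With blk-mq as the only remaining I/O path, lock is always NULL, so both
loops collapse to a bare io_schedule(). For the same reason
blkcg_iolatency_throttle() can now take q->queue_lock unconditionally
around blkg_lookup_create().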

diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 38c35c32aff2..8edf1b353ad1 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -276,10 +276,8 @@ static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
 
 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 				       struct iolatency_grp *iolat,
-				       spinlock_t *lock, bool issue_as_root,
+				       bool issue_as_root,
 				       bool use_memdelay)
-	__releases(lock)
-	__acquires(lock)
 {
 	struct rq_wait *rqw = &iolat->rq_wait;
 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
@@ -311,14 +309,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 		if (iolatency_may_queue(iolat, &wait, first_block))
 			break;
 		first_block = false;
-
-		if (lock) {
-			spin_unlock_irq(lock);
-			io_schedule();
-			spin_lock_irq(lock);
-		} else {
-			io_schedule();
-		}
+		io_schedule();
 	} while (1);
 
 	finish_wait(&rqw->wait, &wait);
@@ -478,8 +469,7 @@ static void check_scale_change(struct iolatency_grp *iolat)
 	scale_change(iolat, direction > 0);
 }
 
-static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
-				     spinlock_t *lock)
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
 {
 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
 	struct blkcg *blkcg;
@@ -495,13 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
 	bio_associate_blkcg(bio, &blkcg->css);
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
-		if (!lock)
-			spin_lock_irq(q->queue_lock);
+		spin_lock_irq(q->queue_lock);
 		blkg = blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
-		if (!lock)
-			spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(q->queue_lock);
 	}
 	if (!blkg)
 		goto out;
@@ -518,7 +506,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
 		}
 
 		check_scale_change(iolat);
-		__blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
 				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		blkg = blkg->parent;
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e2717e843727..a3f057fdd045 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1886,7 +1886,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	if (blk_mq_sched_bio_merge(q, bio))
 		return BLK_QC_T_NONE;
 
-	rq_qos_throttle(q, bio, NULL);
+	rq_qos_throttle(q, bio);
 
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 0005dfd568dd..f8a4d3fbb98c 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -67,14 +67,13 @@ void rq_qos_requeue(struct request_queue *q, struct request *rq)
 	}
 }
 
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
-		     spinlock_t *lock)
+void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
 	struct rq_qos *rqos;
 
 	for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
 		if (rqos->ops->throttle)
-			rqos->ops->throttle(rqos, bio, lock);
+			rqos->ops->throttle(rqos, bio);
 	}
 }
 
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 32b02efbfa66..b6b11d496007 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -25,7 +25,7 @@ struct rq_qos {
 };
 
 struct rq_qos_ops {
-	void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+	void (*throttle)(struct rq_qos *, struct bio *);
 	void (*track)(struct rq_qos *, struct request *, struct bio *);
 	void (*issue)(struct rq_qos *, struct request *);
 	void (*requeue)(struct rq_qos *, struct request *);
@@ -103,7 +103,7 @@ void rq_qos_done(struct request_queue *, struct request *);
 void rq_qos_issue(struct request_queue *, struct request *);
 void rq_qos_requeue(struct request_queue *, struct request *);
 void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
-void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
+void rq_qos_throttle(struct request_queue *, struct bio *);
 void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
 void rq_qos_exit(struct request_queue *);
 #endif
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0fc222d4194b..e5a66c574683 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -521,9 +521,7 @@ static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
-		       unsigned long rw, spinlock_t *lock)
-	__releases(lock)
-	__acquires(lock)
+		       unsigned long rw)
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	struct wbt_wait_data data = {
@@ -561,13 +559,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 			break;
 		}
 
-		if (lock) {
-			spin_unlock_irq(lock);
-			io_schedule();
-			spin_lock_irq(lock);
-		} else
-			io_schedule();
-
+		io_schedule();
 		has_sleeper = false;
 	} while (1);
 
@@ -624,7 +616,7 @@ static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
 {
 	struct rq_wb *rwb = RQWB(rqos);
 	enum wbt_flags flags;
@@ -636,7 +628,7 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
 		return;
 	}
 
-	__wbt_wait(rwb, flags, bio->bi_opf, lock);
+	__wbt_wait(rwb, flags, bio->bi_opf);
 
 	if (!blk_stat_is_active(rwb->cb))
 		rwb_arm_timer(rwb);
-- 
2.19.1
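
For context, the resulting rq_qos interface: throttle hooks may still sleep,
but no caller ever passes or holds a lock for them. A hypothetical policy
stub against the new signature (illustrative only; the example_* names are
not from this series):

	#include "blk-rq-qos.h"

	/* illustrative stub: may sleep, no spinlock_t * involved */
	static void example_throttle(struct rq_qos *rqos, struct bio *bio)
	{
		/* block here until the bio may be issued */
	}

	static struct rq_qos_ops example_ops = {
		.throttle	= example_throttle,
	};

Callers on the hot path now reduce to rq_qos_throttle(q, bio), as in the
blk_mq_make_request() hunk above.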


Thread overview: 62+ messages

2018-11-14 16:02 remove more legacy request leftover Christoph Hellwig
2018-11-14 16:02 ` [PATCH 01/16] block: remove QUEUE_FLAG_BYPASS and ->bypass Christoph Hellwig
2018-11-15  6:50   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 02/16] block: remove deadline __deadline manipulation helpers Christoph Hellwig
2018-11-15  6:51   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 03/16] block: don't hold the queue_lock over blk_abort_request Christoph Hellwig
2018-11-15  6:51   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 04/16] block: use atomic bitops for ->queue_flags Christoph Hellwig
2018-11-15  6:55   ` Hannes Reinecke
2018-11-15  9:04     ` Christoph Hellwig
2018-11-14 16:02 ` [PATCH 05/16] block: remove queue_lockdep_assert_held Christoph Hellwig
2018-11-15  6:55   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 06/16] block-iolatency: remove the unused lock argument to rq_qos_throttle Christoph Hellwig [this message]
2018-11-15  6:56   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 07/16] block: update a few comments for the legacy request removal Christoph Hellwig
2018-11-15  6:56   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 08/16] block: remove a few unused exports Christoph Hellwig
2018-11-15  6:57   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 09/16] blk-cgroup: consolidate error handling in blkcg_init_queue Christoph Hellwig
2018-11-15  6:58   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 10/16] blk-cgroup: move locking into blkg_destroy_all Christoph Hellwig
2018-11-15  6:58   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 11/16] drbd: don't override the queue_lock Christoph Hellwig
2018-11-15  6:58   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 12/16] umem: don't override the queue_lock Christoph Hellwig
2018-11-15  6:59   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 13/16] mmc: simplify queue initialization Christoph Hellwig
2018-11-14 17:31   ` Ulf Hansson
2018-11-15  9:02     ` Christoph Hellwig
2018-11-15  7:00   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 14/16] mmc: stop abusing the request queue_lock pointer Christoph Hellwig
2018-11-14 17:56   ` Ulf Hansson
2018-11-15  9:03     ` Christoph Hellwig
2018-11-15  7:00   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 15/16] block: remove the lock argument to blk_alloc_queue_node Christoph Hellwig
2018-11-15  7:00   ` Hannes Reinecke
2018-11-14 16:02 ` [PATCH 16/16] block: remove the queue_lock indirection Christoph Hellwig
2018-11-15  7:01   ` Hannes Reinecke
2018-11-15 19:14 ` remove more legacy request leftover Jens Axboe
2018-11-15 19:20   ` Jens Axboe
