From: Paolo Valente <paolo.valente@linaro.org>
To: Jens Axboe <axboe@kernel.dk>, Tejun Heo <tj@kernel.org>
Cc: Fabio Checconi <fchecconi@gmail.com>,
Arianna Avanzini <avanzini.arianna@gmail.com>,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
ulf.hansson@linaro.org, linus.walleij@linaro.org,
broonie@kernel.org, Paolo Valente <paolo.valente@linaro.org>
Subject: [PATCH RFC 07/22] block, cfq: get rid of workload type
Date: Mon, 1 Feb 2016 23:12:43 +0100 [thread overview]
Message-ID: <1454364778-25179-8-git-send-email-paolo.valente@linaro.org> (raw)
In-Reply-To: <1454364778-25179-1-git-send-email-paolo.valente@linaro.org>
From: Arianna Avanzini <avanzini.arianna@gmail.com>
CFQ also selects the queue to serve according to the type of workload
the queue belongs to. This kind of heuristic has no match in BFQ,
where both high throughput and provable service guarantees are
provided through a single, unified scheduling policy.
Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
---
block/cfq-iosched.c | 131 +++++++++++-----------------------------------------
1 file changed, 26 insertions(+), 105 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 48ab681..15ee70d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -155,15 +155,6 @@ enum wl_class_t {
CFQ_PRIO_NR,
};
-/*
- * Second index in the service_trees.
- */
-enum wl_type_t {
- ASYNC_WORKLOAD = 0,
- SYNC_NOIDLE_WORKLOAD = 1,
- SYNC_WORKLOAD = 2
-};
-
struct cfq_io_cq {
struct io_cq icq; /* must be the first member */
struct cfq_queue *cfqq[2];
@@ -179,20 +170,16 @@ struct cfq_data {
/*
* rr lists of queues with requests. We maintain service trees for
- * RT and BE classes. These trees are subdivided in subclasses
- * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
- * class there is no subclassification and all the cfq queues go on
- * a single tree service_tree_idle.
+ * RT and BE classes.
* Counts are embedded in the cfq_rb_root
*/
- struct cfq_rb_root service_trees[2][3];
+ struct cfq_rb_root service_trees[2];
struct cfq_rb_root service_tree_idle;
/*
* The priority currently being served
*/
enum wl_class_t serving_wl_class;
- enum wl_type_t serving_wl_type;
unsigned long workload_expires;
unsigned int busy_queues;
@@ -291,9 +278,8 @@ CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
+ blk_add_trace_msg((cfqd)->queue, "cfq%d%c " fmt, (cfqq)->pid, \
cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
##args)
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -301,12 +287,12 @@ CFQ_CFQQ_FNS(wait_busy);
/* Traverses through cfq service trees */
#define for_each_st(cfqd, i, j, st) \
for (i = 0; i <= IDLE_WORKLOAD; i++) \
- for (j = 0, st = i < IDLE_WORKLOAD ? &cfqd->service_trees[i][j]\
+ for (j = 0, st = i < IDLE_WORKLOAD ? &cfqd->service_trees[i]\
: &cfqd->service_tree_idle; \
- (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
- (i == IDLE_WORKLOAD && j == 0); \
- j++, st = i < IDLE_WORKLOAD ? \
- &cfqd->service_trees[i][j] : NULL) \
+ (i < IDLE_WORKLOAD) || \
+ (i == IDLE_WORKLOAD); \
+ st = i < IDLE_WORKLOAD ? \
+ &cfqd->service_trees[i] : NULL) \
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
struct cfq_ttime *ttime)
@@ -327,33 +313,6 @@ static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
return BE_WORKLOAD;
}
-
-static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
-{
- if (!cfq_cfqq_sync(cfqq))
- return ASYNC_WORKLOAD;
- if (!cfq_cfqq_idle_window(cfqq))
- return SYNC_NOIDLE_WORKLOAD;
- return SYNC_WORKLOAD;
-}
-
-static inline int cfq_busy_queues_wl(enum wl_class_t wl_class,
- struct cfq_data *cfqd)
-{
- if (wl_class == IDLE_WORKLOAD)
- return cfqd->service_tree_idle.count;
-
- return cfqd->service_trees[wl_class][ASYNC_WORKLOAD].count +
- cfqd->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
- cfqd->service_trees[wl_class][SYNC_WORKLOAD].count;
-}
-
-static inline int cfq_busy_async_queues(struct cfq_data *cfqd)
-{
- return cfqd->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
- cfqd->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
-}
-
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
struct cfq_io_cq *cic, struct bio *bio);
@@ -677,7 +636,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
int left;
int new_cfqq = 1;
- st = &cfqd->service_trees[cfqq_class(cfqq)][cfqq_type(cfqq)];
+ st = &cfqd->service_trees[cfqq_class(cfqq)];
if (cfq_class_idle(cfqq)) {
rb_key = CFQ_IDLE_DELAY;
parent = rb_last(&st->rb);
@@ -999,8 +958,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
- cfqd->serving_wl_class, cfqd->serving_wl_type);
+ cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d",
+ cfqd->serving_wl_class);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
@@ -1073,9 +1032,7 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
*/
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
- struct cfq_rb_root *st =
- &cfqd->service_trees[cfqd->serving_wl_class]
- [cfqd->serving_wl_type];
+ struct cfq_rb_root *st = &cfqd->service_trees[cfqd->serving_wl_class];
if (!cfqd->rq_queued)
return NULL;
@@ -1201,6 +1158,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
}
+static inline int cfq_busy_queues_wl(enum wl_class_t wl_class,
+ struct cfq_data *cfqd)
+{
+ if (wl_class == IDLE_WORKLOAD)
+ return cfqd->service_tree_idle.count;
+
+ return cfqd->service_trees[wl_class].count;
+}
+
/*
* Move request from internal lists to the request queue dispatch list.
*/
@@ -1253,29 +1219,6 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}
-static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
- enum wl_class_t wl_class)
-{
- struct cfq_queue *queue;
- int i;
- bool key_valid = false;
- unsigned long lowest_key = 0;
- enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
-
- for (i = 0; i <= SYNC_WORKLOAD; ++i) {
- /* select the one with lowest rb_key */
- queue = cfq_rb_first(&cfqd->service_trees[wl_class][i]);
- if (queue &&
- (!key_valid || time_before(queue->rb_key, lowest_key))) {
- lowest_key = queue->rb_key;
- cur_best = i;
- key_valid = true;
- }
- }
-
- return cur_best;
-}
-
static void
choose_wl_class_and_type(struct cfq_data *cfqd)
{
@@ -1298,13 +1241,7 @@ choose_wl_class_and_type(struct cfq_data *cfqd)
if (original_class != cfqd->serving_wl_class)
goto new_workload;
- /*
- * For RT and BE, we have to choose also the type
- * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
- * expiration time
- */
- st = &cfqd->service_trees[cfqd->serving_wl_class]
- [cfqd->serving_wl_type];
+ st = &cfqd->service_trees[cfqd->serving_wl_class];
count = st->count;
/*
@@ -1314,26 +1251,11 @@ choose_wl_class_and_type(struct cfq_data *cfqd)
return;
new_workload:
- /* otherwise select new workload type */
- cfqd->serving_wl_type = cfq_choose_wl_type(cfqd,
- cfqd->serving_wl_class);
- st = &cfqd->service_trees[cfqd->serving_wl_class]
- [cfqd->serving_wl_type];
+ st = &cfqd->service_trees[cfqd->serving_wl_class];
count = st->count;
- if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
- slice = cfqd->cfq_target_latency *
- cfq_busy_async_queues(cfqd);
- slice = slice/cfqd->busy_queues;
-
- /* async workload slice is scaled down according to
- * the sync/async slice ratio. */
- slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
- } else
- /* sync workload slice is 2 * cfq_slice_idle */
- slice = 2 * cfqd->cfq_slice_idle;
-
- slice = max_t(unsigned, slice, CFQ_MIN_TT);
+ /* sync workload slice is 2 * cfq_slice_idle */
+ slice = max_t(unsigned, 2 * cfqd->cfq_slice_idle, CFQ_MIN_TT);
cfq_log(cfqd, "workload slice:%d", slice);
cfqd->workload_expires = jiffies + slice;
}
@@ -2078,8 +2000,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
if (cfq_cfqq_on_rr(cfqq))
st = cfqq->service_tree;
else
- st = &cfqd->service_trees[cfqq_class(cfqq)]
- [cfqq_type(cfqq)];
+ st = &cfqd->service_trees[cfqq_class(cfqq)];
st->ttime.last_end_request = now;
if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
--
1.9.1
next prev parent reply other threads:[~2016-02-01 22:48 UTC|newest]
Thread overview: 103+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-02-01 22:12 [PATCH RFC 00/22] Replace the CFQ I/O Scheduler with BFQ Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 01/22] block, cfq: remove queue merging for close cooperators Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 02/22] block, cfq: remove close-based preemption Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 03/22] block, cfq: remove deep seek queues logic Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 04/22] block, cfq: remove SSD-related logic Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 05/22] block, cfq: get rid of hierarchical support Paolo Valente
2016-02-10 23:04 ` Tejun Heo
2016-02-01 22:12 ` [PATCH RFC 06/22] block, cfq: get rid of queue preemption Paolo Valente
2016-02-01 22:12 ` Paolo Valente [this message]
2016-02-01 22:12 ` [PATCH RFC 08/22] block, cfq: get rid of latency tunables Paolo Valente
2016-02-10 23:05 ` Tejun Heo
2016-02-01 22:12 ` [PATCH RFC 09/22] block, cfq: replace CFQ with the BFQ-v0 I/O scheduler Paolo Valente
2016-02-11 22:22 ` Tejun Heo
2016-02-12 0:35 ` Mark Brown
2016-02-17 15:57 ` Tejun Heo
2016-02-17 16:02 ` Mark Brown
2016-02-17 17:04 ` Tejun Heo
2016-02-17 18:13 ` Jonathan Corbet
2016-02-17 19:45 ` Tejun Heo
2016-02-17 19:56 ` Jonathan Corbet
2016-02-17 20:14 ` Tejun Heo
2016-02-17 9:02 ` Paolo Valente
2016-02-17 17:02 ` Tejun Heo
2016-02-20 10:23 ` Paolo Valente
2016-02-20 11:02 ` Paolo Valente
2016-03-01 18:46 ` Tejun Heo
2016-03-04 17:29 ` Linus Walleij
2016-03-04 17:39 ` Christoph Hellwig
2016-03-04 18:10 ` Austin S. Hemmelgarn
2016-03-11 11:16 ` Christoph Hellwig
2016-03-11 13:38 ` Austin S. Hemmelgarn
2016-03-05 12:18 ` Linus Walleij
2016-03-11 11:17 ` Christoph Hellwig
2016-03-11 11:24 ` Nikolay Borisov
2016-03-11 11:49 ` Christoph Hellwig
2016-03-11 14:53 ` Linus Walleij
2016-03-09 6:55 ` Paolo Valente
2016-04-13 19:54 ` Tejun Heo
2016-04-14 5:03 ` Mark Brown
2016-03-09 6:34 ` Paolo Valente
2016-04-13 20:41 ` Tejun Heo
2016-04-14 10:23 ` Paolo Valente
2016-04-14 16:29 ` Tejun Heo
2016-04-15 14:20 ` Paolo Valente
2016-04-15 15:08 ` Tejun Heo
2016-04-15 16:17 ` Paolo Valente
2016-04-15 19:29 ` Tejun Heo
2016-04-15 22:08 ` Paolo Valente
2016-04-15 22:45 ` Tejun Heo
2016-04-16 6:03 ` Paolo Valente
2016-04-15 14:49 ` Linus Walleij
2016-02-01 22:12 ` [PATCH RFC 10/22] block, bfq: add full hierarchical scheduling and cgroups support Paolo Valente
2016-02-11 22:28 ` Tejun Heo
2016-02-17 9:07 ` Paolo Valente
2016-02-17 17:14 ` Tejun Heo
2016-02-17 17:45 ` Tejun Heo
2016-04-20 9:32 ` Paolo
2016-04-22 18:13 ` Tejun Heo
2016-04-22 18:19 ` Paolo Valente
2016-04-22 18:41 ` Tejun Heo
2016-04-22 19:05 ` Paolo Valente
2016-04-22 19:32 ` Tejun Heo
2016-04-23 7:07 ` Paolo Valente
2016-04-25 19:24 ` Tejun Heo
2016-04-25 20:30 ` Paolo
2016-05-06 20:20 ` Paolo Valente
2016-05-12 13:11 ` Paolo
2016-07-27 16:13 ` [PATCH RFC V8 00/22] Replace the CFQ I/O Scheduler with BFQ Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 01/22] block, cfq: remove queue merging for close cooperators Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 02/22] block, cfq: remove close-based preemption Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 03/22] block, cfq: remove deep seek queues logic Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 04/22] block, cfq: remove SSD-related logic Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 05/22] block, cfq: get rid of hierarchical support Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 06/22] block, cfq: get rid of queue preemption Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 07/22] block, cfq: get rid of workload type Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 08/22] block, cfq: get rid of latency tunables Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 09/22] block, cfq: replace CFQ with the BFQ-v0 I/O scheduler Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 10/22] block, bfq: add full hierarchical scheduling and cgroups support Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 11/22] block, bfq: improve throughput boosting Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 12/22] block, bfq: modify the peak-rate estimator Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 13/22] block, bfq: add more fairness with writes and slow processes Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 14/22] block, bfq: improve responsiveness Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 15/22] block, bfq: reduce I/O latency for soft real-time applications Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 16/22] block, bfq: preserve a low latency also with NCQ-capable drives Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 17/22] block, bfq: reduce latency during request-pool saturation Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 18/22] block, bfq: add Early Queue Merge (EQM) Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 19/22] block, bfq: reduce idling only in symmetric scenarios Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 20/22] block, bfq: boost the throughput on NCQ-capable flash-based devices Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 21/22] block, bfq: boost the throughput with random I/O on NCQ-capable HDDs Paolo Valente
2016-07-27 16:13 ` [PATCH RFC V8 22/22] block, bfq: handle bursts of queue activations Paolo Valente
2016-07-28 16:50 ` [PATCH RFC V8 00/22] Replace the CFQ I/O Scheduler with BFQ Paolo
2016-02-01 22:12 ` [PATCH RFC 11/22] block, bfq: improve throughput boosting Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 12/22] block, bfq: modify the peak-rate estimator Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 13/22] block, bfq: add more fairness to boost throughput and reduce latency Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 14/22] block, bfq: improve responsiveness Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 15/22] block, bfq: reduce I/O latency for soft real-time applications Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 16/22] block, bfq: preserve a low latency also with NCQ-capable drives Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 17/22] block, bfq: reduce latency during request-pool saturation Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 18/22] block, bfq: add Early Queue Merge (EQM) Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 19/22] block, bfq: reduce idling only in symmetric scenarios Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 20/22] block, bfq: boost the throughput on NCQ-capable flash-based devices Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 21/22] block, bfq: boost the throughput with random I/O on NCQ-capable HDDs Paolo Valente
2016-02-01 22:12 ` [PATCH RFC 22/22] block, bfq: handle bursts of queue activations Paolo Valente
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1454364778-25179-8-git-send-email-paolo.valente@linaro.org \
--to=paolo.valente@linaro.org \
--cc=avanzini.arianna@gmail.com \
--cc=axboe@kernel.dk \
--cc=broonie@kernel.org \
--cc=fchecconi@gmail.com \
--cc=linus.walleij@linaro.org \
--cc=linux-block@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=tj@kernel.org \
--cc=ulf.hansson@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).